#!/usr/bin/env python

# Copyright 2016, The Android Open Source Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Command-line tool for working with Android Verified Boot images."""

import argparse
import binascii
import bisect
import hashlib
import os
import struct
import subprocess
import sys

import Crypto.PublicKey.RSA

# Keep in sync with avb_vbmeta_header.h.
AVB_VERSION_MAJOR = 1
AVB_VERSION_MINOR = 0


class AvbError(Exception):
  """Application-specific errors.

  These errors represent issues for which a stack-trace should not be
  presented.

  Attributes:
    message: Error message.
  """

  def __init__(self, message):
    Exception.__init__(self, message)


class Algorithm(object):
  """Contains details about an algorithm.

  See the avb_vbmeta_header.h file for more details about
  algorithms.

  The constant |ALGORITHMS| is a dictionary from human-readable
  names (e.g. 'SHA256_RSA2048') to instances of this class.

  Attributes:
    algorithm_type: Integer code corresponding to |AvbAlgorithmType|.
    hash_num_bytes: Number of bytes used to store the hash.
    signature_num_bytes: Number of bytes used to store the signature.
    public_key_num_bytes: Number of bytes used to store the public key.
    padding: Padding used for signature, if any.
  """

  def __init__(self, algorithm_type, hash_num_bytes, signature_num_bytes,
               public_key_num_bytes, padding):
    self.algorithm_type = algorithm_type
    self.hash_num_bytes = hash_num_bytes
    self.signature_num_bytes = signature_num_bytes
    self.public_key_num_bytes = public_key_num_bytes
    self.padding = padding

# This must be kept in sync with the avb_crypto.h file.
#
# The PKCS1-v1.5 padding is a blob of binary DER-encoded ASN.1 and is
# obtained from section 5.2.2 of RFC 4880.
ALGORITHMS = {
    'NONE': Algorithm(
        algorithm_type=0,  # AVB_ALGORITHM_TYPE_NONE
        hash_num_bytes=0,
        signature_num_bytes=0,
        public_key_num_bytes=0,
        padding=[]),
    'SHA256_RSA2048': Algorithm(
        algorithm_type=1,  # AVB_ALGORITHM_TYPE_SHA256_RSA2048
        hash_num_bytes=32,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*202 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA4096': Algorithm(
        algorithm_type=2,  # AVB_ALGORITHM_TYPE_SHA256_RSA4096
        hash_num_bytes=32,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*458 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA8192': Algorithm(
        algorithm_type=3,  # AVB_ALGORITHM_TYPE_SHA256_RSA8192
        hash_num_bytes=32,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*970 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA512_RSA2048': Algorithm(
        algorithm_type=4,  # AVB_ALGORITHM_TYPE_SHA512_RSA2048
        hash_num_bytes=64,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*170 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA4096': Algorithm(
        algorithm_type=5,  # AVB_ALGORITHM_TYPE_SHA512_RSA4096
        hash_num_bytes=64,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*426 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA8192': Algorithm(
        algorithm_type=6,  # AVB_ALGORITHM_TYPE_SHA512_RSA8192
        hash_num_bytes=64,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*938 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
}
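# Illustrative note (not part of the original comments): for each RSA entry
# above, the PKCS1-v1.5 padding length plus the digest length adds up to the
# signature size. For SHA256_RSA2048 the padding is 2 + 202 + 1 + 19 = 224
# bytes and 224 + 32 (SHA-256 digest) = 256 = signature_num_bytes. A quick
# sanity check one could run against the table:
#
#   alg = ALGORITHMS['SHA256_RSA2048']
#   assert len(alg.padding) + alg.hash_num_bytes == alg.signature_num_bytes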


def round_to_multiple(number, size):
  """Rounds a number up to nearest multiple of another number.

  Args:
    number: The number to round up.
    size: The multiple to round up to.

  Returns:
    If |number| is a multiple of |size|, returns |number|, otherwise
    returns the nearest multiple of |size| greater than |number|.
  """
  remainder = number % size
  if remainder == 0:
    return number
  return number + size - remainder
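# Worked example (illustrative, not part of the original source):
#
#   round_to_multiple(65, 8)  # -> 72  (65 % 8 == 1, so 65 + 8 - 1)
#   round_to_multiple(64, 8)  # -> 64  (already a multiple)
#
# The descriptor encode() methods below rely on this to pad
# |num_bytes_following| to a multiple of 8 bytes.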


def round_to_pow2(number):
  """Rounds a number up to the next power of 2.

  Args:
    number: The number to round up.

  Returns:
    If |number| is already a power of 2 then |number| is
    returned. Otherwise the smallest power of 2 greater than |number|
    is returned.
  """
  return 2**((number - 1).bit_length())
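# Worked example (illustrative, not part of the original source):
#
#   round_to_pow2(5)  # -> 8   ((5 - 1).bit_length() == 3, and 2**3 == 8)
#   round_to_pow2(8)  # -> 8   ((8 - 1).bit_length() == 3)
#   round_to_pow2(9)  # -> 16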


def write_long(output, num_bits, value):
  """Writes a long to an output stream using a given number of bits.

  The number is written big-endian, i.e. with the most significant
  byte first.

  Arguments:
    output: The object to write the output to.
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.
  """
  for bit_pos in range(num_bits, 0, -8):
    octet = (value >> (bit_pos - 8)) & 0xff
    output.write(struct.pack('!B', octet))


def encode_long(num_bits, value):
  """Encodes a long to a bytearray() using a given number of bits.

  The number is encoded big-endian, i.e. with the most significant
  byte first.

  Arguments:
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.

  Returns:
    A bytearray() with the encoded long.
  """
  ret = bytearray()
  for bit_pos in range(num_bits, 0, -8):
    octet = (value >> (bit_pos - 8)) & 0xff
    ret.extend(struct.pack('!B', octet))
  return ret
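# Worked example (illustrative, not part of the original source):
#
#   encode_long(32, 0x1234)  # -> bytearray(b'\x00\x00\x12\x34')
#
# write_rsa_key() and encode_rsa_key() below use this to serialize the
# 2048/4096/8192-bit modulus and the precomputed r^2 mod n value.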


def egcd(a, b):
  """Calculate greatest common divisor of two numbers.

  This implementation uses a recursive version of the extended
  Euclidean algorithm.

  Arguments:
    a: First number.
    b: Second number.

  Returns:
    A tuple (gcd, x, y) such that |gcd| is the greatest common
    divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|.
  """
  if a == 0:
    return (b, 0, 1)
  else:
    g, y, x = egcd(b % a, a)
    return (g, x - (b // a) * y, y)


def modinv(a, m):
  """Calculate modular multiplicative inverse of |a| modulo |m|.

  This calculates the number |x| such that |a| * |x| == 1 (modulo
  |m|). This number only exists if |a| and |m| are co-prime - |None|
  is returned if this isn't true.

  Arguments:
    a: The number to calculate a modular inverse of.
    m: The modulo to use.

  Returns:
    The modular multiplicative inverse of |a| and |m| or |None| if
    these numbers are not co-prime.
  """
  gcd, x, _ = egcd(a, m)
  if gcd != 1:
    return None  # modular inverse does not exist
  else:
    return x % m
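# Worked example (illustrative, not part of the original source):
#
#   egcd(12, 30)   # -> (6, -2, 1) since 12*(-2) + 30*1 == 6
#   modinv(3, 7)   # -> 5 since (3 * 5) % 7 == 1
#
# write_rsa_key()/encode_rsa_key() below use modinv() to compute
# n0inv = -1/n (mod 2^32), which is stored in the key blob alongside
# r^2 mod n (both are standard Montgomery-multiplication parameters).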


def parse_number(string):
  """Parse a string as a number.

  This is just a short-hand for int(string, 0) suitable for use in the
  |type| parameter of |ArgumentParser|'s add_argument() function. An
  improvement to just using type=int is that this function supports
  numbers in other bases, e.g. "0x1234".

  Arguments:
    string: The string to parse.

  Returns:
    The parsed integer.

  Raises:
    ValueError: If the number could not be parsed.
  """
  return int(string, 0)


def write_rsa_key(output, key):
  """Writes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This writes the |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    output: The object to write the output to.
    key: A Crypto.PublicKey.RSA object.
  """
  # key.e is exponent
  # key.n is modulus
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32)
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  output.write(struct.pack('!II', key_num_bits, n0inv))
  write_long(output, key_num_bits, key.n)
  write_long(output, key_num_bits, rrmodn)


def encode_rsa_key(key):
  """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This creates a |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    key: A Crypto.PublicKey.RSA object.

  Returns:
    A bytearray() with the |AvbRSAPublicKeyHeader|.
  """
  ret = bytearray()
  # key.e is exponent
  # key.n is modulus
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32)
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  ret.extend(struct.pack('!II', key_num_bits, n0inv))
  ret.extend(encode_long(key_num_bits, key.n))
  ret.extend(encode_long(key_num_bits, rrmodn))
  return ret
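# Usage sketch (illustrative; 'testkey_rsa2048.pem' is a hypothetical file
# name, not something shipped with this tool):
#
#   key = Crypto.PublicKey.RSA.importKey(open('testkey_rsa2048.pem').read())
#   blob = encode_rsa_key(key)
#   assert len(blob) == 8 + 2*2048/8  # 8-byte header + modulus + r^2 mod n
#
# The resulting blob is what ends up in the public key portion of the
# vbmeta auxiliary data block.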


def lookup_algorithm_by_type(alg_type):
  """Looks up algorithm by type.

  Arguments:
    alg_type: The integer representing the type.

  Returns:
    A tuple with the algorithm name and an |Algorithm| instance.

  Raises:
    AvbError: If the algorithm cannot be found.
  """
  for alg_name in ALGORITHMS:
    alg_data = ALGORITHMS[alg_name]
    if alg_data.algorithm_type == alg_type:
      return (alg_name, alg_data)
  raise AvbError('Unknown algorithm type {}'.format(alg_type))
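# Usage sketch (illustrative, not part of the original source):
#
#   (name, alg) = lookup_algorithm_by_type(1)
#   # name == 'SHA256_RSA2048', alg.signature_num_bytes == 256
#
# info_image() below uses this to map the algorithm_type stored in a vbmeta
# header back to a human-readable name.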


class ImageChunk(object):
  """Data structure used for representing chunks in Android sparse files.

  Attributes:
    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
    chunk_offset: Offset in the sparse file where this chunk begins.
    output_offset: Offset in de-sparsified file where output begins.
    output_size: Number of bytes in output.
    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
  """

  FORMAT = '<2H2I'
  TYPE_RAW = 0xcac1
  TYPE_FILL = 0xcac2
  TYPE_DONT_CARE = 0xcac3
  TYPE_CRC32 = 0xcac4

  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
               input_offset, fill_data):
    """Initializes an ImageChunk object.

    Arguments:
      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
      chunk_offset: Offset in the sparse file where this chunk begins.
      output_offset: Offset in de-sparsified file.
      output_size: Number of bytes in output.
      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
      fill_data: Blob with data to fill if TYPE_FILL otherwise None.

    Raises:
      ValueError: If data is not well-formed.
    """
    self.chunk_type = chunk_type
    self.chunk_offset = chunk_offset
    self.output_offset = output_offset
    self.output_size = output_size
    self.input_offset = input_offset
    self.fill_data = fill_data
    # Check invariants.
    if self.chunk_type == self.TYPE_RAW:
      if self.fill_data is not None:
        raise ValueError('RAW chunk cannot have fill_data set.')
      if not self.input_offset:
        raise ValueError('RAW chunk must have input_offset set.')
    elif self.chunk_type == self.TYPE_FILL:
      if self.fill_data is None:
        raise ValueError('FILL chunk must have fill_data set.')
      if self.input_offset:
        raise ValueError('FILL chunk cannot have input_offset set.')
    elif self.chunk_type == self.TYPE_DONT_CARE:
      if self.fill_data is not None:
        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
      if self.input_offset:
        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
    else:
      raise ValueError('Invalid chunk type')
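# Note (illustrative, not part of the original source): ImageChunk.FORMAT
# ('<2H2I') is the 12-byte on-disk chunk header of the Android sparse format:
# chunk_type, reserved, chunk_sz (in blocks), total_sz (header plus payload,
# in bytes). For instance, a DONT_CARE chunk covering 8 blocks located right
# after the 28-byte sparse file header could be described as:
#
#   chunk = ImageChunk(ImageChunk.TYPE_DONT_CARE,
#                      chunk_offset=28,
#                      output_offset=0,
#                      output_size=8*4096,
#                      input_offset=None,
#                      fill_data=None)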


class ImageHandler(object):
  """Abstraction for image I/O with support for Android sparse images.

  This class provides an interface for working with image files that
  may be using the Android Sparse Image format. When an instance is
  constructed, we test whether it's an Android sparse file. If so,
  operations will be on the sparse file by interpreting the sparse
  format, otherwise they will be directly on the file. Either way the
  operations behave the same.

  For reading, this interface mimics a file object - it has seek(),
  tell(), and read() methods. For writing, only truncation
  (truncate()) and appending is supported (append_raw() and
  append_dont_care()). Additionally, data can only be written in units
  of the block size.

  Attributes:
    is_sparse: Whether the file being operated on is sparse.
    block_size: The block size, typically 4096.
    image_size: The size of the unsparsified file.
  """
  # See system/core/libsparse/sparse_format.h for details.
  MAGIC = 0xed26ff3a
  HEADER_FORMAT = '<I4H4I'

  # These are formats and offset of just the |total_chunks| and
  # |total_blocks| fields.
  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16

  def __init__(self, image_filename):
    """Initializes an image handler.

    Arguments:
      image_filename: The name of the file to operate on.

    Raises:
      ValueError: If data in the file is invalid.
    """
    self._image_filename = image_filename
    self._read_header()

  def _read_header(self):
    """Initializes internal data structures used for reading the file.

    This may be called multiple times and is typically called after
    modifying the file (e.g. appending, truncation).

    Raises:
      ValueError: If data in the file is invalid.
    """
    self.is_sparse = False
    self.block_size = 4096
    self._file_pos = 0
    self._image = open(self._image_filename, 'r+b')
    self._image.seek(0, os.SEEK_END)
    self.image_size = self._image.tell()

    self._image.seek(0, os.SEEK_SET)
    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
     block_size, self._num_total_blocks, self._num_total_chunks,
     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
    if magic != self.MAGIC:
      # Not a sparse image, our job here is done.
      return
    if not (major_version == 1 and minor_version == 0):
      raise ValueError('Encountered sparse image format version {}.{} but '
                       'only 1.0 is supported'.format(major_version,
                                                      minor_version))
    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
      raise ValueError('Unexpected file_hdr_sz value {}.'.
                       format(file_hdr_sz))
    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
                       format(chunk_hdr_sz))

    self.block_size = block_size

    # Build a list of chunks by parsing the file.
    self._chunks = []

    # Find the smallest offset where only "Don't care" chunks
    # follow. This will be the size of the content in the sparse
    # image.
    offset = 0
    output_offset = 0
    for _ in xrange(1, self._num_total_chunks + 1):
      chunk_offset = self._image.tell()

      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
                                                          header_bin)
      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)

      if chunk_type == ImageChunk.TYPE_RAW:
        if data_sz != (chunk_sz * self.block_size):
          raise ValueError('Raw chunk input size ({}) does not match output '
                           'size ({})'.
                           format(data_sz, chunk_sz*self.block_size))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       self._image.tell(),
                                       None))
        self._image.read(data_sz)

      elif chunk_type == ImageChunk.TYPE_FILL:
        if data_sz != 4:
          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
                           'has {}'.format(data_sz))
        fill_data = self._image.read(4)
        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       fill_data))
      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
        if data_sz != 0:
          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
                           format(data_sz))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       None))
      elif chunk_type == ImageChunk.TYPE_CRC32:
        if data_sz != 4:
          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
                           'this has {}'.format(data_sz))
        self._image.read(4)
      else:
        raise ValueError('Unknown chunk type {}'.format(chunk_type))

      offset += chunk_sz
      output_offset += chunk_sz*self.block_size

    # Record where sparse data ends.
    self._sparse_end = self._image.tell()

    # Now that we've traversed all chunks, sanity check.
    if self._num_total_blocks != offset:
      raise ValueError('The header said we should have {} output blocks, '
                       'but we saw {}'.format(self._num_total_blocks, offset))
    junk_len = len(self._image.read())
    if junk_len > 0:
      raise ValueError('There were {} bytes of extra data at the end of the '
                       'file.'.format(junk_len))

    # Assign |image_size|.
    self.image_size = output_offset

    # This is used when bisecting in read() to find the initial slice.
    self._chunk_output_offsets = [i.output_offset for i in self._chunks]

    self.is_sparse = True

  def _update_chunks_and_blocks(self):
    """Helper function to update the image header.

    The |total_chunks| and |total_blocks| fields in the header will be
    set to the value of the |_num_total_blocks| and |_num_total_chunks|
    attributes.
    """
    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
                                  self._num_total_blocks,
                                  self._num_total_chunks))

  def append_dont_care(self, num_bytes):
    """Appends a DONT_CARE chunk to the sparse file.

    The given number of bytes must be a multiple of the block size.

    Arguments:
      num_bytes: Size in number of bytes of the DONT_CARE chunk.
    """
    assert num_bytes % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # This is more efficient than writing NUL bytes since it'll add
      # a hole on file systems that support sparse files (native
      # sparse, not Android sparse).
      self._image.truncate(self._image.tell() + num_bytes)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += num_bytes / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_DONT_CARE,
                                  0,  # Reserved
                                  num_bytes / self.block_size,
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._read_header()

  def append_raw(self, data):
    """Appends a RAW chunk to the sparse file.

    The length of the given data must be a multiple of the block size.

    Arguments:
      data: Data to append.
    """
    assert len(data) % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(data)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += len(data) / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_RAW,
                                  0,  # Reserved
                                  len(data) / self.block_size,
                                  len(data) +
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(data)
    self._read_header()

  def append_fill(self, fill_data, size):
    """Appends a fill chunk to the sparse file.

    The total length of the fill data must be a multiple of the block size.

    Arguments:
      fill_data: Fill data to append - must be four bytes.
      size: Size of the fill chunk in bytes - must be a multiple of four and
          the block size.
    """
    assert len(fill_data) == 4
    assert size % 4 == 0
    assert size % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(fill_data * (size/4))
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += size / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_FILL,
                                  0,  # Reserved
                                  size / self.block_size,
                                  4 + struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(fill_data)
    self._read_header()

  def seek(self, offset):
    """Sets the cursor position for reading from unsparsified file.

    Arguments:
      offset: Offset to seek to from the beginning of the file.
    """
    self._file_pos = offset

  def read(self, size):
    """Reads data from the unsparsified file.

    This method may return fewer than |size| bytes of data if the end
    of the file was encountered.

    The file cursor for reading is advanced by the number of bytes
    read.

    Arguments:
      size: Number of bytes to read.

    Returns:
      The data.
    """
    if not self.is_sparse:
      self._image.seek(self._file_pos)
      data = self._image.read(size)
      self._file_pos += len(data)
      return data

    # Iterate over all chunks.
    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
                                    self._file_pos) - 1
    data = bytearray()
    to_go = size
    while to_go > 0:
      chunk = self._chunks[chunk_idx]
      chunk_pos_offset = self._file_pos - chunk.output_offset
      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)

      if chunk.chunk_type == ImageChunk.TYPE_RAW:
        self._image.seek(chunk.input_offset + chunk_pos_offset)
        data.extend(self._image.read(chunk_pos_to_go))
      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
        offset_mod = chunk_pos_offset % len(chunk.fill_data)
        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
      else:
        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
        data.extend('\0' * chunk_pos_to_go)

      to_go -= chunk_pos_to_go
      self._file_pos += chunk_pos_to_go
      chunk_idx += 1
      # Generate partial read in case of EOF.
      if chunk_idx >= len(self._chunks):
        break

    return data

  def tell(self):
    """Returns the file cursor position for reading from unsparsified file.

    Returns:
      The file cursor position for reading.
    """
    return self._file_pos

  def truncate(self, size):
    """Truncates the unsparsified file.

    Arguments:
      size: Desired size of unsparsified file.

    Raises:
      ValueError: If desired size isn't a multiple of the block size.
    """
    if not self.is_sparse:
      self._image.truncate(size)
      self._read_header()
      return

    if size % self.block_size != 0:
      raise ValueError('Cannot truncate to a size which is not a multiple '
                       'of the block size')

    if size == self.image_size:
      # Trivial case where there's nothing to do.
      return
    elif size < self.image_size:
      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
      chunk = self._chunks[chunk_idx]
      if chunk.output_offset != size:
        # Truncation in the middle of a chunk - need to keep the chunk
        # and modify it.
        chunk_idx_for_update = chunk_idx + 1
        num_to_keep = size - chunk.output_offset
        assert num_to_keep % self.block_size == 0
        if chunk.chunk_type == ImageChunk.TYPE_RAW:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
          data_sz = num_to_keep
        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + 4)
          data_sz = 4
        else:
          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
          data_sz = 0
        chunk_sz = num_to_keep/self.block_size
        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
        self._image.seek(chunk.chunk_offset)
        self._image.write(struct.pack(ImageChunk.FORMAT,
                                      chunk.chunk_type,
                                      0,  # Reserved
                                      chunk_sz,
                                      total_sz))
        chunk.output_size = num_to_keep
      else:
        # Truncation at chunk boundary.
        truncate_at = chunk.chunk_offset
        chunk_idx_for_update = chunk_idx

      self._num_total_chunks = chunk_idx_for_update
      self._num_total_blocks = 0
      for i in range(0, chunk_idx_for_update):
        self._num_total_blocks += self._chunks[i].output_size / self.block_size
      self._update_chunks_and_blocks()
      self._image.truncate(truncate_at)

      # We've modified the file so re-read all data.
      self._read_header()
    else:
      # Truncating to grow - just add a DONT_CARE section.
      self.append_dont_care(size - self.image_size)
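# Usage sketch (illustrative; 'system.img' is a hypothetical file name): the
# same calls work whether the file is a regular image or an Android sparse
# image, which is why commands like info_image and erase_footer below can
# stay format-agnostic.
#
#   handler = ImageHandler('system.img')
#   handler.seek(0)
#   first_block = handler.read(handler.block_size)
#   handler.seek(handler.image_size - 1024)
#   tail = handler.read(1024)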


class AvbDescriptor(object):
  """Class for AVB descriptor.

  See the |AvbDescriptor| C struct for more information.

  Attributes:
    tag: The tag identifying what kind of descriptor this is.
    data: The data in the descriptor.
  """

  SIZE = 16
  FORMAT_STRING = ('!QQ')  # tag, num_bytes_following (descriptor header)

  def __init__(self, data):
    """Initializes a new descriptor.

    Arguments:
      data: If not None, must be a bytearray().

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.tag, num_bytes_following) = (
          struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
      self.data = data[self.SIZE:self.SIZE + num_bytes_following]
    else:
      self.tag = None
      self.data = None

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Unknown descriptor:\n')
    o.write(' Tag: {}\n'.format(self.tag))
    if len(self.data) < 256:
      o.write(' Data: {} ({} bytes)\n'.format(
          repr(str(self.data)), len(self.data)))
    else:
      o.write(' Data: {} bytes\n'.format(len(self.data)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    num_bytes_following = len(self.data)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding)
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + self.data + padding
    return bytearray(ret)


class AvbPropertyDescriptor(AvbDescriptor):
  """A class for property descriptors.

  See the |AvbPropertyDescriptor| C struct for more information.

  Attributes:
    key: The key.
    value: The value.
  """

  TAG = 0
  SIZE = 32
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'  # key size (bytes)
                   'Q')  # value size (bytes)

  def __init__(self, data=None):
    """Initializes a new property descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, key_size,
       value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + key_size + 1 + value_size + 1, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a property '
                          'descriptor.')
      self.key = data[self.SIZE:(self.SIZE + key_size)]
      self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 +
                                                    value_size)]
    else:
      self.key = ''
      self.value = ''

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    if len(self.value) < 256:
      o.write(' Prop: {} -> {}\n'.format(self.key, repr(str(self.value))))
    else:
      o.write(' Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    num_bytes_following = self.SIZE + len(self.key) + len(self.value) + 2 - 16
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       len(self.key), len(self.value))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + self.key + '\0' + self.value + '\0' + padding
    return bytearray(ret)


class AvbHashtreeDescriptor(AvbDescriptor):
  """A class for hashtree descriptors.

  See the |AvbHashtreeDescriptor| C struct for more information.

  Attributes:
    dm_verity_version: dm-verity version used.
    image_size: Size of the image, after rounding up to |block_size|.
    tree_offset: Offset of the hash tree in the file.
    tree_size: Size of the tree.
    data_block_size: Data block size
    hash_block_size: Hash block size
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    root_digest: Root digest.
  """

  TAG = 1
  SIZE = 96
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # dm-verity version used
                   'Q'  # image size (bytes)
                   'Q'  # tree offset (bytes)
                   'Q'  # tree size (bytes)
                   'L'  # data block size (bytes)
                   'L'  # hash block size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # root digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hashtree descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.dm_verity_version, self.image_size,
       self.tree_offset, self.tree_size, self.data_block_size,
       self.hash_block_size, self.hash_algorithm, partition_name_len, salt_len,
       root_digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hashtree '
                          'descriptor.')
      # Nuke NUL-bytes at the end.
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
      if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('root_digest_len doesn\'t match hash algorithm')

    else:
      self.dm_verity_version = 0
      self.image_size = 0
      self.tree_offset = 0
      self.tree_size = 0
      self.data_block_size = 0
      self.hash_block_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.root_digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Hashtree descriptor:\n')
    o.write(' Version of dm-verity: {}\n'.format(self.dm_verity_version))
    o.write(' Image Size: {} bytes\n'.format(self.image_size))
    o.write(' Tree Offset: {}\n'.format(self.tree_offset))
    o.write(' Tree Size: {} bytes\n'.format(self.tree_size))
    o.write(' Data Block Size: {} bytes\n'.format(
        self.data_block_size))
    o.write(' Hash Block Size: {} bytes\n'.format(
        self.hash_block_size))
    o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm))
    o.write(' Partition Name: {}\n'.format(self.partition_name))
    o.write(' Salt: {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write(' Root Digest: {}\n'.format(str(
        self.root_digest).encode('hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) +
                           len(self.root_digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.dm_verity_version, self.image_size,
                       self.tree_offset, self.tree_size, self.data_block_size,
                       self.hash_block_size, self.hash_algorithm,
                       len(encoded_name), len(self.salt), len(self.root_digest))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.root_digest + padding
    return bytearray(ret)


class AvbHashDescriptor(AvbDescriptor):
  """A class for hash descriptors.

  See the |AvbHashDescriptor| C struct for more information.

  Attributes:
    image_size: Image size, in bytes.
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    digest: The hash value of salt and data combined.
  """

  TAG = 2
  SIZE = 68
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'  # image size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hash descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.image_size, self.hash_algorithm,
       partition_name_len, salt_len,
       digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hash descriptor.')
      # Nuke NUL-bytes at the end.
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)]
      if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('digest_len doesn\'t match hash algorithm')

    else:
      self.image_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Hash descriptor:\n')
    o.write(' Image Size: {} bytes\n'.format(self.image_size))
    o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm))
    o.write(' Partition Name: {}\n'.format(self.partition_name))
    o.write(' Salt: {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write(' Digest: {}\n'.format(str(self.digest).encode(
        'hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (
        self.SIZE + len(encoded_name) + len(self.salt) + len(self.digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.image_size, self.hash_algorithm, len(encoded_name),
                       len(self.salt), len(self.digest))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.digest + padding
    return bytearray(ret)


class AvbKernelCmdlineDescriptor(AvbDescriptor):
  """A class for kernel command-line descriptors.

  See the |AvbKernelCmdlineDescriptor| C struct for more information.

  Attributes:
    kernel_cmdline: The kernel command-line.
  """

  TAG = 3
  SIZE = 20
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L')  # cmdline length (bytes)

  def __init__(self, data=None):
    """Initializes a new kernel cmdline descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, kernel_cmdline_length) = (
          struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
      expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length,
                                        8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a kernel cmdline '
                          'descriptor.')
      # Nuke NUL-bytes at the end.
      self.kernel_cmdline = str(data[self.SIZE:(self.SIZE +
                                                kernel_cmdline_length)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.kernel_cmdline.decode('utf-8')
    else:
      self.kernel_cmdline = ''

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Kernel Cmdline descriptor:\n')
    o.write(' Kernel Cmdline: {}\n'.format(repr(
        self.kernel_cmdline)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_str = self.kernel_cmdline.encode('utf-8')
    num_bytes_following = (self.SIZE + len(encoded_str) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       len(encoded_str))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_str + padding
    return bytearray(ret)


class AvbChainPartitionDescriptor(AvbDescriptor):
  """A class for chained partition descriptors.

  See the |AvbChainPartitionDescriptor| C struct for more information.

  Attributes:
    rollback_index_slot: The rollback index slot to use.
    partition_name: Partition name.
    public_key: Bytes for the public key.
  """

  TAG = 4
  SIZE = 28
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # rollback_index_slot
                   'L'  # partition_name_size (bytes)
                   'L')  # public_key_size (bytes)

  def __init__(self, data=None):
    """Initializes a new chain partition descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.rollback_index_slot, partition_name_len,
       public_key_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + public_key_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a chain partition '
                          'descriptor.')
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.public_key = data[(self.SIZE + o):(self.SIZE + o + public_key_len)]

    else:
      self.rollback_index_slot = 0
      self.partition_name = ''
      self.public_key = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Chain Partition descriptor:\n')
    o.write(' Partition Name: {}\n'.format(self.partition_name))
    o.write(' Rollback Index Slot: {}\n'.format(
        self.rollback_index_slot))
    # Just show the SHA1 of the key, for size reasons.
    hexdig = hashlib.sha1(self.public_key).hexdigest()
    o.write(' Public key (sha1): {}\n'.format(hexdig))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (
        self.SIZE + len(encoded_name) + len(self.public_key) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.rollback_index_slot, len(encoded_name),
                       len(self.public_key))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.public_key + padding
    return bytearray(ret)


DESCRIPTOR_CLASSES = [
    AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor,
    AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor
]


1349 """Parses a blob of data into descriptors.
1350
1351 Arguments:
1352 data: A bytearray() with encoded descriptors.
1353
1354 Returns:
1355 A list of instances of objects derived from AvbDescriptor. For
1356 unknown descriptors, the class AvbDescriptor is used.
1357 """
1358 o = 0
1359 ret = []
1360 while o < len(data):
1361 tag, nb_following = struct.unpack('!2Q', data[o:o + 16])
1362 if tag < len(DESCRIPTOR_CLASSES):
1363 c = DESCRIPTOR_CLASSES[tag]
1364 else:
1365 c = AvbDescriptor
1366 ret.append(c(bytearray(data[o:o + 16 + nb_following])))
1367 o += 16 + nb_following
1368 return ret
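# Roundtrip sketch (illustrative, not part of the original source):
#
#   prop = AvbPropertyDescriptor()
#   prop.key = 'com.example.foo'
#   prop.value = 'bar'
#   descs = parse_descriptors(prop.encode())
#   # descs[0] is an AvbPropertyDescriptor with the same key and value,
#   # since tag 0 maps to DESCRIPTOR_CLASSES[0].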


class AvbFooter(object):
  """A class for parsing and writing footers.

  Footers are stored at the end of partitions and point to where the
  AvbVBMeta blob is located. They also contain the original size of
  the image before AVB information was added.

  Attributes:
    magic: Magic for identifying the footer, see |MAGIC|.
    version_major: The major version of avbtool that wrote the footer.
    version_minor: The minor version of avbtool that wrote the footer.
    original_image_size: Original image size.
    vbmeta_offset: Offset of where the AvbVBMeta blob is stored.
    vbmeta_size: Size of the AvbVBMeta blob.
  """

  MAGIC = 'AVBf'
  SIZE = 64
  RESERVED = 28
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version.
                   'Q'  # Original image size.
                   'Q'  # Offset of VBMeta blob.
                   'Q' +  # Size of VBMeta blob.
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new footer object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given footer is malformed.
      struct.error: If the given data has no footer.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.magic, self.version_major, self.version_minor,
       self.original_image_size, self.vbmeta_offset,
       self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data)
      if self.magic != self.MAGIC:
        raise LookupError('Given data does not look like an AVB footer.')
    else:
      self.magic = self.MAGIC
      self.version_major = AVB_VERSION_MAJOR
      self.version_minor = AVB_VERSION_MINOR
      self.original_image_size = 0
      self.vbmeta_offset = 0
      self.vbmeta_size = 0

  def encode(self):
    """Gets a string representing the binary encoding of the footer.

    Returns:
      A bytearray() with a binary representation of the footer.
    """
    return struct.pack(self.FORMAT_STRING, self.magic, self.version_major,
                       self.version_minor, self.original_image_size,
                       self.vbmeta_offset, self.vbmeta_size)
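# Roundtrip sketch (illustrative, not part of the original source):
#
#   footer = AvbFooter()
#   footer.vbmeta_offset = 10 * 1024 * 1024
#   blob = footer.encode()          # 64 bytes, starts with 'AVBf'
#   again = AvbFooter(bytearray(blob))
#   assert again.vbmeta_offset == footer.vbmeta_offset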
David Zeuthen21e95262016-07-27 17:58:40 -04001431
1432
1433class AvbVBMetaHeader(object):
David Zeuthen8b6973b2016-09-20 12:39:49 -04001434 """A class for parsing and writing AVB vbmeta images.
David Zeuthen21e95262016-07-27 17:58:40 -04001435
1436 Attributes:
1437 The attributes correspond to the |AvbVBMetaHeader| struct
1438 defined in avb_vbmeta_header.h.
1439 """
1440
1441 SIZE = 256
1442
1443 # Keep in sync with |reserved| field of |AvbVBMetaImageHeader|.
1444 RESERVED = 152
1445
1446 # Keep in sync with |AvbVBMetaImageHeader|.
1447 FORMAT_STRING = ('!4s2L' # magic, 2 x version
1448 '2Q' # 2 x block size
1449 'L' # algorithm type
1450 '2Q' # offset, size (hash)
1451 '2Q' # offset, size (signature)
1452 '2Q' # offset, size (public key)
1453 '2Q' # offset, size (descriptors)
1454 'Q' + # rollback_index
1455 str(RESERVED) + 'x') # padding for reserved bytes
1456
1457 def __init__(self, data=None):
1458 """Initializes a new header object.
1459
1460 Arguments:
1461 data: If not None, must be a bytearray of size 8192.
1462
1463 Raises:
1464 Exception: If the given data is malformed.
1465 """
1466 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
1467
1468 if data:
1469 (self.magic, self.header_version_major, self.header_version_minor,
1470 self.authentication_data_block_size, self.auxiliary_data_block_size,
1471 self.algorithm_type, self.hash_offset, self.hash_size,
1472 self.signature_offset, self.signature_size, self.public_key_offset,
1473 self.public_key_size, self.descriptors_offset, self.descriptors_size,
1474 self.rollback_index) = struct.unpack(self.FORMAT_STRING, data)
1475 # Nuke NUL-bytes at the end of the string.
1476 if self.magic != 'AVB0':
David Zeuthen8b6973b2016-09-20 12:39:49 -04001477 raise AvbError('Given image does not look like a vbmeta image.')
David Zeuthen21e95262016-07-27 17:58:40 -04001478 else:
1479 self.magic = 'AVB0'
1480 self.header_version_major = AVB_VERSION_MAJOR
1481 self.header_version_minor = AVB_VERSION_MINOR
1482 self.authentication_data_block_size = 0
1483 self.auxiliary_data_block_size = 0
1484 self.algorithm_type = 0
1485 self.hash_offset = 0
1486 self.hash_size = 0
1487 self.signature_offset = 0
1488 self.signature_size = 0
1489 self.public_key_offset = 0
1490 self.public_key_size = 0
1491 self.descriptors_offset = 0
1492 self.descriptors_size = 0
1493 self.rollback_index = 0
1494
1495 def save(self, output):
1496 """Serializes the header (256 bytes) to disk.
1497
1498 Arguments:
1499 output: The object to write the output to.
1500 """
1501 output.write(struct.pack(
1502 self.FORMAT_STRING, self.magic, self.header_version_major,
1503 self.header_version_minor, self.authentication_data_block_size,
1504 self.auxiliary_data_block_size, self.algorithm_type, self.hash_offset,
1505 self.hash_size, self.signature_offset, self.signature_size,
1506 self.public_key_offset, self.public_key_size, self.descriptors_offset,
1507 self.descriptors_size, self.rollback_index))
1508
1509 def encode(self):
1510 """Serializes the header (256) to a bytearray().
1511
1512 Returns:
1513 A bytearray() with the encoded header.
1514 """
1515 return struct.pack(self.FORMAT_STRING, self.magic,
1516 self.header_version_major, self.header_version_minor,
1517 self.authentication_data_block_size,
1518 self.auxiliary_data_block_size, self.algorithm_type,
1519 self.hash_offset, self.hash_size, self.signature_offset,
1520 self.signature_size, self.public_key_offset,
1521 self.public_key_size, self.descriptors_offset,
1522 self.descriptors_size, self.rollback_index)
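# Sanity-check sketch (illustrative, not part of the original source):
#
#   header = AvbVBMetaHeader()
#   assert len(header.encode()) == AvbVBMetaHeader.SIZE  # 256 bytes
#   parsed = AvbVBMetaHeader(bytearray(header.encode()))
#   assert parsed.magic == 'AVB0'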


class Avb(object):
  """Business logic for avbtool command-line tool."""

  # Keep in sync with avb_ab_flow.h.
  AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x'
  AB_MAGIC = '\0AB0'
  AB_MAJOR_VERSION = 1
  AB_MINOR_VERSION = 0
  AB_MISC_METADATA_OFFSET = 2048

  # Constants for maximum metadata size. These are used to give
  # meaningful errors if the value passed in via --partition_size is
  # too small and when --calc_max_image_size is used. We use
  # conservative figures.
  MAX_VBMETA_SIZE = 64 * 1024
  MAX_FOOTER_SIZE = 4096

  def erase_footer(self, image_filename, keep_hashtree):
    """Implements the 'erase_footer' command.

    Arguments:
      image_filename: File to erase a footer from.
      keep_hashtree: If True, keep the hashtree around.

    Raises:
      AvbError: If there's no footer in the image.
    """

    image = ImageHandler(image_filename)

    (footer, _, descriptors, _) = self._parse_image(image)

    if not footer:
      raise AvbError('Given image does not have a footer.')

    new_image_size = None
    if not keep_hashtree:
      new_image_size = footer.original_image_size
    else:
      # If requested to keep the hashtree, search for a hashtree
      # descriptor to figure out the location and size of the hashtree.
      for desc in descriptors:
        if isinstance(desc, AvbHashtreeDescriptor):
          # The hashtree is always just following the main data so the
          # new size is easily derived.
          new_image_size = desc.tree_offset + desc.tree_size
          break
      if not new_image_size:
        raise AvbError('Requested to keep hashtree but no hashtree '
                       'descriptor was found.')

    # And cut...
    image.truncate(new_image_size)

  def set_ab_metadata(self, misc_image, slot_data):
    """Implements the 'set_ab_metadata' command.

    The |slot_data| argument must be of the form 'A_priority:A_tries_remaining:
    A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'.

    Arguments:
      misc_image: The misc image to write to.
      slot_data: Slot data as a string.

    Raises:
      AvbError: If slot data is malformed.
    """
    tokens = slot_data.split(':')
    if len(tokens) != 6:
      raise AvbError('Malformed slot data "{}".'.format(slot_data))
    a_priority = int(tokens[0])
    a_tries_remaining = int(tokens[1])
    a_success = True if int(tokens[2]) != 0 else False
    b_priority = int(tokens[3])
    b_tries_remaining = int(tokens[4])
    b_success = True if int(tokens[5]) != 0 else False

    ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC,
                                 self.AB_MAGIC,
                                 self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION,
                                 a_priority, a_tries_remaining, a_success,
                                 b_priority, b_tries_remaining, b_success)
    # Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why.
    crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff
    ab_data = ab_data_no_crc + struct.pack('!I', crc_value)
    misc_image.seek(self.AB_MISC_METADATA_OFFSET)
    misc_image.write(ab_data)
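  # Usage sketch (illustrative, not part of the original source): mark slot A
  # bootable with priority 15 and 3 attempts left, and leave slot B with
  # priority 0. The resulting A/B struct (28 bytes packed above plus a 4-byte
  # CRC32) is written at offset 2048 of the misc partition image.
  #
  #   avb = Avb()
  #   with open('misc.img', 'r+b') as misc_image:  # hypothetical file name
  #     avb.set_ab_metadata(misc_image, '15:3:0:0:0:0')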
1612
David Zeuthena4fee8b2016-08-22 15:20:43 -04001613 def info_image(self, image_filename, output):
David Zeuthen21e95262016-07-27 17:58:40 -04001614 """Implements the 'info_image' command.
1615
1616 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001617 image_filename: Image file to get information from (file object).
David Zeuthen21e95262016-07-27 17:58:40 -04001618 output: Output file to write human-readable information to (file object).
1619 """
1620
David Zeuthena4fee8b2016-08-22 15:20:43 -04001621 image = ImageHandler(image_filename)
1622
David Zeuthen21e95262016-07-27 17:58:40 -04001623 o = output
1624
1625 (footer, header, descriptors, image_size) = self._parse_image(image)
1626
1627 if footer:
1628 o.write('Footer version: {}.{}\n'.format(footer.version_major,
1629 footer.version_minor))
1630 o.write('Image size: {} bytes\n'.format(image_size))
1631 o.write('Original image size: {} bytes\n'.format(
1632 footer.original_image_size))
1633 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset))
1634 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size))
1635 o.write('--\n')
1636
1637 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type)
1638
David Zeuthena4fee8b2016-08-22 15:20:43 -04001639 o.write('VBMeta image version: {}.{}{}\n'.format(
1640 header.header_version_major, header.header_version_minor,
1641 ' (Sparse)' if image.is_sparse else ''))
David Zeuthen21e95262016-07-27 17:58:40 -04001642 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE))
1643 o.write('Authentication Block: {} bytes\n'.format(
1644 header.authentication_data_block_size))
1645 o.write('Auxiliary Block: {} bytes\n'.format(
1646 header.auxiliary_data_block_size))
1647 o.write('Algorithm: {}\n'.format(alg_name))
1648 o.write('Rollback Index: {}\n'.format(header.rollback_index))
1649
1650 # Print descriptors.
1651 num_printed = 0
1652 o.write('Descriptors:\n')
1653 for desc in descriptors:
1654 desc.print_desc(o)
1655 num_printed += 1
1656 if num_printed == 0:
1657 o.write(' (none)\n')
1658
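  # A minimal sketch of the corresponding command-line use (the file names
  # are hypothetical):
  #
  #   avbtool info_image --image vbmeta.img
  #   avbtool info_image --image boot.img --output boot_info.txt
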
1659 def _parse_image(self, image):
1660 """Gets information about an image.
1661
1662 The image can either be a vbmeta or an image with a footer.
1663
1664 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001665      image: An ImageHandler (vbmeta or footer) to extract information from.
David Zeuthen21e95262016-07-27 17:58:40 -04001666
1667 Returns:
1668      A tuple where the first argument is an AvbFooter (None if there
1669      is no footer on the image), the second argument is an
1670      AvbVBMetaHeader, the third argument is a list of
1671 AvbDescriptor-derived instances, and the fourth argument is the
1672 size of |image|.
1673 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04001674 assert isinstance(image, ImageHandler)
David Zeuthen21e95262016-07-27 17:58:40 -04001675 footer = None
David Zeuthen09692692016-09-30 16:16:40 -04001676 image.seek(image.image_size - AvbFooter.SIZE)
David Zeuthen21e95262016-07-27 17:58:40 -04001677 try:
1678 footer = AvbFooter(image.read(AvbFooter.SIZE))
1679 except (LookupError, struct.error):
1680 # Nope, just seek back to the start.
1681 image.seek(0)
1682
1683 vbmeta_offset = 0
1684 if footer:
1685 vbmeta_offset = footer.vbmeta_offset
1686
1687 image.seek(vbmeta_offset)
1688 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE))
1689
1690 auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE
1691 aux_block_offset = auth_block_offset + h.authentication_data_block_size
1692 desc_start_offset = aux_block_offset + h.descriptors_offset
1693 image.seek(desc_start_offset)
1694 descriptors = parse_descriptors(image.read(h.descriptors_size))
1695
David Zeuthen09692692016-09-30 16:16:40 -04001696 return footer, h, descriptors, image.image_size
David Zeuthen21e95262016-07-27 17:58:40 -04001697
1698 def _get_cmdline_descriptor_for_dm_verity(self, image):
1699 """Generate kernel cmdline descriptor for dm-verity.
1700
1701 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001702 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.
David Zeuthen21e95262016-07-27 17:58:40 -04001703
1704 Returns:
1705      An AvbKernelCmdlineDescriptor with dm-verity kernel cmdline
1706 instructions for the hashtree.
1707
1708 Raises:
1709 AvbError: If |image| doesn't have a hashtree descriptor.
1710
1711 """
1712
1713 (_, _, descriptors, _) = self._parse_image(image)
1714
1715 ht = None
1716 for desc in descriptors:
1717 if isinstance(desc, AvbHashtreeDescriptor):
1718 ht = desc
1719 break
1720
1721 if not ht:
1722 raise AvbError('No hashtree descriptor in given image')
1723
1724 c = 'dm="1 vroot none ro 1,'
1725 c += '0 ' # start
1726 c += '{} '.format((ht.image_size / 512)) # size (# sectors)
1727 c += 'verity {} '.format(ht.dm_verity_version) # type and version
1728 c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) ' # data_dev
1729 c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) ' # hash_dev
1730 c += '{} '.format(ht.data_block_size) # data_block
1731 c += '{} '.format(ht.hash_block_size) # hash_block
1732 c += '{} '.format(ht.image_size / ht.data_block_size) # #blocks
1733 c += '{} '.format(ht.image_size / ht.data_block_size) # hash_offset
1734 c += '{} '.format(ht.hash_algorithm) # hash_alg
1735 c += '{} '.format(str(ht.root_digest).encode('hex')) # root_digest
1736 c += '{}'.format(str(ht.salt).encode('hex')) # salt
1737 c += '"'
1738
1739 desc = AvbKernelCmdlineDescriptor()
1740 desc.kernel_cmdline = c
1741 return desc
1742
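  # For illustration only: for a hypothetical 1 GiB filesystem image with
  # 4096-byte data/hash blocks and sha1, the method above would emit a
  # descriptor whose cmdline looks roughly like (wrapped here for
  # readability; the generated string is a single line):
  #
  #   dm="1 vroot none ro 1,0 2097152 verity 1
  #       PARTUUID=$(ANDROID_SYSTEM_PARTUUID) PARTUUID=$(ANDROID_SYSTEM_PARTUUID)
  #       4096 4096 262144 262144 sha1 <root_digest_hex> <salt_hex>"
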
1743 def make_vbmeta_image(self, output, chain_partitions, algorithm_name,
1744 key_path, rollback_index, props, props_from_file,
1745 kernel_cmdlines,
1746 generate_dm_verity_cmdline_from_hashtree,
1747 include_descriptors_from_image):
1748 """Implements the 'make_vbmeta_image' command.
1749
1750 Arguments:
1751 output: File to write the image to.
1752 chain_partitions: List of partitions to chain.
1753 algorithm_name: Name of algorithm to use.
1754 key_path: Path to key to use or None.
1755 rollback_index: The rollback index to use.
1756 props: Properties to insert (list of strings of the form 'key:value').
1757 props_from_file: Properties to insert (list of strings 'key:<path>').
1758 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1759 generate_dm_verity_cmdline_from_hashtree: None or file to generate from.
1760 include_descriptors_from_image: List of file objects with descriptors.
1761
1762 Raises:
1763 AvbError: If a chained partition is malformed.
1764 """
1765
1766 descriptors = []
1767
1768 # Insert chained partition descriptors.
1769 if chain_partitions:
1770 for cp in chain_partitions:
1771 cp_tokens = cp.split(':')
1772 if len(cp_tokens) != 3:
1773 raise AvbError('Malformed chained partition "{}".'.format(cp))
1774 desc = AvbChainPartitionDescriptor()
1775 desc.partition_name = cp_tokens[0]
1776 desc.rollback_index_slot = int(cp_tokens[1])
1777 if desc.rollback_index_slot < 1:
1778 raise AvbError('Rollback index slot must be 1 or larger.')
1779 file_path = cp_tokens[2]
1780 desc.public_key = open(file_path, 'rb').read()
1781 descriptors.append(desc)
1782
1783 vbmeta_blob = self._generate_vbmeta_blob(
1784 algorithm_name, key_path, descriptors, rollback_index, props,
1785 props_from_file, kernel_cmdlines,
1786 generate_dm_verity_cmdline_from_hashtree,
1787 include_descriptors_from_image)
1788
1789 # Write entire vbmeta blob (header, authentication, auxiliary).
1790 output.seek(0)
1791 output.write(vbmeta_blob)
1792
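  # A minimal sketch of the corresponding command-line use (key and output
  # file names are hypothetical):
  #
  #   avbtool make_vbmeta_image --output vbmeta.img \
  #       --algorithm SHA256_RSA2048 --key testkey_rsa2048.pem \
  #       --rollback_index 1 \
  #       --chain_partition system:1:system_pubkey.bin
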
1793 def _generate_vbmeta_blob(self, algorithm_name, key_path, descriptors,
1794 rollback_index, props, props_from_file,
1795 kernel_cmdlines,
1796 generate_dm_verity_cmdline_from_hashtree,
1797 include_descriptors_from_image):
1798 """Generates a VBMeta blob.
1799
1800 This blob contains the header (struct AvbVBMetaHeader), the
1801 authentication data block (which contains the hash and signature
1802 for the header and auxiliary block), and the auxiliary block
1803 (which contains descriptors, the public key used, and other data).
1804
1805    The |key_path| parameter can be |None| only if the |algorithm_name| is
1806 'NONE'.
1807
1808 Arguments:
1809 algorithm_name: The algorithm name as per the ALGORITHMS dict.
1810 key_path: The path to the .pem file used to sign the blob.
1811 descriptors: A list of descriptors to insert or None.
1812 rollback_index: The rollback index to use.
1813 props: Properties to insert (List of strings of the form 'key:value').
1814 props_from_file: Properties to insert (List of strings 'key:<path>').
1815 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1816 generate_dm_verity_cmdline_from_hashtree: None or file to generate
1817 dm-verity kernel cmdline from.
1818      include_descriptors_from_image: List of file objects to insert
1819        descriptors from.
1820
1821 Returns:
1822 A bytearray() with the VBMeta blob.
1823
1824 Raises:
1825      AvbError: If the |algorithm_name| is not found, if no key has
1826 been given and the given algorithm requires one, or the key is
1827 of the wrong size.
1828
1829 """
1830 try:
1831 alg = ALGORITHMS[algorithm_name]
1832 except KeyError:
1833 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name))
1834
1835 # Descriptors.
1836 encoded_descriptors = bytearray()
1837 if descriptors:
1838 for desc in descriptors:
1839 encoded_descriptors.extend(desc.encode())
1840
1841 # Add properties.
1842 if props:
1843 for prop in props:
1844 idx = prop.find(':')
1845 if idx == -1:
1846 raise AvbError('Malformed property "{}".'.format(prop))
1847 desc = AvbPropertyDescriptor()
1848 desc.key = prop[0:idx]
1849 desc.value = prop[(idx + 1):]
1850 encoded_descriptors.extend(desc.encode())
1851 if props_from_file:
1852 for prop in props_from_file:
1853 idx = prop.find(':')
1854 if idx == -1:
1855 raise AvbError('Malformed property "{}".'.format(prop))
1856 desc = AvbPropertyDescriptor()
1857 desc.key = prop[0:idx]
1858 desc.value = prop[(idx + 1):]
1859 file_path = prop[(idx + 1):]
1860 desc.value = open(file_path, 'rb').read()
1861 encoded_descriptors.extend(desc.encode())
1862
1863 # Add AvbKernelCmdline descriptor for dm-verity, if requested.
1864 if generate_dm_verity_cmdline_from_hashtree:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001865 image_handler = ImageHandler(
1866 generate_dm_verity_cmdline_from_hashtree.name)
David Zeuthen21e95262016-07-27 17:58:40 -04001867 encoded_descriptors.extend(self._get_cmdline_descriptor_for_dm_verity(
David Zeuthena4fee8b2016-08-22 15:20:43 -04001868 image_handler).encode())
David Zeuthen21e95262016-07-27 17:58:40 -04001869
1870 # Add kernel command-lines.
1871 if kernel_cmdlines:
1872 for i in kernel_cmdlines:
1873 desc = AvbKernelCmdlineDescriptor()
1874 desc.kernel_cmdline = i
1875 encoded_descriptors.extend(desc.encode())
1876
1877 # Add descriptors from other images.
1878 if include_descriptors_from_image:
1879 for image in include_descriptors_from_image:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001880 image_handler = ImageHandler(image.name)
1881 (_, _, image_descriptors, _) = self._parse_image(image_handler)
David Zeuthen21e95262016-07-27 17:58:40 -04001882 for desc in image_descriptors:
1883 encoded_descriptors.extend(desc.encode())
1884
1885 key = None
1886 encoded_key = bytearray()
1887 if alg.public_key_num_bytes > 0:
1888 if not key_path:
1889 raise AvbError('Key is required for algorithm {}'.format(
1890 algorithm_name))
1891 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1892 encoded_key = encode_rsa_key(key)
1893 if len(encoded_key) != alg.public_key_num_bytes:
1894 raise AvbError('Key is wrong size for algorithm {}'.format(
1895 algorithm_name))
1896
1897 h = AvbVBMetaHeader()
1898
1899 # For the Auxiliary data block, descriptors are stored at offset 0
1900 # and the public key is immediately after that.
1901 h.auxiliary_data_block_size = round_to_multiple(
1902 len(encoded_descriptors) + len(encoded_key), 64)
1903 h.descriptors_offset = 0
1904 h.descriptors_size = len(encoded_descriptors)
1905 h.public_key_offset = h.descriptors_size
1906 h.public_key_size = len(encoded_key)
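    # The auxiliary block therefore ends up laid out as follows (a sketch;
    # offsets are relative to the start of the block):
    #
    #   0                                   encoded descriptors
    #   descriptors_size                    encoded public key
    #   descriptors_size + public_key_size  zero padding to a 64-byte multiple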
1907
1908 # For the Authentication data block, the hash is first and then
1909 # the signature.
1910 h.authentication_data_block_size = round_to_multiple(
1911        alg.hash_num_bytes + alg.signature_num_bytes, 64)
1912 h.algorithm_type = alg.algorithm_type
1913 h.hash_offset = 0
1914 h.hash_size = alg.hash_num_bytes
1915 # Signature offset and size - it's stored right after the hash
1916 # (in Authentication data block).
1917 h.signature_offset = alg.hash_num_bytes
1918 h.signature_size = alg.signature_num_bytes
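    # The authentication block therefore ends up laid out as follows (a
    # sketch; offsets are relative to the start of the block):
    #
    #   0                            hash of (header block || auxiliary block)
    #   hash_size                    signature over the same data
    #   hash_size + signature_size   zero padding to a 64-byte multiple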
1919
1920 h.rollback_index = rollback_index
1921
1922 # Generate Header data block.
1923 header_data_blob = h.encode()
1924
1925 # Generate Auxiliary data block.
1926 aux_data_blob = bytearray()
1927 aux_data_blob.extend(encoded_descriptors)
1928 aux_data_blob.extend(encoded_key)
1929 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob)
1930 aux_data_blob.extend('\0' * padding_bytes)
1931
1932 # Calculate the hash.
1933 binary_hash = bytearray()
1934 binary_signature = bytearray()
1935 if algorithm_name != 'NONE':
1936 if algorithm_name[0:6] == 'SHA256':
1937 ha = hashlib.sha256()
1938 elif algorithm_name[0:6] == 'SHA512':
1939 ha = hashlib.sha512()
1940 else:
1941 raise AvbError('Unsupported algorithm {}.'.format(algorithm_name))
1942 ha.update(header_data_blob)
1943 ha.update(aux_data_blob)
1944 binary_hash.extend(ha.digest())
1945
1946 # Calculate the signature.
1947 p = subprocess.Popen(
1948 ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'],
1949 stdin=subprocess.PIPE,
1950 stdout=subprocess.PIPE,
1951 stderr=subprocess.PIPE)
1952 padding_and_hash = str(bytearray(alg.padding)) + binary_hash
1953 (pout, perr) = p.communicate(padding_and_hash)
1954 retcode = p.wait()
1955 if retcode != 0:
1956 raise AvbError('Error signing: {}'.format(perr))
1957 binary_signature.extend(pout)
1958
1959 # Generate Authentication data block.
1960 auth_data_blob = bytearray()
1961 auth_data_blob.extend(binary_hash)
1962 auth_data_blob.extend(binary_signature)
1963 padding_bytes = h.authentication_data_block_size - len(auth_data_blob)
1964 auth_data_blob.extend('\0' * padding_bytes)
1965
1966 return header_data_blob + auth_data_blob + aux_data_blob
1967
1968 def extract_public_key(self, key_path, output):
1969 """Implements the 'extract_public_key' command.
1970
1971 Arguments:
1972 key_path: The path to a RSA private key file.
1973 output: The file to write to.
1974 """
1975 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1976 write_rsa_key(output, key)
1977
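  # A minimal sketch of the corresponding command-line use (file names are
  # hypothetical):
  #
  #   avbtool extract_public_key --key testkey_rsa2048.pem --output pubkey.bin
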
David Zeuthena4fee8b2016-08-22 15:20:43 -04001978 def add_hash_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04001979 hash_algorithm, salt, algorithm_name, key_path,
1980 rollback_index, props, props_from_file, kernel_cmdlines,
1981 generate_dm_verity_cmdline_from_hashtree,
1982 include_descriptors_from_image):
David Zeuthena4fee8b2016-08-22 15:20:43 -04001983 """Implementation of the add_hash_footer on unsparse images.
David Zeuthen21e95262016-07-27 17:58:40 -04001984
1985 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001986 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04001987 partition_size: Size of partition.
1988 partition_name: Name of partition (without A/B suffix).
1989 hash_algorithm: Hash algorithm to use.
1990 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
1991 algorithm_name: Name of algorithm to use.
1992 key_path: Path to key to use or None.
1993 rollback_index: Rollback index.
1994 props: Properties to insert (List of strings of the form 'key:value').
1995 props_from_file: Properties to insert (List of strings 'key:<path>').
1996 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1997 generate_dm_verity_cmdline_from_hashtree: None or file to generate
1998 dm-verity kernel cmdline from.
1999      include_descriptors_from_image: List of file objects to insert
2000        descriptors from.
David Zeuthena4fee8b2016-08-22 15:20:43 -04002001
2002 Raises:
2003 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04002004 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002005 image = ImageHandler(image_filename)
2006
2007 if partition_size % image.block_size != 0:
2008 raise AvbError('Partition size of {} is not a multiple of the image '
2009 'block size {}.'.format(partition_size,
2010 image.block_size))
2011
David Zeuthen21e95262016-07-27 17:58:40 -04002012 # If there's already a footer, truncate the image to its original
2013 # size. This way 'avbtool add_hash_footer' is idempotent (modulo
2014 # salts).
David Zeuthen09692692016-09-30 16:16:40 -04002015 image.seek(image.image_size - AvbFooter.SIZE)
David Zeuthen21e95262016-07-27 17:58:40 -04002016 try:
2017 footer = AvbFooter(image.read(AvbFooter.SIZE))
2018 # Existing footer found. Just truncate.
2019 original_image_size = footer.original_image_size
David Zeuthen09692692016-09-30 16:16:40 -04002020 image.truncate(footer.original_image_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002021 except (LookupError, struct.error):
David Zeuthen09692692016-09-30 16:16:40 -04002022 original_image_size = image.image_size
David Zeuthen21e95262016-07-27 17:58:40 -04002023
2024 # If anything goes wrong from here-on, restore the image back to
2025 # its original size.
2026 try:
David Zeuthen09692692016-09-30 16:16:40 -04002027 # First, calculate the maximum image size such that an image
2028 # this size + metadata (footer + vbmeta struct) fits in
2029 # |partition_size|.
2030 max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE
2031 max_image_size = partition_size - max_metadata_size
2032
2033 # If image size exceeds the maximum image size, fail.
2034 if image.image_size > max_image_size:
2035 raise AvbError('Image size of {} exceeds maximum image '
2036 'size of {} in order to fit in a partition '
2037 'size of {}.'.format(image.image_size, max_image_size,
2038 partition_size))
2039
David Zeuthen21e95262016-07-27 17:58:40 -04002040 digest_size = len(hashlib.new(name=hash_algorithm).digest())
2041 if salt:
2042 salt = salt.decode('hex')
2043 else:
2044 if salt is None:
2045          # If salt is not explicitly specified, choose a salt
2046          # that's the same size as the hash.
2047 hash_size = digest_size
2048 salt = open('/dev/urandom').read(hash_size)
2049 else:
2050 salt = ''
2051
2052 hasher = hashlib.new(name=hash_algorithm, string=salt)
2053 # TODO(zeuthen): might want to read this in chunks to avoid
2054 # memory pressure, then again, this is only supposed to be used
2055 # on kernel/initramfs partitions. Possible optimization.
2056 image.seek(0)
David Zeuthen09692692016-09-30 16:16:40 -04002057 hasher.update(image.read(image.image_size))
David Zeuthen21e95262016-07-27 17:58:40 -04002058 digest = hasher.digest()
2059
2060 h_desc = AvbHashDescriptor()
David Zeuthen09692692016-09-30 16:16:40 -04002061 h_desc.image_size = image.image_size
David Zeuthen21e95262016-07-27 17:58:40 -04002062 h_desc.hash_algorithm = hash_algorithm
2063 h_desc.partition_name = partition_name
2064 h_desc.salt = salt
2065 h_desc.digest = digest
2066
2067 # Generate the VBMeta footer.
David Zeuthen21e95262016-07-27 17:58:40 -04002068 vbmeta_blob = self._generate_vbmeta_blob(
2069 algorithm_name, key_path, [h_desc], rollback_index, props,
2070 props_from_file, kernel_cmdlines,
2071 generate_dm_verity_cmdline_from_hashtree,
2072 include_descriptors_from_image)
2073
David Zeuthena4fee8b2016-08-22 15:20:43 -04002074 # If the image isn't sparse, its size might not be a multiple of
2075 # the block size. This will screw up padding later so just grow it.
David Zeuthen09692692016-09-30 16:16:40 -04002076 if image.image_size % image.block_size != 0:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002077 assert not image.is_sparse
David Zeuthen09692692016-09-30 16:16:40 -04002078 padding_needed = image.block_size - (image.image_size%image.block_size)
2079 image.truncate(image.image_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002080
David Zeuthena4fee8b2016-08-22 15:20:43 -04002081 # The append_raw() method requires content with size being a
2082 # multiple of |block_size| so add padding as needed. Also record
2083 # where this is written to since we'll need to put that in the
2084 # footer.
David Zeuthen09692692016-09-30 16:16:40 -04002085 vbmeta_offset = image.image_size
David Zeuthena4fee8b2016-08-22 15:20:43 -04002086 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2087 len(vbmeta_blob))
2088 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2089 image.append_raw(vbmeta_blob_with_padding)
2090 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding)
2091
2092 # Now insert a DONT_CARE chunk with enough bytes such that the
2093      # final Footer block is at the end of partition_size.
2094 image.append_dont_care(partition_size - vbmeta_end_offset -
2095 1*image.block_size)
2096
2097 # Generate the Footer that tells where the VBMeta footer
2098 # is. Also put enough padding in the front of the footer since
2099 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002100 footer = AvbFooter()
2101 footer.original_image_size = original_image_size
2102 footer.vbmeta_offset = vbmeta_offset
2103 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002104 footer_blob = footer.encode()
2105 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2106 footer_blob)
2107 image.append_raw(footer_blob_with_padding)
2108
David Zeuthen21e95262016-07-27 17:58:40 -04002109 except:
2110 # Truncate back to original size, then re-raise
2111 image.truncate(original_image_size)
2112 raise
2113
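  # A minimal sketch of the corresponding command-line use (the partition
  # size and file names are hypothetical):
  #
  #   avbtool add_hash_footer --image boot.img --partition_size 33554432 \
  #       --partition_name boot --algorithm SHA256_RSA2048 \
  #       --key testkey_rsa2048.pem --rollback_index 1
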
David Zeuthena4fee8b2016-08-22 15:20:43 -04002114 def add_hashtree_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04002115 hash_algorithm, block_size, salt, algorithm_name,
2116 key_path, rollback_index, props, props_from_file,
2117 kernel_cmdlines,
2118 generate_dm_verity_cmdline_from_hashtree,
David Zeuthen09692692016-09-30 16:16:40 -04002119 include_descriptors_from_image,
2120 calc_max_image_size):
David Zeuthen21e95262016-07-27 17:58:40 -04002121 """Implements the 'add_hashtree_footer' command.
2122
2123 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for
2124 more information about dm-verity and these hashes.
2125
2126 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002127 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04002128 partition_size: Size of partition.
2129 partition_name: Name of partition (without A/B suffix).
2130 hash_algorithm: Hash algorithm to use.
2131 block_size: Block size to use.
2132 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
2133 algorithm_name: Name of algorithm to use.
2134 key_path: Path to key to use or None.
2135 rollback_index: Rollback index.
2136 props: Properties to insert (List of strings of the form 'key:value').
2137 props_from_file: Properties to insert (List of strings 'key:<path>').
2138 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
2139 generate_dm_verity_cmdline_from_hashtree: None or file to generate
2140 dm-verity kernel cmdline from.
2141      include_descriptors_from_image: List of file objects to insert
2142        descriptors from.
David Zeuthen09692692016-09-30 16:16:40 -04002143 calc_max_image_size: Don't store the hashtree or footer - instead
2144 calculate the maximum image size leaving enough room for hashtree
2145 and metadata with the given |partition_size|.
David Zeuthena4fee8b2016-08-22 15:20:43 -04002146
2147 Raises:
2148 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04002149 """
David Zeuthen09692692016-09-30 16:16:40 -04002150 digest_size = len(hashlib.new(name=hash_algorithm).digest())
2151 digest_padding = round_to_pow2(digest_size) - digest_size
2152
2153 # First, calculate the maximum image size such that an image
2154 # this size + the hashtree + metadata (footer + vbmeta struct)
2155 # fits in |partition_size|. We use very conservative figures for
2156 # metadata.
2157 (_, max_tree_size) = calc_hash_level_offsets(
2158 partition_size, block_size, digest_size + digest_padding)
2159 max_metadata_size = (max_tree_size + self.MAX_VBMETA_SIZE +
2160 self.MAX_FOOTER_SIZE)
2161 max_image_size = partition_size - max_metadata_size
2162
2163 # If we're asked to only calculate the maximum image size, we're done.
2164 if calc_max_image_size:
2165 print '{}'.format(max_image_size)
2166 return
2167
David Zeuthena4fee8b2016-08-22 15:20:43 -04002168 image = ImageHandler(image_filename)
2169
2170 if partition_size % image.block_size != 0:
2171 raise AvbError('Partition size of {} is not a multiple of the image '
2172 'block size {}.'.format(partition_size,
2173 image.block_size))
2174
David Zeuthen21e95262016-07-27 17:58:40 -04002175 # If there's already a footer, truncate the image to its original
2176 # size. This way 'avbtool add_hashtree_footer' is idempotent
2177 # (modulo salts).
David Zeuthen09692692016-09-30 16:16:40 -04002178 image.seek(image.image_size - AvbFooter.SIZE)
David Zeuthen21e95262016-07-27 17:58:40 -04002179 try:
2180 footer = AvbFooter(image.read(AvbFooter.SIZE))
2181 # Existing footer found. Just truncate.
2182 original_image_size = footer.original_image_size
David Zeuthen09692692016-09-30 16:16:40 -04002183 image.truncate(footer.original_image_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002184 except (LookupError, struct.error):
David Zeuthen09692692016-09-30 16:16:40 -04002185 original_image_size = image.image_size
David Zeuthen21e95262016-07-27 17:58:40 -04002186
2187 # If anything goes wrong from here-on, restore the image back to
2188 # its original size.
2189 try:
2190 # Ensure image is multiple of block_size.
David Zeuthen09692692016-09-30 16:16:40 -04002191 rounded_image_size = round_to_multiple(image.image_size, block_size)
2192 if rounded_image_size > image.image_size:
2193 image.append_raw('\0' * (rounded_image_size - image.image_size))
David Zeuthen21e95262016-07-27 17:58:40 -04002194
David Zeuthen09692692016-09-30 16:16:40 -04002195 # If image size exceeds the maximum image size, fail.
2196 if image.image_size > max_image_size:
2197 raise AvbError('Image size of {} exceeds maximum image '
2198 'size of {} in order to fit in a partition '
2199 'size of {}.'.format(image.image_size, max_image_size,
2200 partition_size))
David Zeuthen21e95262016-07-27 17:58:40 -04002201
2202 if salt:
2203 salt = salt.decode('hex')
2204 else:
2205 if salt is None:
2206          # If salt is not explicitly specified, choose a salt
2207          # that's the same size as the hash.
2208 hash_size = digest_size
2209 salt = open('/dev/urandom').read(hash_size)
2210 else:
2211 salt = ''
2212
David Zeuthena4fee8b2016-08-22 15:20:43 -04002213 # Hashes are stored upside down so we need to calculate hash
David Zeuthen21e95262016-07-27 17:58:40 -04002214 # offsets in advance.
2215 (hash_level_offsets, tree_size) = calc_hash_level_offsets(
David Zeuthen09692692016-09-30 16:16:40 -04002216 image.image_size, block_size, digest_size + digest_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002217
David Zeuthena4fee8b2016-08-22 15:20:43 -04002218 # If the image isn't sparse, its size might not be a multiple of
2219 # the block size. This will screw up padding later so just grow it.
David Zeuthen09692692016-09-30 16:16:40 -04002220 if image.image_size % image.block_size != 0:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002221 assert not image.is_sparse
David Zeuthen09692692016-09-30 16:16:40 -04002222 padding_needed = image.block_size - (image.image_size%image.block_size)
2223 image.truncate(image.image_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002224
David Zeuthena4fee8b2016-08-22 15:20:43 -04002225 # Generate the tree and add padding as needed.
David Zeuthen09692692016-09-30 16:16:40 -04002226 tree_offset = image.image_size
2227 root_digest, hash_tree = generate_hash_tree(image, image.image_size,
David Zeuthena4fee8b2016-08-22 15:20:43 -04002228 block_size,
2229 hash_algorithm, salt,
2230 digest_padding,
2231 hash_level_offsets,
2232 tree_size)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002233
2234 # Generate HashtreeDescriptor with details about the tree we
2235 # just generated.
David Zeuthen21e95262016-07-27 17:58:40 -04002236 ht_desc = AvbHashtreeDescriptor()
2237 ht_desc.dm_verity_version = 1
David Zeuthen09692692016-09-30 16:16:40 -04002238 ht_desc.image_size = image.image_size
David Zeuthen21e95262016-07-27 17:58:40 -04002239 ht_desc.tree_offset = tree_offset
2240 ht_desc.tree_size = tree_size
2241 ht_desc.data_block_size = block_size
2242 ht_desc.hash_block_size = block_size
2243 ht_desc.hash_algorithm = hash_algorithm
2244 ht_desc.partition_name = partition_name
2245 ht_desc.salt = salt
2246 ht_desc.root_digest = root_digest
2247
David Zeuthen09692692016-09-30 16:16:40 -04002248 # Write the hash tree
2249 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) -
2250 len(hash_tree))
2251 hash_tree_with_padding = hash_tree + '\0'*padding_needed
2252 image.append_raw(hash_tree_with_padding)
2253
David Zeuthena4fee8b2016-08-22 15:20:43 -04002254 # Generate the VBMeta footer and add padding as needed.
2255 vbmeta_offset = tree_offset + len(hash_tree_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002256 vbmeta_blob = self._generate_vbmeta_blob(
2257 algorithm_name, key_path, [ht_desc], rollback_index, props,
2258 props_from_file, kernel_cmdlines,
2259 generate_dm_verity_cmdline_from_hashtree,
2260 include_descriptors_from_image)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002261 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2262 len(vbmeta_blob))
2263 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2264 image.append_raw(vbmeta_blob_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002265
David Zeuthena4fee8b2016-08-22 15:20:43 -04002266 # Now insert a DONT_CARE chunk with enough bytes such that the
2267      # final Footer block is at the end of partition_size.
David Zeuthen09692692016-09-30 16:16:40 -04002268 image.append_dont_care(partition_size - image.image_size -
David Zeuthena4fee8b2016-08-22 15:20:43 -04002269 1*image.block_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002270
David Zeuthena4fee8b2016-08-22 15:20:43 -04002271 # Generate the Footer that tells where the VBMeta footer
2272 # is. Also put enough padding in the front of the footer since
2273 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002274 footer = AvbFooter()
2275 footer.original_image_size = original_image_size
2276 footer.vbmeta_offset = vbmeta_offset
2277 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002278 footer_blob = footer.encode()
2279 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2280 footer_blob)
2281 image.append_raw(footer_blob_with_padding)
2282
David Zeuthen21e95262016-07-27 17:58:40 -04002283 except:
David Zeuthen09692692016-09-30 16:16:40 -04002284 # Truncate back to original size, then re-raise.
David Zeuthen21e95262016-07-27 17:58:40 -04002285 image.truncate(original_image_size)
2286 raise
2287
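  # A minimal sketch of the corresponding command-line use (the partition
  # size and file names are hypothetical). --calc_max_image_size can be used
  # first to learn how large the filesystem image may be, then the footer is
  # added:
  #
  #   avbtool add_hashtree_footer --partition_size 1073741824 \
  #       --calc_max_image_size
  #   avbtool add_hashtree_footer --image system.img \
  #       --partition_size 1073741824 --partition_name system \
  #       --algorithm SHA256_RSA2048 --key testkey_rsa2048.pem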
2288
2289def calc_hash_level_offsets(image_size, block_size, digest_size):
2290 """Calculate the offsets of all the hash-levels in a Merkle-tree.
2291
2292 Arguments:
2293 image_size: The size of the image to calculate a Merkle-tree for.
2294 block_size: The block size, e.g. 4096.
2295 digest_size: The size of each hash, e.g. 32 for SHA-256.
2296
2297 Returns:
2298 A tuple where the first argument is an array of offsets and the
2299    second is the size of the tree, in bytes.
2300 """
2301 level_offsets = []
2302 level_sizes = []
2303 tree_size = 0
2304
2305 num_levels = 0
2306 size = image_size
2307 while size > block_size:
2308 num_blocks = (size + block_size - 1) / block_size
2309 level_size = round_to_multiple(num_blocks * digest_size, block_size)
2310
2311 level_sizes.append(level_size)
2312 tree_size += level_size
2313 num_levels += 1
2314
2315 size = level_size
2316
2317 for n in range(0, num_levels):
2318 offset = 0
2319 for m in range(n + 1, num_levels):
2320 offset += level_sizes[m]
2321 level_offsets.append(offset)
2322
David Zeuthena4fee8b2016-08-22 15:20:43 -04002323 return level_offsets, tree_size
David Zeuthen21e95262016-07-27 17:58:40 -04002324
2325
2326def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt,
David Zeuthena4fee8b2016-08-22 15:20:43 -04002327 digest_padding, hash_level_offsets, tree_size):
David Zeuthen21e95262016-07-27 17:58:40 -04002328 """Generates a Merkle-tree for a file.
2329
2330 Args:
2331 image: The image, as a file.
2332 image_size: The size of the image.
2333 block_size: The block size, e.g. 4096.
2334 hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'.
2335 salt: The salt to use.
2336 digest_padding: The padding for each digest.
David Zeuthen21e95262016-07-27 17:58:40 -04002337 hash_level_offsets: The offsets from calc_hash_level_offsets().
David Zeuthena4fee8b2016-08-22 15:20:43 -04002338 tree_size: The size of the tree, in number of bytes.
David Zeuthen21e95262016-07-27 17:58:40 -04002339
2340 Returns:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002341 A tuple where the first element is the top-level hash and the
2342 second element is the hash-tree.
David Zeuthen21e95262016-07-27 17:58:40 -04002343 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002344 hash_ret = bytearray(tree_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002345 hash_src_offset = 0
2346 hash_src_size = image_size
2347 level_num = 0
2348 while hash_src_size > block_size:
2349 level_output = ''
David Zeuthen21e95262016-07-27 17:58:40 -04002350 remaining = hash_src_size
2351 while remaining > 0:
2352 hasher = hashlib.new(name=hash_alg_name, string=salt)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002353 # Only read from the file for the first level - for subsequent
2354 # levels, access the array we're building.
2355 if level_num == 0:
2356 image.seek(hash_src_offset + hash_src_size - remaining)
2357 data = image.read(min(remaining, block_size))
2358 else:
2359 offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining
2360 data = hash_ret[offset:offset + block_size]
David Zeuthen21e95262016-07-27 17:58:40 -04002361 hasher.update(data)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002362
2363 remaining -= len(data)
David Zeuthen21e95262016-07-27 17:58:40 -04002364 if len(data) < block_size:
2365 hasher.update('\0' * (block_size - len(data)))
2366 level_output += hasher.digest()
2367 if digest_padding > 0:
2368 level_output += '\0' * digest_padding
2369
2370 padding_needed = (round_to_multiple(
2371 len(level_output), block_size) - len(level_output))
2372 level_output += '\0' * padding_needed
2373
David Zeuthena4fee8b2016-08-22 15:20:43 -04002374 # Copy level-output into resulting tree.
2375 offset = hash_level_offsets[level_num]
2376 hash_ret[offset:offset + len(level_output)] = level_output
David Zeuthen21e95262016-07-27 17:58:40 -04002377
David Zeuthena4fee8b2016-08-22 15:20:43 -04002378 # Continue on to the next level.
David Zeuthen21e95262016-07-27 17:58:40 -04002379 hash_src_size = len(level_output)
David Zeuthen21e95262016-07-27 17:58:40 -04002380 level_num += 1
2381
2382 hasher = hashlib.new(name=hash_alg_name, string=salt)
2383 hasher.update(level_output)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002384 return hasher.digest(), hash_ret
David Zeuthen21e95262016-07-27 17:58:40 -04002385
2386
2387class AvbTool(object):
2388 """Object for avbtool command-line tool."""
2389
2390 def __init__(self):
2391 """Initializer method."""
2392 self.avb = Avb()
2393
2394 def _add_common_args(self, sub_parser):
2395 """Adds arguments used by several sub-commands.
2396
2397 Arguments:
2398 sub_parser: The parser to add arguments to.
2399 """
2400 sub_parser.add_argument('--algorithm',
2401 help='Algorithm to use (default: NONE)',
2402 metavar='ALGORITHM',
2403 default='NONE')
2404 sub_parser.add_argument('--key',
2405 help='Path to RSA private key file',
2406 metavar='KEY',
2407 required=False)
2408 sub_parser.add_argument('--rollback_index',
2409 help='Rollback Index',
2410 type=parse_number,
2411 default=0)
2412 sub_parser.add_argument('--prop',
2413 help='Add property',
2414 metavar='KEY:VALUE',
2415 action='append')
2416 sub_parser.add_argument('--prop_from_file',
2417 help='Add property from file',
2418 metavar='KEY:PATH',
2419 action='append')
2420 sub_parser.add_argument('--kernel_cmdline',
2421 help='Add kernel cmdline',
2422 metavar='CMDLINE',
2423 action='append')
2424 sub_parser.add_argument('--generate_dm_verity_cmdline_from_hashtree',
2425 metavar='IMAGE',
2426 help='Generate kernel cmdline for dm-verity',
2427 type=argparse.FileType('rb'))
2428 sub_parser.add_argument('--include_descriptors_from_image',
2429 help='Include descriptors from image',
2430 metavar='IMAGE',
2431 action='append',
2432 type=argparse.FileType('rb'))
2433
2434 def run(self, argv):
2435 """Command-line processor.
2436
2437 Arguments:
2438 argv: Pass sys.argv from main.
2439 """
2440 parser = argparse.ArgumentParser()
2441 subparsers = parser.add_subparsers(title='subcommands')
2442
2443 sub_parser = subparsers.add_parser('version',
2444 help='Prints version of avbtool.')
2445 sub_parser.set_defaults(func=self.version)
2446
2447 sub_parser = subparsers.add_parser('extract_public_key',
2448 help='Extract public key.')
2449 sub_parser.add_argument('--key',
2450 help='Path to RSA private key file',
2451 required=True)
2452 sub_parser.add_argument('--output',
2453 help='Output file name',
2454 type=argparse.FileType('wb'),
2455 required=True)
2456 sub_parser.set_defaults(func=self.extract_public_key)
2457
2458 sub_parser = subparsers.add_parser('make_vbmeta_image',
2459 help='Makes a vbmeta image.')
2460 sub_parser.add_argument('--output',
2461 help='Output file name',
2462 type=argparse.FileType('wb'),
2463 required=True)
2464 self._add_common_args(sub_parser)
2465 sub_parser.add_argument('--chain_partition',
2466 help='Allow signed integrity-data for partition',
2467 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
2468 action='append')
2469 sub_parser.set_defaults(func=self.make_vbmeta_image)
2470
2471 sub_parser = subparsers.add_parser('add_hash_footer',
2472 help='Add hashes and footer to image.')
2473 sub_parser.add_argument('--image',
David Zeuthen8b6973b2016-09-20 12:39:49 -04002474 help='Image to add hashes to',
David Zeuthen21e95262016-07-27 17:58:40 -04002475 type=argparse.FileType('rab+'))
2476 sub_parser.add_argument('--partition_size',
2477 help='Partition size',
2478 type=parse_number,
2479 required=True)
2480 sub_parser.add_argument('--partition_name',
2481 help='Partition name',
2482 required=True)
2483 sub_parser.add_argument('--hash_algorithm',
2484 help='Hash algorithm to use (default: sha256)',
2485 default='sha256')
2486 sub_parser.add_argument('--salt',
2487 help='Salt in hex (default: /dev/urandom)')
2488 self._add_common_args(sub_parser)
2489 sub_parser.set_defaults(func=self.add_hash_footer)
2490
2491 sub_parser = subparsers.add_parser('add_hashtree_footer',
2492 help='Add hashtree and footer to image.')
2493 sub_parser.add_argument('--image',
David Zeuthen8b6973b2016-09-20 12:39:49 -04002494 help='Image to add hashtree to',
David Zeuthen21e95262016-07-27 17:58:40 -04002495 type=argparse.FileType('rab+'))
2496 sub_parser.add_argument('--partition_size',
2497 help='Partition size',
2498 type=parse_number,
2499 required=True)
2500 sub_parser.add_argument('--partition_name',
2501 help='Partition name',
David Zeuthen09692692016-09-30 16:16:40 -04002502 default=None)
David Zeuthen21e95262016-07-27 17:58:40 -04002503 sub_parser.add_argument('--hash_algorithm',
2504 help='Hash algorithm to use (default: sha1)',
2505 default='sha1')
2506 sub_parser.add_argument('--salt',
2507 help='Salt in hex (default: /dev/urandom)')
2508 sub_parser.add_argument('--block_size',
2509 help='Block size (default: 4096)',
2510 type=parse_number,
2511 default=4096)
David Zeuthen09692692016-09-30 16:16:40 -04002512 sub_parser.add_argument('--calc_max_image_size',
2513 help=('Don\'t store the hashtree or footer - '
2514 'instead calculate the maximum image size '
2515 'leaving enough room for hashtree '
2516 'and metadata with the given partition '
2517 'size.'),
2518 action='store_true')
David Zeuthen21e95262016-07-27 17:58:40 -04002519 self._add_common_args(sub_parser)
2520 sub_parser.set_defaults(func=self.add_hashtree_footer)
2521
2522 sub_parser = subparsers.add_parser('erase_footer',
2523 help='Erase footer from an image.')
2524 sub_parser.add_argument('--image',
David Zeuthen8b6973b2016-09-20 12:39:49 -04002525 help='Image with a footer',
David Zeuthen21e95262016-07-27 17:58:40 -04002526 type=argparse.FileType('rwb+'),
2527 required=True)
2528 sub_parser.add_argument('--keep_hashtree',
2529 help='Keep the hashtree in the image',
2530 action='store_true')
2531 sub_parser.set_defaults(func=self.erase_footer)
2532
2533 sub_parser = subparsers.add_parser(
2534 'info_image',
2535 help='Show information about vbmeta or footer.')
2536 sub_parser.add_argument('--image',
David Zeuthen8b6973b2016-09-20 12:39:49 -04002537 help='Image to show information about',
David Zeuthen21e95262016-07-27 17:58:40 -04002538 type=argparse.FileType('rb'),
2539 required=True)
2540 sub_parser.add_argument('--output',
2541 help='Write info to file',
2542 type=argparse.FileType('wt'),
2543 default=sys.stdout)
2544 sub_parser.set_defaults(func=self.info_image)
2545
David Zeuthen8b6973b2016-09-20 12:39:49 -04002546 sub_parser = subparsers.add_parser('set_ab_metadata',
2547 help='Set A/B metadata.')
2548 sub_parser.add_argument('--misc_image',
2549 help=('The misc image to modify. If the image does '
2550 'not exist, it will be created.'),
2551 type=argparse.FileType('r+b'),
2552 required=True)
2553 sub_parser.add_argument('--slot_data',
2554 help=('Slot data of the form "priority", '
2555 '"tries_remaining", "sucessful_boot" for '
2556 'slot A followed by the same for slot B, '
2557 'separated by colons. The default value '
2558 'is 15:7:0:14:7:0.'),
2559 default='15:7:0:14:7:0')
2560 sub_parser.set_defaults(func=self.set_ab_metadata)
2561
David Zeuthen21e95262016-07-27 17:58:40 -04002562 args = parser.parse_args(argv[1:])
2563 try:
2564 args.func(args)
2565 except AvbError as e:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002566 sys.stderr.write('{}: {}\n'.format(argv[0], e.message))
David Zeuthen21e95262016-07-27 17:58:40 -04002567 sys.exit(1)
2568
2569 def version(self, _):
2570 """Implements the 'version' sub-command."""
2571 print '{}.{}'.format(AVB_VERSION_MAJOR, AVB_VERSION_MINOR)
2572
2573 def extract_public_key(self, args):
2574 """Implements the 'extract_public_key' sub-command."""
2575 self.avb.extract_public_key(args.key, args.output)
2576
2577 def make_vbmeta_image(self, args):
2578 """Implements the 'make_vbmeta_image' sub-command."""
2579 self.avb.make_vbmeta_image(args.output, args.chain_partition,
2580 args.algorithm, args.key, args.rollback_index,
2581 args.prop, args.prop_from_file,
2582 args.kernel_cmdline,
2583 args.generate_dm_verity_cmdline_from_hashtree,
2584 args.include_descriptors_from_image)
2585
2586 def add_hash_footer(self, args):
2587 """Implements the 'add_hash_footer' sub-command."""
David Zeuthena4fee8b2016-08-22 15:20:43 -04002588 self.avb.add_hash_footer(args.image.name, args.partition_size,
David Zeuthen21e95262016-07-27 17:58:40 -04002589 args.partition_name, args.hash_algorithm,
2590 args.salt, args.algorithm, args.key,
2591 args.rollback_index, args.prop,
2592 args.prop_from_file, args.kernel_cmdline,
2593 args.generate_dm_verity_cmdline_from_hashtree,
2594 args.include_descriptors_from_image)
2595
2596 def add_hashtree_footer(self, args):
2597 """Implements the 'add_hashtree_footer' sub-command."""
David Zeuthen09692692016-09-30 16:16:40 -04002598 self.avb.add_hashtree_footer(args.image.name if args.image else None,
2599 args.partition_size,
2600 args.partition_name,
2601 args.hash_algorithm, args.block_size,
2602 args.salt, args.algorithm, args.key,
2603 args.rollback_index, args.prop,
2604 args.prop_from_file,
2605 args.kernel_cmdline,
David Zeuthen21e95262016-07-27 17:58:40 -04002606 args.generate_dm_verity_cmdline_from_hashtree,
David Zeuthen09692692016-09-30 16:16:40 -04002607 args.include_descriptors_from_image,
2608 args.calc_max_image_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002609
2610 def erase_footer(self, args):
2611 """Implements the 'erase_footer' sub-command."""
David Zeuthena4fee8b2016-08-22 15:20:43 -04002612 self.avb.erase_footer(args.image.name, args.keep_hashtree)
David Zeuthen21e95262016-07-27 17:58:40 -04002613
David Zeuthen8b6973b2016-09-20 12:39:49 -04002614 def set_ab_metadata(self, args):
2615 """Implements the 'set_ab_metadata' sub-command."""
2616 self.avb.set_ab_metadata(args.misc_image, args.slot_data)
2617
David Zeuthen21e95262016-07-27 17:58:40 -04002618 def info_image(self, args):
2619 """Implements the 'info_image' sub-command."""
David Zeuthena4fee8b2016-08-22 15:20:43 -04002620 self.avb.info_image(args.image.name, args.output)
David Zeuthen21e95262016-07-27 17:58:40 -04002621
2622
2623if __name__ == '__main__':
2624 tool = AvbTool()
2625 tool.run(sys.argv)