blob: e0f148e3db23b695ca1d2a1814c1d348bc478ef6 [file] [log] [blame]
David Zeuthen21e95262016-07-27 17:58:40 -04001#!/usr/bin/env python
2
3# Copyright 2016, The Android Open Source Project
4#
David Zeuthenc612e2e2016-09-16 16:44:08 -04005# Permission is hereby granted, free of charge, to any person
6# obtaining a copy of this software and associated documentation
7# files (the "Software"), to deal in the Software without
8# restriction, including without limitation the rights to use, copy,
9# modify, merge, publish, distribute, sublicense, and/or sell copies
10# of the Software, and to permit persons to whom the Software is
11# furnished to do so, subject to the following conditions:
David Zeuthen21e95262016-07-27 17:58:40 -040012#
David Zeuthenc612e2e2016-09-16 16:44:08 -040013# The above copyright notice and this permission notice shall be
14# included in all copies or substantial portions of the Software.
David Zeuthen21e95262016-07-27 17:58:40 -040015#
David Zeuthenc612e2e2016-09-16 16:44:08 -040016# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
20# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
21# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23# SOFTWARE.
David Zeuthen21e95262016-07-27 17:58:40 -040024#
David Zeuthen8b6973b2016-09-20 12:39:49 -040025"""Command-line tool for working with Android Verified Boot images."""
David Zeuthen21e95262016-07-27 17:58:40 -040026
27import argparse
David Zeuthen8b6973b2016-09-20 12:39:49 -040028import binascii
David Zeuthena4fee8b2016-08-22 15:20:43 -040029import bisect
David Zeuthen21e95262016-07-27 17:58:40 -040030import hashlib
31import os
32import struct
33import subprocess
34import sys
35
36import Crypto.PublicKey.RSA
37
# Keep in sync with avb_vbmeta_header.h.
# Version of the vbmeta header format emitted by this tool.
AVB_VERSION_MAJOR = 1
AVB_VERSION_MINOR = 0
41
42
class AvbError(Exception):
  """Error raised for conditions the tool reports without a traceback.

  When one of these propagates to the top level, only the message is
  shown to the user rather than a full stack-trace.

  Attributes:
    message: Error message.
  """

  def __init__(self, message):
    super(AvbError, self).__init__(message)
55
56
class Algorithm(object):
  """Holds the parameters of a single signing algorithm.

  See the avb_vbmeta_header.h file for more details about
  algorithms.

  The constant |ALGORITHMS| maps human-readable names
  (e.g 'SHA256_RSA2048') to instances of this class.

  Attributes:
    algorithm_type: Integer code corresponding to |AvbAlgorithmType|.
    hash_num_bytes: Number of bytes used to store the hash.
    signature_num_bytes: Number of bytes used to store the signature.
    public_key_num_bytes: Number of bytes used to store the public key.
    padding: Padding used for signature, if any.
  """

  def __init__(self, algorithm_type, hash_num_bytes, signature_num_bytes,
               public_key_num_bytes, padding):
    """Simply records the given parameters on the instance."""
    self.padding = padding
    self.public_key_num_bytes = public_key_num_bytes
    self.signature_num_bytes = signature_num_bytes
    self.hash_num_bytes = hash_num_bytes
    self.algorithm_type = algorithm_type
81
# This must be kept in sync with the avb_crypto.h file.
#
# The PKCS1-v1.5 padding is a blob of binary DER of ASN.1 and is
# obtained from section 5.2.2 of RFC 4880.
#
# NOTE: public_key_num_bytes is 8 bytes of |AvbRSAPublicKeyHeader|
# followed by two key-sized numbers (modulus and r^2 value). Floor
# division (//) is used so the value is an int on Python 3 as well;
# on Python 2 it is identical to the old '/' on ints.
ALGORITHMS = {
    'NONE': Algorithm(
        algorithm_type=0,        # AVB_ALGORITHM_TYPE_NONE
        hash_num_bytes=0,
        signature_num_bytes=0,
        public_key_num_bytes=0,
        padding=[]),
    'SHA256_RSA2048': Algorithm(
        algorithm_type=1,        # AVB_ALGORITHM_TYPE_SHA256_RSA2048
        hash_num_bytes=32,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*202 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA4096': Algorithm(
        algorithm_type=2,        # AVB_ALGORITHM_TYPE_SHA256_RSA4096
        hash_num_bytes=32,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*458 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA8192': Algorithm(
        algorithm_type=3,        # AVB_ALGORITHM_TYPE_SHA256_RSA8192
        hash_num_bytes=32,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*970 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA512_RSA2048': Algorithm(
        algorithm_type=4,        # AVB_ALGORITHM_TYPE_SHA512_RSA2048
        hash_num_bytes=64,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*170 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA4096': Algorithm(
        algorithm_type=5,        # AVB_ALGORITHM_TYPE_SHA512_RSA4096
        hash_num_bytes=64,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*426 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA8192': Algorithm(
        algorithm_type=6,        # AVB_ALGORITHM_TYPE_SHA512_RSA8192
        hash_num_bytes=64,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*938 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
}
172
173
def round_to_multiple(number, size):
  """Rounds |number| up to the nearest multiple of |size|.

  Args:
    number: The number to round up.
    size: The multiple to round up to.

  Returns:
    |number| unchanged when it is already a multiple of |size|,
    otherwise the next multiple of |size| above it.
  """
  leftover = number % size
  if not leftover:
    return number
  return number + (size - leftover)
189
190
def round_to_pow2(number):
  """Rounds |number| up to the next power of 2.

  Args:
    number: The number to round up.

  Returns:
    |number| itself when it is already a power of 2, otherwise the
    smallest power of 2 greater than |number|.
  """
  # (n - 1).bit_length() is the exponent of the smallest power of two
  # that is >= n; shifting 1 left by it avoids a float pow.
  return 1 << (number - 1).bit_length()
203
204
def write_long(output, num_bits, value):
  """Writes a long to an output stream using a given amount of bits.

  The number is emitted big-endian, i.e. most significant byte first.

  Arguments:
    output: The object to write the output to.
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.
  """
  for shift in range(num_bits - 8, -1, -8):
    output.write(struct.pack('!B', (value >> shift) & 0xff))
219
220
def encode_long(num_bits, value):
  """Encodes a long into a bytearray() using a given amount of bits.

  The number is encoded big-endian, i.e. most significant byte first.

  Arguments:
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.

  Returns:
    A bytearray() with the encoded long.
  """
  encoded = bytearray()
  for shift in range(num_bits - 8, -1, -8):
    encoded.append((value >> shift) & 0xff)
  return encoded
239
240
def egcd(a, b):
  """Calculates the greatest common divisor of two numbers.

  Recursive form of the extended Euclidean algorithm.

  Arguments:
    a: First number.
    b: Second number.

  Returns:
    A tuple (gcd, x, y) where |gcd| is the greatest common divisor of
    |a| and |b| and the Bezout coefficients satisfy
    |a|*|x| + |b|*|y| = |gcd|.
  """
  if not a:
    # gcd(0, b) = b = 0*0 + b*1.
    return (b, 0, 1)
  g, x_sub, y_sub = egcd(b % a, a)
  # Back-substitute the coefficients from the sub-problem.
  return (g, y_sub - (b // a) * x_sub, x_sub)
260
261
def modinv(a, m):
  """Calculates the modular multiplicative inverse of |a| modulo |m|.

  Finds the number |x| such that |a| * |x| == 1 (modulo |m|). Such a
  number exists only when |a| and |m| are co-prime; |None| is returned
  otherwise.

  Arguments:
    a: The number to calculate a modular inverse of.
    m: The modulo to use.

  Returns:
    The modular multiplicative inverse of |a| and |m|, or |None| when
    the two numbers are not co-prime.
  """
  gcd, inverse, _ = egcd(a, m)
  if gcd == 1:
    return inverse % m
  return None  # The modular inverse does not exist.
282
283
def parse_number(string):
  """Parses a string as an integer, honoring base prefixes.

  Shorthand for int(string, 0), intended for the |type| parameter of
  |ArgumentParser|'s add_argument() function. Unlike type=int this
  also accepts numbers in other bases, e.g. "0x1234".

  Arguments:
    string: The string to parse.

  Returns:
    The parsed integer.

  Raises:
    ValueError: If the number could not be parsed.
  """
  return int(string, 0)
302
303
def write_rsa_key(output, key):
  """Writes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This writes the |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    output: The object to write the output to.
    key: A Crypto.PublicKey.RSA object.
  """
  # key.e is exponent
  # key.n is modulus
  # NOTE(review): PyCrypto's key.size() appears to be the modulus bit
  # length minus one, hence the +1 — confirm against the PyCrypto docs.
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32), i.e. the negated inverse of
  # the least-significant 32-bit word of the modulus (presumably used
  # for Montgomery-style reduction by the verifier — see avb_crypto.h).
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  # Header is two big-endian u32s (key size in bits, n0inv), followed
  # by modulus and r^2, each |key_num_bits| bits long.
  output.write(struct.pack('!II', key_num_bits, n0inv))
  write_long(output, key_num_bits, key.n)
  write_long(output, key_num_bits, rrmodn)
326
327
def encode_rsa_key(key):
  """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This creates a |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it. In-memory
  counterpart of write_rsa_key() — the two must stay in sync.

  Arguments:
    key: A Crypto.PublicKey.RSA object.

  Returns:
    A bytearray() with the |AvbRSAPublicKeyHeader|.
  """
  ret = bytearray()
  # key.e is exponent
  # key.n is modulus
  # NOTE(review): PyCrypto's key.size() appears to be the modulus bit
  # length minus one, hence the +1 — confirm against the PyCrypto docs.
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32)
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  # Header is two big-endian u32s (key size in bits, n0inv), followed
  # by modulus and r^2, each |key_num_bits| bits long.
  ret.extend(struct.pack('!II', key_num_bits, n0inv))
  ret.extend(encode_long(key_num_bits, key.n))
  ret.extend(encode_long(key_num_bits, rrmodn))
  return ret
354
355
def lookup_algorithm_by_type(alg_type):
  """Looks up an algorithm by its integer type code.

  Arguments:
    alg_type: The integer representing the type.

  Returns:
    A tuple with the algorithm name and an |Algorithm| instance.

  Raises:
    AvbError: If the algorithm cannot be found.
  """
  for name, algorithm in ALGORITHMS.items():
    if algorithm.algorithm_type == alg_type:
      return (name, algorithm)
  raise AvbError('Unknown algorithm type {}'.format(alg_type))
373
374
class ImageChunk(object):
  """Represents a single chunk of an Android sparse file.

  Attributes:
    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
    chunk_offset: Offset in the sparse file where this chunk begins.
    output_offset: Offset in de-sparsified file where output begins.
    output_size: Number of bytes in output.
    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
  """

  FORMAT = '<2H2I'
  TYPE_RAW = 0xcac1
  TYPE_FILL = 0xcac2
  TYPE_DONT_CARE = 0xcac3
  TYPE_CRC32 = 0xcac4

  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
               input_offset, fill_data):
    """Initializes an ImageChunk object.

    Arguments:
      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
      chunk_offset: Offset in the sparse file where this chunk begins.
      output_offset: Offset in de-sparsified file.
      output_size: Number of bytes in output.
      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
      fill_data: Blob with data to fill if TYPE_FILL otherwise None.

    Raises:
      ValueError: If data is not well-formed.
    """
    self.chunk_type = chunk_type
    self.chunk_offset = chunk_offset
    self.output_offset = output_offset
    self.output_size = output_size
    self.input_offset = input_offset
    self.fill_data = fill_data
    self._check_invariants()

  def _check_invariants(self):
    """Raises ValueError unless the field combination is legal."""
    if self.chunk_type == self.TYPE_RAW:
      if self.fill_data is not None:
        raise ValueError('RAW chunk cannot have fill_data set.')
      if not self.input_offset:
        raise ValueError('RAW chunk must have input_offset set.')
    elif self.chunk_type == self.TYPE_FILL:
      if self.fill_data is None:
        raise ValueError('FILL chunk must have fill_data set.')
      if self.input_offset:
        raise ValueError('FILL chunk cannot have input_offset set.')
    elif self.chunk_type == self.TYPE_DONT_CARE:
      if self.fill_data is not None:
        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
      if self.input_offset:
        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
    else:
      # TYPE_CRC32 chunks are consumed by the parser and never stored.
      raise ValueError('Invalid chunk type')
432
433
class ImageHandler(object):
  """Abstraction for image I/O with support for Android sparse images.

  This class provides an interface for working with image files that
  may be using the Android Sparse Image format. When an instance is
  constructed, we test whether it's an Android sparse file. If so,
  operations will be on the sparse file by interpreting the sparse
  format, otherwise they will be directly on the file. Either way the
  operations do the same.

  For reading, this interface mimics a file object - it has seek(),
  tell(), and read() methods. For writing, only truncation
  (truncate()) and appending is supported (append_raw(),
  append_fill() and append_dont_care()). Additionally, data can only
  be written in units of the block size.

  Attributes:
    is_sparse: Whether the file being operated on is sparse.
    block_size: The block size, typically 4096.
    image_size: The size of the unsparsified file.
    care_size: Position in the unsparsified file where only
        DONT_CARE data follows.
  """
  # See system/core/libsparse/sparse_format.h for details.
  MAGIC = 0xed26ff3a
  # Header: magic, major, minor, file_hdr_sz, chunk_hdr_sz, blk_sz,
  # total_blks, total_chunks, image_checksum.
  HEADER_FORMAT = '<I4H4I'

  # These are formats and offset of just the |total_chunks| and
  # |total_blocks| fields.
  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16

  def __init__(self, image_filename):
    """Initializes an image handler.

    Arguments:
      image_filename: The name of the file to operate on.

    Raises:
      ValueError: If data in the file is invalid.
    """
    self._image_filename = image_filename
    self._read_header()

  def _read_header(self):
    """Initializes internal data structures used for reading file.

    This may be called multiple times and is typically called after
    modifying the file (e.g. appending, truncation).

    Raises:
      ValueError: If data in the file is invalid.
    """
    # Default to treating the file as a regular (non-sparse) image
    # until the sparse magic checks out.
    self.is_sparse = False
    self.block_size = 4096
    self._file_pos = 0
    # Opened read/write since append_*() and truncate() modify the
    # file in place.
    self._image = open(self._image_filename, 'r+b')
    self._image.seek(0, os.SEEK_END)
    self.care_size = self._image.tell()
    self.image_size = self._image.tell()

    self._image.seek(0, os.SEEK_SET)
    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
     block_size, self._num_total_blocks, self._num_total_chunks,
     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
    if magic != self.MAGIC:
      # Not a sparse image, our job here is done.
      return
    if not (major_version == 1 and minor_version == 0):
      raise ValueError('Encountered sparse image format version {}.{} but '
                       'only 1.0 is supported'.format(major_version,
                                                      minor_version))
    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
      raise ValueError('Unexpected file_hdr_sz value {}.'.
                       format(file_hdr_sz))
    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
                       format(chunk_hdr_sz))

    self.block_size = block_size

    # Build a list of chunks by parsing the file.
    self._chunks = []

    # Find the smallest offset where only "Don't care" chunks
    # follow. This will be the size of the content in the sparse
    # image.
    offset = 0
    output_offset = 0
    last_dont_care_section_output_offset = None
    last_section_was_dont_care = False
    for _ in xrange(1, self._num_total_chunks + 1):
      chunk_offset = self._image.tell()

      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
                                                          header_bin)
      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)

      # NOTE(review): this flag is cleared on every iteration, so for a
      # run of consecutive DONT_CARE chunks the recorded offset is that
      # of the last chunk in the run, not the first — confirm whether
      # libsparse ever emits consecutive DONT_CARE chunks.
      last_section_was_dont_care = False

      if chunk_type == ImageChunk.TYPE_RAW:
        if data_sz != (chunk_sz * self.block_size):
          raise ValueError('Raw chunk input size ({}) does not match output '
                           'size ({})'.
                           format(data_sz, chunk_sz*self.block_size))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       self._image.tell(),
                                       None))
        # Skip over the payload; read() seeks back later via
        # input_offset when the data is actually needed.
        self._image.read(data_sz)

      elif chunk_type == ImageChunk.TYPE_FILL:
        if data_sz != 4:
          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
                           'has {}'.format(data_sz))
        fill_data = self._image.read(4)
        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       fill_data))
      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
        if data_sz != 0:
          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
                           format(data_sz))
        else:
          if not last_section_was_dont_care:
            last_dont_care_section_output_offset = output_offset
            last_section_was_dont_care = True
        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       None))
      elif chunk_type == ImageChunk.TYPE_CRC32:
        if data_sz != 4:
          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
                           'this has {}'.format(data_sz))
        # CRC chunks are skipped, not stored or verified.
        self._image.read(4)
      else:
        raise ValueError('Unknown chunk type {}'.format(chunk_type))

      offset += chunk_sz
      output_offset += chunk_sz*self.block_size

    # Record where sparse data ends; appends start from here.
    self._sparse_end = self._image.tell()

    # Now that we've traversed all chunks, sanity check.
    if self._num_total_blocks != offset:
      raise ValueError('The header said we should have {} output blocks, '
                       'but we saw {}'.format(self._num_total_blocks, offset))
    junk_len = len(self._image.read())
    if junk_len > 0:
      raise ValueError('There were {} bytes of extra data at the end of the '
                       'file.'.format(junk_len))

    # Assign |image_size| and |care_size| attributes.
    self.image_size = output_offset
    if last_section_was_dont_care:
      self.care_size = last_dont_care_section_output_offset
    else:
      self.care_size = output_offset

    # This is used when bisecting in read() to find the initial slice.
    self._chunk_output_offsets = [i.output_offset for i in self._chunks]

    self.is_sparse = True

  def _update_chunks_and_blocks(self):
    """Helper function to update the image header.

    The |total_chunks| and |total_blocks| fields in the header
    will be set to the value of the |_num_total_blocks| and
    |_num_total_chunks| attributes.
    """
    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
                                  self._num_total_blocks,
                                  self._num_total_chunks))

  def append_dont_care(self, num_bytes):
    """Appends a DONT_CARE chunk to the sparse file.

    The given number of bytes must be a multiple of the block size.

    Arguments:
      num_bytes: Size in number of bytes of the DONT_CARE chunk.
    """
    assert num_bytes % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # This is more efficient than writing NUL bytes since it'll add
      # a hole on file systems that support sparse files (native
      # sparse, not Android sparse).
      self._image.truncate(self._image.tell() + num_bytes)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += num_bytes / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_DONT_CARE,
                                  0,  # Reserved
                                  num_bytes / self.block_size,
                                  struct.calcsize(ImageChunk.FORMAT)))
    # Re-parse so internal chunk bookkeeping reflects the new chunk.
    self._read_header()

  def append_raw(self, data):
    """Appends a RAW chunk to the sparse file.

    The length of the given data must be a multiple of the block size.

    Arguments:
      data: Data to append.
    """
    assert len(data) % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(data)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += len(data) / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_RAW,
                                  0,  # Reserved
                                  len(data) / self.block_size,
                                  len(data) +
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(data)
    # Re-parse so internal chunk bookkeeping reflects the new chunk.
    self._read_header()

  def append_fill(self, fill_data, size):
    """Appends a fill chunk to the sparse file.

    The total length of the fill data must be a multiple of the block size.

    Arguments:
      fill_data: Fill data to append - must be four bytes.
      size: Number of bytes - must be a multiple of four and the block size.
    """
    assert len(fill_data) == 4
    assert size % 4 == 0
    assert size % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # Non-sparse fallback: materialize the fill pattern.
      self._image.write(fill_data * (size/4))
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += size / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_FILL,
                                  0,  # Reserved
                                  size / self.block_size,
                                  4 + struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(fill_data)
    # Re-parse so internal chunk bookkeeping reflects the new chunk.
    self._read_header()

  def seek(self, offset):
    """Sets the cursor position for reading from unsparsified file.

    Arguments:
      offset: Offset to seek to from the beginning of the file.
    """
    self._file_pos = offset

  def read(self, size):
    """Reads data from the unsparsified file.

    This method may return fewer than |size| bytes of data if the end
    of the file was encountered.

    The file cursor for reading is advanced by the number of bytes
    read.

    Arguments:
      size: Number of bytes to read.

    Returns:
      The data.
    """
    if not self.is_sparse:
      self._image.seek(self._file_pos)
      data = self._image.read(size)
      self._file_pos += len(data)
      return data

    # Iterate over all chunks, starting from the one containing
    # |_file_pos| (found via binary search on the sorted offsets).
    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
                                    self._file_pos) - 1
    data = bytearray()
    to_go = size
    while to_go > 0:
      chunk = self._chunks[chunk_idx]
      chunk_pos_offset = self._file_pos - chunk.output_offset
      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)

      if chunk.chunk_type == ImageChunk.TYPE_RAW:
        self._image.seek(chunk.input_offset + chunk_pos_offset)
        data.extend(self._image.read(chunk_pos_to_go))
      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
        # Over-generate the repeated pattern (+2 repetitions) so the
        # slice below is always in range even with a misaligned start.
        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
        offset_mod = chunk_pos_offset % len(chunk.fill_data)
        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
      else:
        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
        # DONT_CARE reads back as zeroes.
        data.extend('\0' * chunk_pos_to_go)

      to_go -= chunk_pos_to_go
      self._file_pos += chunk_pos_to_go
      chunk_idx += 1
      # Generate partial read in case of EOF.
      if chunk_idx >= len(self._chunks):
        break

    return data

  def tell(self):
    """Returns the file cursor position for reading from unsparsified file.

    Returns:
      The file cursor position for reading.
    """
    return self._file_pos

  def truncate(self, size):
    """Truncates the unsparsified file.

    Growing the file appends a DONT_CARE section; shrinking cuts the
    chunk list, rewriting the boundary chunk if the cut falls inside it.

    Arguments:
      size: Desired size of unsparsified file.

    Raises:
      ValueError: If desired size isn't a multiple of the block size.
    """
    if not self.is_sparse:
      self._image.truncate(size)
      self._read_header()
      return

    if size % self.block_size != 0:
      raise ValueError('Cannot truncate to a size which is not a multiple '
                       'of the block size')

    if size == self.image_size:
      # Trivial where there's nothing to do.
      return
    elif size < self.image_size:
      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
      chunk = self._chunks[chunk_idx]
      if chunk.output_offset != size:
        # Truncation in the middle of a chunk - need to keep the chunk
        # and modify it.
        chunk_idx_for_update = chunk_idx + 1
        num_to_keep = size - chunk.output_offset
        assert num_to_keep % self.block_size == 0
        if chunk.chunk_type == ImageChunk.TYPE_RAW:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
          data_sz = num_to_keep
        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + 4)
          data_sz = 4
        else:
          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
          data_sz = 0
        chunk_sz = num_to_keep/self.block_size
        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
        # Rewrite the boundary chunk's header with its reduced size.
        self._image.seek(chunk.chunk_offset)
        self._image.write(struct.pack(ImageChunk.FORMAT,
                                      chunk.chunk_type,
                                      0,  # Reserved
                                      chunk_sz,
                                      total_sz))
        chunk.output_size = num_to_keep
      else:
        # Truncation at chunk boundary.
        truncate_at = chunk.chunk_offset
        chunk_idx_for_update = chunk_idx

      self._num_total_chunks = chunk_idx_for_update
      self._num_total_blocks = 0
      for i in range(0, chunk_idx_for_update):
        self._num_total_blocks += self._chunks[i].output_size / self.block_size
      self._update_chunks_and_blocks()
      self._image.truncate(truncate_at)

      # We've modified the file so re-read all data.
      self._read_header()
    else:
      # Truncating to grow - just add a DONT_CARE section.
      self.append_dont_care(size - self.image_size)
851
852
class AvbDescriptor(object):
  """Represents a generic AVB descriptor.

  Mirrors the |AvbDescriptor| C struct; descriptors of unrecognized
  types are carried around as an opaque (tag, data) pair.

  Attributes:
    tag: The tag identifying what kind of descriptor this is.
    data: The data in the descriptor.
  """

  SIZE = 16
  FORMAT_STRING = ('!QQ')  # tag, num_bytes_following (descriptor header)

  def __init__(self, data):
    """Initializes a new property descriptor.

    Arguments:
      data: If not None, must be a bytearray().

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.tag = None
      self.data = None
      return

    header = data[0:self.SIZE]
    (self.tag, payload_len) = struct.unpack(self.FORMAT_STRING, header)
    self.data = data[self.SIZE:self.SIZE + payload_len]

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write(' Unknown descriptor:\n')
    o.write(' Tag: {}\n'.format(self.tag))
    if len(self.data) >= 256:
      o.write(' Data: {} bytes\n'.format(len(self.data)))
    else:
      o.write(' Data: {} ({} bytes)\n'.format(
          repr(str(self.data)), len(self.data)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    payload_len = len(self.data)
    # The payload is padded with NUL bytes to an 8-byte boundary and
    # |num_bytes_following| covers the padded length.
    padded_len = round_to_multiple(payload_len, 8)
    header = struct.pack(self.FORMAT_STRING, self.tag, padded_len)
    padding = struct.pack(str(padded_len - payload_len) + 'x')
    return bytearray(header + self.data + padding)
912
913
class AvbPropertyDescriptor(AvbDescriptor):
  """A class for property descriptors.

  See the |AvbPropertyDescriptor| C struct for more information.

  Attributes:
    key: The key.
    value: The value.
  """

  TAG = 0
  SIZE = 32
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'    # key size (bytes)
                   'Q')   # value size (bytes)

  def __init__(self, data=None):
    """Initializes a new property descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.key = ''
      self.value = ''
      return

    (tag, num_bytes_following, key_size, value_size) = struct.unpack(
        self.FORMAT_STRING, data[0:self.SIZE])
    # Key and value are each NUL-terminated; the whole payload is
    # padded to an 8-byte boundary.
    expected_size = round_to_multiple(
        self.SIZE - 16 + key_size + 1 + value_size + 1, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a property '
                        'descriptor.')
    key_end = self.SIZE + key_size
    self.key = data[self.SIZE:key_end]
    self.value = data[(key_end + 1):(key_end + 1 + value_size)]

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    if len(self.value) >= 256:
      o.write(' Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value)))
    else:
      o.write(' Prop: {} -> {}\n'.format(self.key, repr(str(self.value))))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    payload_len = self.SIZE + len(self.key) + len(self.value) + 2 - 16
    padded_len = round_to_multiple(payload_len, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_len,
                         len(self.key), len(self.value))
    padding = struct.pack(str(padded_len - payload_len) + 'x')
    return bytearray(header + self.key + '\0' + self.value + '\0' + padding)
982
983
class AvbHashtreeDescriptor(AvbDescriptor):
  """A class for hashtree descriptors.

  See the |AvbHashtreeDescriptor| C struct for more information.

  Attributes:
    dm_verity_version: dm-verity version used.
    image_size: Size of the image, after rounding up to |block_size|.
    tree_offset: Offset of the hash tree in the file.
    tree_size: Size of the tree.
    data_block_size: Data block size.
    hash_block_size: Hash block size.
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    root_digest: Root digest.
  """

  TAG = 1
  SIZE = 96
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # dm-verity version used
                   'Q'  # image size (bytes)
                   'Q'  # tree offset (bytes)
                   'Q'  # tree size (bytes)
                   'L'  # data block size (bytes)
                   'L'  # hash block size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # root digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hashtree descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.dm_verity_version, self.image_size,
       self.tree_offset, self.tree_size, self.data_block_size,
       self.hash_block_size, self.hash_algorithm, partition_name_len, salt_len,
       root_digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      # num_bytes_following covers the variable-length payload (name,
      # salt, root digest) padded up to 8 bytes, but not the 16-byte
      # descriptor header itself.
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hashtree '
                          'descriptor.')
      # Nuke NUL-bytes at the end (hash_algorithm is a fixed 32-byte field).
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      # The variable-length fields follow the fixed header back-to-back:
      # partition name, then salt, then root digest.
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
      # Cross-check: the digest must be exactly one digest long for the
      # declared hash algorithm.
      if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('root_digest_len doesn\'t match hash algorithm')

    else:
      self.dm_verity_version = 0
      self.image_size = 0
      self.tree_offset = 0
      self.tree_size = 0
      self.data_block_size = 0
      self.hash_block_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.root_digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Hashtree descriptor:\n')
    o.write('      Version of dm-verity:  {}\n'.format(self.dm_verity_version))
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Tree Offset:           {}\n'.format(self.tree_offset))
    o.write('      Tree Size:             {} bytes\n'.format(self.tree_size))
    o.write('      Data Block Size:       {} bytes\n'.format(
        self.data_block_size))
    o.write('      Hash Block Size:       {} bytes\n'.format(
        self.hash_block_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    # NOTE(review): str.encode('hex') is Python 2 only; this module
    # appears to target Python 2 throughout.
    o.write('      Salt:                  {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write('      Root Digest:           {}\n'.format(str(
        self.root_digest).encode('hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header.
    num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) +
                           len(self.root_digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.dm_verity_version, self.image_size,
                       self.tree_offset, self.tree_size, self.data_block_size,
                       self.hash_block_size, self.hash_algorithm,
                       len(encoded_name), len(self.salt), len(self.root_digest))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.root_digest + padding
    return bytearray(ret)
1105
1106
class AvbHashDescriptor(AvbDescriptor):
  """A class for hash descriptors.

  See the |AvbHashDescriptor| C struct for more information.

  Attributes:
    image_size: Image size, in bytes.
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    digest: The hash value of salt and data combined.
  """

  TAG = 2
  SIZE = 68
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'  # image size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hash descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.image_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.digest = bytearray()
      return

    (tag, num_bytes_following, self.image_size, self.hash_algorithm,
     partition_name_len, salt_len,
     digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
    # num_bytes_following covers the variable-length payload (padded to
    # 8 bytes) but not the 16-byte descriptor header.
    expected_size = round_to_multiple(
        self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a hash descriptor.')
    # Strip trailing NUL-bytes from the fixed 32-byte algorithm field.
    self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
    # Partition name, salt and digest follow the header back-to-back.
    cursor = self.SIZE
    self.partition_name = str(data[cursor:cursor + partition_name_len])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.partition_name.decode('utf-8')
    cursor += partition_name_len
    self.salt = data[cursor:cursor + salt_len]
    cursor += salt_len
    self.digest = data[cursor:cursor + digest_len]
    if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
      raise LookupError('digest_len doesn\'t match hash algorithm')

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    hex_salt = str(self.salt).encode('hex')
    hex_digest = str(self.digest).encode('hex')
    o.write('    Hash descriptor:\n')
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Salt:                  {}\n'.format(hex_salt))
    o.write('      Digest:                {}\n'.format(hex_digest))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header.
    payload_size = (
        self.SIZE + len(encoded_name) + len(self.salt) + len(self.digest) - 16)
    padded_size = round_to_multiple(payload_size, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_size,
                         self.image_size, self.hash_algorithm,
                         len(encoded_name), len(self.salt), len(self.digest))
    padding = struct.pack(str(padded_size - payload_size) + 'x')
    return bytearray(header + encoded_name + self.salt + self.digest + padding)
1202
1203
class AvbKernelCmdlineDescriptor(AvbDescriptor):
  """A class for kernel command-line descriptors.

  See the |AvbKernelCmdlineDescriptor| C struct for more information.

  Attributes:
    kernel_cmdline: The kernel command-line.
  """

  TAG = 3
  SIZE = 20
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L')  # cmdline length (bytes)

  def __init__(self, data=None):
    """Initializes a new kernel cmdline descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.kernel_cmdline = ''
      return

    (tag, num_bytes_following, kernel_cmdline_length) = (
        struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
    # num_bytes_following covers the cmdline string (padded to 8 bytes)
    # but not the 16-byte descriptor header.
    expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a kernel cmdline '
                        'descriptor.')
    self.kernel_cmdline = str(data[self.SIZE:(self.SIZE +
                                              kernel_cmdline_length)])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.kernel_cmdline.decode('utf-8')

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Kernel Cmdline descriptor:\n')
    o.write('      Kernel Cmdline:        {}\n'.format(
        repr(self.kernel_cmdline)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_str = self.kernel_cmdline.encode('utf-8')
    payload_size = self.SIZE + len(encoded_str) - 16
    padded_size = round_to_multiple(payload_size, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_size,
                         len(encoded_str))
    padding = struct.pack(str(padded_size - payload_size) + 'x')
    return bytearray(header + encoded_str + padding)
1271
1272
class AvbChainPartitionDescriptor(AvbDescriptor):
  """A class for chained partition descriptors.

  See the |AvbChainPartitionDescriptor| C struct for more information.

  Attributes:
    rollback_index_slot: The rollback index slot to use.
    partition_name: Partition name.
    public_key: Bytes for the public key.
  """

  TAG = 4
  SIZE = 28
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # rollback_index_slot
                   'L'  # partition_name_size (bytes)
                   'L')  # public_key_size (bytes)

  def __init__(self, data=None):
    """Initializes a new chain partition descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.rollback_index_slot = 0
      self.partition_name = ''
      self.public_key = bytearray()
      return

    (tag, num_bytes_following, self.rollback_index_slot, partition_name_len,
     public_key_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
    # num_bytes_following covers the name and key (padded to 8 bytes)
    # but not the 16-byte descriptor header.
    expected_size = round_to_multiple(
        self.SIZE - 16 + partition_name_len + public_key_len, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a chain partition '
                        'descriptor.')
    cursor = self.SIZE
    self.partition_name = str(data[cursor:cursor + partition_name_len])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.partition_name.decode('utf-8')
    cursor += partition_name_len
    self.public_key = data[cursor:cursor + public_key_len]

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Chain Partition descriptor:\n')
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Rollback Index Slot:   {}\n'.format(
        self.rollback_index_slot))
    # Just show the SHA1 of the key, for size reasons.
    o.write('      Public key (sha1):     {}\n'.format(
        hashlib.sha1(self.public_key).hexdigest()))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    payload_size = self.SIZE + len(encoded_name) + len(self.public_key) - 16
    padded_size = round_to_multiple(payload_size, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_size,
                         self.rollback_index_slot, len(encoded_name),
                         len(self.public_key))
    padding = struct.pack(str(padded_size - payload_size) + 'x')
    return bytearray(header + encoded_name + self.public_key + padding)
1355
1356
# Parser classes indexed by descriptor tag: the class with TAG == n must
# sit at index n, since parse_descriptors() looks classes up by tag.
DESCRIPTOR_CLASSES = [
    AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor,
    AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor
]
1361
1362
def parse_descriptors(data):
  """Parses a blob of data into descriptors.

  Arguments:
    data: A bytearray() with encoded descriptors.

  Returns:
    A list of instances of objects derived from AvbDescriptor. For
    unknown descriptors, the class AvbDescriptor is used.
  """
  descriptors = []
  offset = 0
  while offset < len(data):
    (tag, nb_following) = struct.unpack('!2Q', data[offset:offset + 16])
    # The tag indexes directly into DESCRIPTOR_CLASSES; unknown tags
    # fall back to the generic AvbDescriptor.
    if tag < len(DESCRIPTOR_CLASSES):
      desc_class = DESCRIPTOR_CLASSES[tag]
    else:
      desc_class = AvbDescriptor
    end = offset + 16 + nb_following
    descriptors.append(desc_class(bytearray(data[offset:end])))
    offset = end
  return descriptors
1384
1385
class AvbFooter(object):
  """A class for parsing and writing footers.

  Footers are stored at the end of partitions and point to where the
  AvbVBMeta blob is located. They also contain the original size of
  the image before AVB information was added.

  Attributes:
    magic: Magic for identifying the footer, see |MAGIC|.
    version_major: The major version of avbtool that wrote the footer.
    version_minor: The minor version of avbtool that wrote the footer.
    original_image_size: Original image size.
    vbmeta_offset: Offset of where the AvbVBMeta blob is stored.
    vbmeta_size: Size of the AvbVBMeta blob.
  """

  MAGIC = 'AVBf'
  SIZE = 64
  RESERVED = 28
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version.
                   'Q'  # Original image size.
                   'Q'  # Offset of VBMeta blob.
                   'Q' +  # Size of VBMeta blob.
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new footer object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given footer is malformed.
      struct.error: If the given data has no footer.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      fields = struct.unpack(self.FORMAT_STRING, data)
      (self.magic, self.version_major, self.version_minor,
       self.original_image_size, self.vbmeta_offset,
       self.vbmeta_size) = fields
      if self.magic != self.MAGIC:
        raise LookupError('Given data does not look like a AVB footer.')
    else:
      # Fresh footer: stamped with this tool's version, sizes zeroed.
      self.magic = self.MAGIC
      (self.version_major, self.version_minor) = (AVB_VERSION_MAJOR,
                                                  AVB_VERSION_MINOR)
      self.original_image_size = 0
      self.vbmeta_offset = 0
      self.vbmeta_size = 0

  def encode(self):
    """Gets a string representing the binary encoding of the footer.

    Returns:
      A bytearray() with a binary representation of the footer.
    """
    return struct.pack(self.FORMAT_STRING, self.magic, self.version_major,
                       self.version_minor, self.original_image_size,
                       self.vbmeta_offset, self.vbmeta_size)
David Zeuthen21e95262016-07-27 17:58:40 -04001446
1447
class AvbVBMetaHeader(object):
  """A class for parsing and writing AVB vbmeta images.

  Attributes:
    The attributes correspond to the |AvbVBMetaHeader| struct
    defined in avb_vbmeta_header.h.
  """

  SIZE = 256

  # Keep in sync with |reserved| field of |AvbVBMetaImageHeader|.
  RESERVED = 152

  # Keep in sync with |AvbVBMetaImageHeader|.
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version
                   '2Q'  # 2 x block size
                   'L'  # algorithm type
                   '2Q'  # offset, size (hash)
                   '2Q'  # offset, size (signature)
                   '2Q'  # offset, size (public key)
                   '2Q'  # offset, size (descriptors)
                   'Q' +  # rollback_index
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new header object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      AvbError: If the given data is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      # Fresh header: everything zeroed except magic and version.
      self.magic = 'AVB0'
      self.header_version_major = AVB_VERSION_MAJOR
      self.header_version_minor = AVB_VERSION_MINOR
      self.authentication_data_block_size = 0
      self.auxiliary_data_block_size = 0
      self.algorithm_type = 0
      self.hash_offset = 0
      self.hash_size = 0
      self.signature_offset = 0
      self.signature_size = 0
      self.public_key_offset = 0
      self.public_key_size = 0
      self.descriptors_offset = 0
      self.descriptors_size = 0
      self.rollback_index = 0
      return

    (self.magic, self.header_version_major, self.header_version_minor,
     self.authentication_data_block_size, self.auxiliary_data_block_size,
     self.algorithm_type, self.hash_offset, self.hash_size,
     self.signature_offset, self.signature_size, self.public_key_offset,
     self.public_key_size, self.descriptors_offset, self.descriptors_size,
     self.rollback_index) = struct.unpack(self.FORMAT_STRING, data)
    if self.magic != 'AVB0':
      raise AvbError('Given image does not look like a vbmeta image.')

  def _field_tuple(self):
    """Returns all header fields in wire order, for packing."""
    return (self.magic, self.header_version_major, self.header_version_minor,
            self.authentication_data_block_size,
            self.auxiliary_data_block_size, self.algorithm_type,
            self.hash_offset, self.hash_size, self.signature_offset,
            self.signature_size, self.public_key_offset, self.public_key_size,
            self.descriptors_offset, self.descriptors_size,
            self.rollback_index)

  def save(self, output):
    """Serializes the header (256 bytes) to disk.

    Arguments:
      output: The object to write the output to.
    """
    output.write(self.encode())

  def encode(self):
    """Serializes the header (256 bytes) to a bytearray().

    Returns:
      A bytearray() with the encoded header.
    """
    return struct.pack(self.FORMAT_STRING, *self._field_tuple())
1538
1539
1540class Avb(object):
1541 """Business logic for avbtool command-line tool."""
1542
  # Keep in sync with avb_ab_flow.h.
  #
  # AB_FORMAT_NO_CRC lays out the A/B metadata minus the trailing CRC:
  # 4-byte magic, version major/minor, 2 reserved bytes, then per slot
  # (priority, tries_remaining, successful_boot, 1 reserved byte) for
  # slots A and B, then 12 reserved bytes. set_ab_metadata() appends a
  # big-endian CRC32 of these bytes.
  AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x'
  AB_MAGIC = '\0AB0'
  AB_MAJOR_VERSION = 1
  AB_MINOR_VERSION = 0
  # Byte offset of the A/B metadata within the misc partition.
  AB_MISC_METADATA_OFFSET = 2048
1549
David Zeuthena4fee8b2016-08-22 15:20:43 -04001550 def erase_footer(self, image_filename, keep_hashtree):
David Zeuthen21e95262016-07-27 17:58:40 -04001551 """Implements the 'erase_footer' command.
1552
1553 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001554 image_filename: File to erase a footer from.
David Zeuthen21e95262016-07-27 17:58:40 -04001555 keep_hashtree: If True, keep the hashtree around.
1556
1557 Raises:
1558 AvbError: If there's no footer in the image.
1559 """
1560
David Zeuthena4fee8b2016-08-22 15:20:43 -04001561 image = ImageHandler(image_filename)
1562
David Zeuthen21e95262016-07-27 17:58:40 -04001563 (footer, _, descriptors, _) = self._parse_image(image)
1564
1565 if not footer:
1566 raise AvbError('Given image does not have a footer.')
1567
1568 new_image_size = None
1569 if not keep_hashtree:
1570 new_image_size = footer.original_image_size
1571 else:
1572 # If requested to keep the hashtree, search for a hashtree
1573 # descriptor to figure out the location and size of the hashtree.
1574 for desc in descriptors:
1575 if isinstance(desc, AvbHashtreeDescriptor):
1576 # The hashtree is always just following the main data so the
1577 # new size is easily derived.
1578 new_image_size = desc.tree_offset + desc.tree_size
1579 break
1580 if not new_image_size:
1581 raise AvbError('Requested to keep hashtree but no hashtree '
1582 'descriptor was found.')
1583
1584 # And cut...
1585 image.truncate(new_image_size)
1586
David Zeuthen8b6973b2016-09-20 12:39:49 -04001587 def set_ab_metadata(self, misc_image, slot_data):
1588 """Implements the 'set_ab_metadata' command.
1589
1590 The |slot_data| argument must be of the form 'A_priority:A_tries_remaining:
1591 A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'.
1592
1593 Arguments:
1594 misc_image: The misc image to write to.
1595 slot_data: Slot data as a string
1596
1597 Raises:
1598 AvbError: If slot data is malformed.
1599 """
1600 tokens = slot_data.split(':')
1601 if len(tokens) != 6:
1602 raise AvbError('Malformed slot data "{}".'.format(slot_data))
1603 a_priority = int(tokens[0])
1604 a_tries_remaining = int(tokens[1])
1605 a_success = True if int(tokens[2]) != 0 else False
1606 b_priority = int(tokens[3])
1607 b_tries_remaining = int(tokens[4])
1608 b_success = True if int(tokens[5]) != 0 else False
1609
1610 ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC,
1611 self.AB_MAGIC,
1612 self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION,
1613 a_priority, a_tries_remaining, a_success,
1614 b_priority, b_tries_remaining, b_success)
1615 # Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why.
1616 crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff
1617 ab_data = ab_data_no_crc + struct.pack('!I', crc_value)
1618 misc_image.seek(self.AB_MISC_METADATA_OFFSET)
1619 misc_image.write(ab_data)
1620
David Zeuthena4fee8b2016-08-22 15:20:43 -04001621 def info_image(self, image_filename, output):
David Zeuthen21e95262016-07-27 17:58:40 -04001622 """Implements the 'info_image' command.
1623
1624 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001625 image_filename: Image file to get information from (file object).
David Zeuthen21e95262016-07-27 17:58:40 -04001626 output: Output file to write human-readable information to (file object).
1627 """
1628
David Zeuthena4fee8b2016-08-22 15:20:43 -04001629 image = ImageHandler(image_filename)
1630
David Zeuthen21e95262016-07-27 17:58:40 -04001631 o = output
1632
1633 (footer, header, descriptors, image_size) = self._parse_image(image)
1634
1635 if footer:
1636 o.write('Footer version: {}.{}\n'.format(footer.version_major,
1637 footer.version_minor))
1638 o.write('Image size: {} bytes\n'.format(image_size))
1639 o.write('Original image size: {} bytes\n'.format(
1640 footer.original_image_size))
1641 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset))
1642 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size))
1643 o.write('--\n')
1644
1645 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type)
1646
David Zeuthena4fee8b2016-08-22 15:20:43 -04001647 o.write('VBMeta image version: {}.{}{}\n'.format(
1648 header.header_version_major, header.header_version_minor,
1649 ' (Sparse)' if image.is_sparse else ''))
David Zeuthen21e95262016-07-27 17:58:40 -04001650 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE))
1651 o.write('Authentication Block: {} bytes\n'.format(
1652 header.authentication_data_block_size))
1653 o.write('Auxiliary Block: {} bytes\n'.format(
1654 header.auxiliary_data_block_size))
1655 o.write('Algorithm: {}\n'.format(alg_name))
1656 o.write('Rollback Index: {}\n'.format(header.rollback_index))
1657
1658 # Print descriptors.
1659 num_printed = 0
1660 o.write('Descriptors:\n')
1661 for desc in descriptors:
1662 desc.print_desc(o)
1663 num_printed += 1
1664 if num_printed == 0:
1665 o.write(' (none)\n')
1666
  def _parse_image(self, image):
    """Gets information about an image.

    The image can either be a vbmeta or an image with a footer.

    Arguments:
      image: An ImageHandler, either a raw vbmeta image or an image
          with a footer pointing at the vbmeta blob.

    Returns:
      A tuple where the first argument is a AvbFooter (None if there
      is no footer on the image), the second argument is a
      AvbVBMetaHeader, the third argument is a list of
      AvbDescriptor-derived instances, and the fourth argument is the
      size of |image|.
    """
    assert isinstance(image, ImageHandler)
    footer = None
    # A footer, if present, occupies the last AvbFooter.SIZE bytes of
    # the image.
    image_size = image.care_size
    image.seek(image_size - AvbFooter.SIZE)
    try:
      footer = AvbFooter(image.read(AvbFooter.SIZE))
    except (LookupError, struct.error):
      # Nope, just seek back to the start.
      image.seek(0)

    # With a footer the vbmeta blob is wherever the footer points;
    # without one the whole image is the vbmeta blob (offset 0).
    vbmeta_offset = 0
    if footer:
      vbmeta_offset = footer.vbmeta_offset

    image.seek(vbmeta_offset)
    h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE))

    # Blob layout: header | authentication block | auxiliary block.
    # descriptors_offset in the header is relative to the start of the
    # auxiliary block.
    auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE
    aux_block_offset = auth_block_offset + h.authentication_data_block_size
    desc_start_offset = aux_block_offset + h.descriptors_offset
    image.seek(desc_start_offset)
    descriptors = parse_descriptors(image.read(h.descriptors_size))

    return footer, h, descriptors, image_size
1706
  def _get_cmdline_descriptor_for_dm_verity(self, image):
    """Generate kernel cmdline descriptor for dm-verity.

    Arguments:
      image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.

    Returns:
      A AvbKernelCmdlineDescriptor with dm-verity kernel cmdline
      instructions for the hashtree.

    Raises:
      AvbError: If |image| doesn't have a hashtree descriptor.

    """

    (_, _, descriptors, _) = self._parse_image(image)

    # Locate the hashtree descriptor; it carries every parameter the
    # dm-verity target needs. Only the first one found is used.
    ht = None
    for desc in descriptors:
      if isinstance(desc, AvbHashtreeDescriptor):
        ht = desc
        break

    if not ht:
      raise AvbError('No hashtree descriptor in given image')

    # Build a dm="..." kernel command-line fragment for the verity
    # target; the bootloader substitutes $(ANDROID_SYSTEM_PARTUUID).
    # NOTE(review): the '/' divisions below rely on Python 2 integer
    # division; under Python 3 they would produce floats - confirm the
    # intended interpreter before porting.
    c = 'dm="1 vroot none ro 1,'
    c += '0 '  # start
    c += '{} '.format((ht.image_size / 512))  # size (# sectors)
    c += 'verity {} '.format(ht.dm_verity_version)  # type and version
    c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) '  # data_dev
    c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) '  # hash_dev
    c += '{} '.format(ht.data_block_size)  # data_block
    c += '{} '.format(ht.hash_block_size)  # hash_block
    c += '{} '.format(ht.image_size / ht.data_block_size)  # #blocks
    c += '{} '.format(ht.image_size / ht.data_block_size)  # hash_offset
    c += '{} '.format(ht.hash_algorithm)  # hash_alg
    c += '{} '.format(str(ht.root_digest).encode('hex'))  # root_digest
    c += '{}'.format(str(ht.salt).encode('hex'))  # salt
    c += '"'

    desc = AvbKernelCmdlineDescriptor()
    desc.kernel_cmdline = c
    return desc
1751
1752 def make_vbmeta_image(self, output, chain_partitions, algorithm_name,
1753 key_path, rollback_index, props, props_from_file,
1754 kernel_cmdlines,
1755 generate_dm_verity_cmdline_from_hashtree,
1756 include_descriptors_from_image):
1757 """Implements the 'make_vbmeta_image' command.
1758
1759 Arguments:
1760 output: File to write the image to.
1761 chain_partitions: List of partitions to chain.
1762 algorithm_name: Name of algorithm to use.
1763 key_path: Path to key to use or None.
1764 rollback_index: The rollback index to use.
1765 props: Properties to insert (list of strings of the form 'key:value').
1766 props_from_file: Properties to insert (list of strings 'key:<path>').
1767 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1768 generate_dm_verity_cmdline_from_hashtree: None or file to generate from.
1769 include_descriptors_from_image: List of file objects with descriptors.
1770
1771 Raises:
1772 AvbError: If a chained partition is malformed.
1773 """
1774
1775 descriptors = []
1776
1777 # Insert chained partition descriptors.
1778 if chain_partitions:
1779 for cp in chain_partitions:
1780 cp_tokens = cp.split(':')
1781 if len(cp_tokens) != 3:
1782 raise AvbError('Malformed chained partition "{}".'.format(cp))
1783 desc = AvbChainPartitionDescriptor()
1784 desc.partition_name = cp_tokens[0]
1785 desc.rollback_index_slot = int(cp_tokens[1])
1786 if desc.rollback_index_slot < 1:
1787 raise AvbError('Rollback index slot must be 1 or larger.')
1788 file_path = cp_tokens[2]
1789 desc.public_key = open(file_path, 'rb').read()
1790 descriptors.append(desc)
1791
1792 vbmeta_blob = self._generate_vbmeta_blob(
1793 algorithm_name, key_path, descriptors, rollback_index, props,
1794 props_from_file, kernel_cmdlines,
1795 generate_dm_verity_cmdline_from_hashtree,
1796 include_descriptors_from_image)
1797
1798 # Write entire vbmeta blob (header, authentication, auxiliary).
1799 output.seek(0)
1800 output.write(vbmeta_blob)
1801
1802 def _generate_vbmeta_blob(self, algorithm_name, key_path, descriptors,
1803 rollback_index, props, props_from_file,
1804 kernel_cmdlines,
1805 generate_dm_verity_cmdline_from_hashtree,
1806 include_descriptors_from_image):
1807 """Generates a VBMeta blob.
1808
1809 This blob contains the header (struct AvbVBMetaHeader), the
1810 authentication data block (which contains the hash and signature
1811 for the header and auxiliary block), and the auxiliary block
1812 (which contains descriptors, the public key used, and other data).
1813
1814 The |key| parameter can |None| only if the |algorithm_name| is
1815 'NONE'.
1816
1817 Arguments:
1818 algorithm_name: The algorithm name as per the ALGORITHMS dict.
1819 key_path: The path to the .pem file used to sign the blob.
1820 descriptors: A list of descriptors to insert or None.
1821 rollback_index: The rollback index to use.
1822 props: Properties to insert (List of strings of the form 'key:value').
1823 props_from_file: Properties to insert (List of strings 'key:<path>').
1824 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1825 generate_dm_verity_cmdline_from_hashtree: None or file to generate
1826 dm-verity kernel cmdline from.
1827 include_descriptors_from_image: List of file objects for which
1828 to insert descriptors from.
1829
1830 Returns:
1831 A bytearray() with the VBMeta blob.
1832
1833 Raises:
1834 Exception: If the |algorithm_name| is not found, if no key has
1835 been given and the given algorithm requires one, or the key is
1836 of the wrong size.
1837
1838 """
1839 try:
1840 alg = ALGORITHMS[algorithm_name]
1841 except KeyError:
1842 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name))
1843
1844 # Descriptors.
1845 encoded_descriptors = bytearray()
1846 if descriptors:
1847 for desc in descriptors:
1848 encoded_descriptors.extend(desc.encode())
1849
1850 # Add properties.
1851 if props:
1852 for prop in props:
1853 idx = prop.find(':')
1854 if idx == -1:
1855 raise AvbError('Malformed property "{}".'.format(prop))
1856 desc = AvbPropertyDescriptor()
1857 desc.key = prop[0:idx]
1858 desc.value = prop[(idx + 1):]
1859 encoded_descriptors.extend(desc.encode())
1860 if props_from_file:
1861 for prop in props_from_file:
1862 idx = prop.find(':')
1863 if idx == -1:
1864 raise AvbError('Malformed property "{}".'.format(prop))
1865 desc = AvbPropertyDescriptor()
1866 desc.key = prop[0:idx]
1867 desc.value = prop[(idx + 1):]
1868 file_path = prop[(idx + 1):]
1869 desc.value = open(file_path, 'rb').read()
1870 encoded_descriptors.extend(desc.encode())
1871
1872 # Add AvbKernelCmdline descriptor for dm-verity, if requested.
1873 if generate_dm_verity_cmdline_from_hashtree:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001874 image_handler = ImageHandler(
1875 generate_dm_verity_cmdline_from_hashtree.name)
David Zeuthen21e95262016-07-27 17:58:40 -04001876 encoded_descriptors.extend(self._get_cmdline_descriptor_for_dm_verity(
David Zeuthena4fee8b2016-08-22 15:20:43 -04001877 image_handler).encode())
David Zeuthen21e95262016-07-27 17:58:40 -04001878
1879 # Add kernel command-lines.
1880 if kernel_cmdlines:
1881 for i in kernel_cmdlines:
1882 desc = AvbKernelCmdlineDescriptor()
1883 desc.kernel_cmdline = i
1884 encoded_descriptors.extend(desc.encode())
1885
1886 # Add descriptors from other images.
1887 if include_descriptors_from_image:
1888 for image in include_descriptors_from_image:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001889 image_handler = ImageHandler(image.name)
1890 (_, _, image_descriptors, _) = self._parse_image(image_handler)
David Zeuthen21e95262016-07-27 17:58:40 -04001891 for desc in image_descriptors:
1892 encoded_descriptors.extend(desc.encode())
1893
1894 key = None
1895 encoded_key = bytearray()
1896 if alg.public_key_num_bytes > 0:
1897 if not key_path:
1898 raise AvbError('Key is required for algorithm {}'.format(
1899 algorithm_name))
1900 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1901 encoded_key = encode_rsa_key(key)
1902 if len(encoded_key) != alg.public_key_num_bytes:
1903 raise AvbError('Key is wrong size for algorithm {}'.format(
1904 algorithm_name))
1905
1906 h = AvbVBMetaHeader()
1907
1908 # For the Auxiliary data block, descriptors are stored at offset 0
1909 # and the public key is immediately after that.
1910 h.auxiliary_data_block_size = round_to_multiple(
1911 len(encoded_descriptors) + len(encoded_key), 64)
1912 h.descriptors_offset = 0
1913 h.descriptors_size = len(encoded_descriptors)
1914 h.public_key_offset = h.descriptors_size
1915 h.public_key_size = len(encoded_key)
1916
1917 # For the Authentication data block, the hash is first and then
1918 # the signature.
1919 h.authentication_data_block_size = round_to_multiple(
1920 alg.hash_num_bytes + alg.public_key_num_bytes, 64)
1921 h.algorithm_type = alg.algorithm_type
1922 h.hash_offset = 0
1923 h.hash_size = alg.hash_num_bytes
1924 # Signature offset and size - it's stored right after the hash
1925 # (in Authentication data block).
1926 h.signature_offset = alg.hash_num_bytes
1927 h.signature_size = alg.signature_num_bytes
1928
1929 h.rollback_index = rollback_index
1930
1931 # Generate Header data block.
1932 header_data_blob = h.encode()
1933
1934 # Generate Auxiliary data block.
1935 aux_data_blob = bytearray()
1936 aux_data_blob.extend(encoded_descriptors)
1937 aux_data_blob.extend(encoded_key)
1938 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob)
1939 aux_data_blob.extend('\0' * padding_bytes)
1940
1941 # Calculate the hash.
1942 binary_hash = bytearray()
1943 binary_signature = bytearray()
1944 if algorithm_name != 'NONE':
1945 if algorithm_name[0:6] == 'SHA256':
1946 ha = hashlib.sha256()
1947 elif algorithm_name[0:6] == 'SHA512':
1948 ha = hashlib.sha512()
1949 else:
1950 raise AvbError('Unsupported algorithm {}.'.format(algorithm_name))
1951 ha.update(header_data_blob)
1952 ha.update(aux_data_blob)
1953 binary_hash.extend(ha.digest())
1954
1955 # Calculate the signature.
1956 p = subprocess.Popen(
1957 ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'],
1958 stdin=subprocess.PIPE,
1959 stdout=subprocess.PIPE,
1960 stderr=subprocess.PIPE)
1961 padding_and_hash = str(bytearray(alg.padding)) + binary_hash
1962 (pout, perr) = p.communicate(padding_and_hash)
1963 retcode = p.wait()
1964 if retcode != 0:
1965 raise AvbError('Error signing: {}'.format(perr))
1966 binary_signature.extend(pout)
1967
1968 # Generate Authentication data block.
1969 auth_data_blob = bytearray()
1970 auth_data_blob.extend(binary_hash)
1971 auth_data_blob.extend(binary_signature)
1972 padding_bytes = h.authentication_data_block_size - len(auth_data_blob)
1973 auth_data_blob.extend('\0' * padding_bytes)
1974
1975 return header_data_blob + auth_data_blob + aux_data_blob
1976
1977 def extract_public_key(self, key_path, output):
1978 """Implements the 'extract_public_key' command.
1979
1980 Arguments:
1981 key_path: The path to a RSA private key file.
1982 output: The file to write to.
1983 """
1984 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1985 write_rsa_key(output, key)
1986
David Zeuthena4fee8b2016-08-22 15:20:43 -04001987 def add_hash_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04001988 hash_algorithm, salt, algorithm_name, key_path,
1989 rollback_index, props, props_from_file, kernel_cmdlines,
1990 generate_dm_verity_cmdline_from_hashtree,
1991 include_descriptors_from_image):
David Zeuthena4fee8b2016-08-22 15:20:43 -04001992 """Implementation of the add_hash_footer on unsparse images.
David Zeuthen21e95262016-07-27 17:58:40 -04001993
1994 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001995 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04001996 partition_size: Size of partition.
1997 partition_name: Name of partition (without A/B suffix).
1998 hash_algorithm: Hash algorithm to use.
1999 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
2000 algorithm_name: Name of algorithm to use.
2001 key_path: Path to key to use or None.
2002 rollback_index: Rollback index.
2003 props: Properties to insert (List of strings of the form 'key:value').
2004 props_from_file: Properties to insert (List of strings 'key:<path>').
2005 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
2006 generate_dm_verity_cmdline_from_hashtree: None or file to generate
2007 dm-verity kernel cmdline from.
2008 include_descriptors_from_image: List of file objects for which
2009 to insert descriptors from.
David Zeuthena4fee8b2016-08-22 15:20:43 -04002010
2011 Raises:
2012 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04002013 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002014 image = ImageHandler(image_filename)
2015
2016 if partition_size % image.block_size != 0:
2017 raise AvbError('Partition size of {} is not a multiple of the image '
2018 'block size {}.'.format(partition_size,
2019 image.block_size))
2020
David Zeuthen21e95262016-07-27 17:58:40 -04002021 # If there's already a footer, truncate the image to its original
2022 # size. This way 'avbtool add_hash_footer' is idempotent (modulo
2023 # salts).
David Zeuthena4fee8b2016-08-22 15:20:43 -04002024 image_size = image.care_size
David Zeuthen21e95262016-07-27 17:58:40 -04002025 image.seek(image_size - AvbFooter.SIZE)
2026 try:
2027 footer = AvbFooter(image.read(AvbFooter.SIZE))
2028 # Existing footer found. Just truncate.
2029 original_image_size = footer.original_image_size
2030 image_size = footer.original_image_size
2031 image.truncate(image_size)
2032 except (LookupError, struct.error):
2033 original_image_size = image_size
2034
2035 # If anything goes wrong from here-on, restore the image back to
2036 # its original size.
2037 try:
2038 digest_size = len(hashlib.new(name=hash_algorithm).digest())
2039 if salt:
2040 salt = salt.decode('hex')
2041 else:
2042 if salt is None:
2043 # If salt is not explicitly specified, choose a hash
2044 # that's the same size as the hash size.
2045 hash_size = digest_size
2046 salt = open('/dev/urandom').read(hash_size)
2047 else:
2048 salt = ''
2049
2050 hasher = hashlib.new(name=hash_algorithm, string=salt)
2051 # TODO(zeuthen): might want to read this in chunks to avoid
2052 # memory pressure, then again, this is only supposed to be used
2053 # on kernel/initramfs partitions. Possible optimization.
2054 image.seek(0)
2055 hasher.update(image.read(image_size))
2056 digest = hasher.digest()
2057
2058 h_desc = AvbHashDescriptor()
2059 h_desc.image_size = image_size
2060 h_desc.hash_algorithm = hash_algorithm
2061 h_desc.partition_name = partition_name
2062 h_desc.salt = salt
2063 h_desc.digest = digest
2064
2065 # Generate the VBMeta footer.
David Zeuthen21e95262016-07-27 17:58:40 -04002066 vbmeta_blob = self._generate_vbmeta_blob(
2067 algorithm_name, key_path, [h_desc], rollback_index, props,
2068 props_from_file, kernel_cmdlines,
2069 generate_dm_verity_cmdline_from_hashtree,
2070 include_descriptors_from_image)
2071
David Zeuthena4fee8b2016-08-22 15:20:43 -04002072 # We might have a DONT_CARE hole at the end (in which case
2073 # |image.care_size| < |image.image_size|) so truncate here.
2074 image.truncate(image.care_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002075
David Zeuthena4fee8b2016-08-22 15:20:43 -04002076 # If the image isn't sparse, its size might not be a multiple of
2077 # the block size. This will screw up padding later so just grow it.
2078 if image.care_size % image.block_size != 0:
2079 assert not image.is_sparse
2080 padding_needed = image.block_size - (image.care_size%image.block_size)
2081 image.truncate(image.care_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002082
David Zeuthena4fee8b2016-08-22 15:20:43 -04002083 # The append_raw() method requires content with size being a
2084 # multiple of |block_size| so add padding as needed. Also record
2085 # where this is written to since we'll need to put that in the
2086 # footer.
2087 vbmeta_offset = image.care_size
2088 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2089 len(vbmeta_blob))
2090 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2091 image.append_raw(vbmeta_blob_with_padding)
2092 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding)
2093
2094 # Now insert a DONT_CARE chunk with enough bytes such that the
2095 # final Footer block is at the end of partition_size..
2096 image.append_dont_care(partition_size - vbmeta_end_offset -
2097 1*image.block_size)
2098
2099 # Generate the Footer that tells where the VBMeta footer
2100 # is. Also put enough padding in the front of the footer since
2101 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002102 footer = AvbFooter()
2103 footer.original_image_size = original_image_size
2104 footer.vbmeta_offset = vbmeta_offset
2105 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002106 footer_blob = footer.encode()
2107 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2108 footer_blob)
2109 image.append_raw(footer_blob_with_padding)
2110
David Zeuthen21e95262016-07-27 17:58:40 -04002111 except:
2112 # Truncate back to original size, then re-raise
2113 image.truncate(original_image_size)
2114 raise
2115
David Zeuthena4fee8b2016-08-22 15:20:43 -04002116 def add_hashtree_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04002117 hash_algorithm, block_size, salt, algorithm_name,
2118 key_path, rollback_index, props, props_from_file,
2119 kernel_cmdlines,
2120 generate_dm_verity_cmdline_from_hashtree,
2121 include_descriptors_from_image):
2122 """Implements the 'add_hashtree_footer' command.
2123
2124 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for
2125 more information about dm-verity and these hashes.
2126
2127 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002128 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04002129 partition_size: Size of partition.
2130 partition_name: Name of partition (without A/B suffix).
2131 hash_algorithm: Hash algorithm to use.
2132 block_size: Block size to use.
2133 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
2134 algorithm_name: Name of algorithm to use.
2135 key_path: Path to key to use or None.
2136 rollback_index: Rollback index.
2137 props: Properties to insert (List of strings of the form 'key:value').
2138 props_from_file: Properties to insert (List of strings 'key:<path>').
2139 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
2140 generate_dm_verity_cmdline_from_hashtree: None or file to generate
2141 dm-verity kernel cmdline from.
2142 include_descriptors_from_image: List of file objects for which
2143 to insert descriptors from.
David Zeuthena4fee8b2016-08-22 15:20:43 -04002144
2145 Raises:
2146 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04002147 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002148 image = ImageHandler(image_filename)
2149
2150 if partition_size % image.block_size != 0:
2151 raise AvbError('Partition size of {} is not a multiple of the image '
2152 'block size {}.'.format(partition_size,
2153 image.block_size))
2154
David Zeuthen21e95262016-07-27 17:58:40 -04002155 # If there's already a footer, truncate the image to its original
2156 # size. This way 'avbtool add_hashtree_footer' is idempotent
2157 # (modulo salts).
David Zeuthena4fee8b2016-08-22 15:20:43 -04002158 image_size = image.care_size
David Zeuthen21e95262016-07-27 17:58:40 -04002159 image.seek(image_size - AvbFooter.SIZE)
2160 try:
2161 footer = AvbFooter(image.read(AvbFooter.SIZE))
2162 # Existing footer found. Just truncate.
2163 original_image_size = footer.original_image_size
2164 image_size = footer.original_image_size
2165 image.truncate(image_size)
2166 except (LookupError, struct.error):
2167 original_image_size = image_size
2168
2169 # If anything goes wrong from here-on, restore the image back to
2170 # its original size.
2171 try:
2172 # Ensure image is multiple of block_size.
2173 rounded_image_size = round_to_multiple(image_size, block_size)
2174 if rounded_image_size > image_size:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002175 image.append_raw('\0' * (rounded_image_size - image_size))
David Zeuthen21e95262016-07-27 17:58:40 -04002176 image_size = rounded_image_size
2177
David Zeuthen21e95262016-07-27 17:58:40 -04002178 digest_size = len(hashlib.new(name=hash_algorithm).digest())
2179 digest_padding = round_to_pow2(digest_size) - digest_size
2180
2181 if salt:
2182 salt = salt.decode('hex')
2183 else:
2184 if salt is None:
2185 # If salt is not explicitly specified, choose a hash
2186 # that's the same size as the hash size.
2187 hash_size = digest_size
2188 salt = open('/dev/urandom').read(hash_size)
2189 else:
2190 salt = ''
2191
David Zeuthena4fee8b2016-08-22 15:20:43 -04002192 # Hashes are stored upside down so we need to calculate hash
David Zeuthen21e95262016-07-27 17:58:40 -04002193 # offsets in advance.
2194 (hash_level_offsets, tree_size) = calc_hash_level_offsets(
2195 image_size, block_size, digest_size + digest_padding)
2196
David Zeuthena4fee8b2016-08-22 15:20:43 -04002197 # We might have a DONT_CARE hole at the end (in which case
2198 # |image.care_size| < |image.image_size|) so truncate here.
2199 image.truncate(image.care_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002200
David Zeuthena4fee8b2016-08-22 15:20:43 -04002201 # If the image isn't sparse, its size might not be a multiple of
2202 # the block size. This will screw up padding later so just grow it.
2203 if image.care_size % image.block_size != 0:
2204 assert not image.is_sparse
2205 padding_needed = image.block_size - (image.care_size%image.block_size)
2206 image.truncate(image.care_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002207
David Zeuthena4fee8b2016-08-22 15:20:43 -04002208 # Generate the tree and add padding as needed.
2209 tree_offset = image.care_size
2210 root_digest, hash_tree = generate_hash_tree(image, image_size,
2211 block_size,
2212 hash_algorithm, salt,
2213 digest_padding,
2214 hash_level_offsets,
2215 tree_size)
2216 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) -
2217 len(hash_tree))
2218 hash_tree_with_padding = hash_tree + '\0'*padding_needed
2219 image.append_raw(hash_tree_with_padding)
2220
2221 # Generate HashtreeDescriptor with details about the tree we
2222 # just generated.
David Zeuthen21e95262016-07-27 17:58:40 -04002223 ht_desc = AvbHashtreeDescriptor()
2224 ht_desc.dm_verity_version = 1
2225 ht_desc.image_size = image_size
2226 ht_desc.tree_offset = tree_offset
2227 ht_desc.tree_size = tree_size
2228 ht_desc.data_block_size = block_size
2229 ht_desc.hash_block_size = block_size
2230 ht_desc.hash_algorithm = hash_algorithm
2231 ht_desc.partition_name = partition_name
2232 ht_desc.salt = salt
2233 ht_desc.root_digest = root_digest
2234
David Zeuthena4fee8b2016-08-22 15:20:43 -04002235 # Generate the VBMeta footer and add padding as needed.
2236 vbmeta_offset = tree_offset + len(hash_tree_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002237 vbmeta_blob = self._generate_vbmeta_blob(
2238 algorithm_name, key_path, [ht_desc], rollback_index, props,
2239 props_from_file, kernel_cmdlines,
2240 generate_dm_verity_cmdline_from_hashtree,
2241 include_descriptors_from_image)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002242 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2243 len(vbmeta_blob))
2244 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2245 image.append_raw(vbmeta_blob_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002246
David Zeuthena4fee8b2016-08-22 15:20:43 -04002247 # Now insert a DONT_CARE chunk with enough bytes such that the
2248 # final Footer block is at the end of partition_size..
2249 image.append_dont_care(partition_size - image.care_size -
2250 1*image.block_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002251
David Zeuthena4fee8b2016-08-22 15:20:43 -04002252 # Generate the Footer that tells where the VBMeta footer
2253 # is. Also put enough padding in the front of the footer since
2254 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002255 footer = AvbFooter()
2256 footer.original_image_size = original_image_size
2257 footer.vbmeta_offset = vbmeta_offset
2258 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002259 footer_blob = footer.encode()
2260 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2261 footer_blob)
2262 image.append_raw(footer_blob_with_padding)
2263
David Zeuthen21e95262016-07-27 17:58:40 -04002264 except:
2265 # Truncate back to original size, then re-raise
2266 image.truncate(original_image_size)
2267 raise
2268
2269
def calc_hash_level_offsets(image_size, block_size, digest_size):
  """Calculate the offsets of all the hash-levels in a Merkle-tree.

  Arguments:
    image_size: The size of the image to calculate a Merkle-tree for.
    block_size: The block size, e.g. 4096.
    digest_size: The size of each hash, e.g. 32 for SHA-256.

  Returns:
    A tuple where the first element is an array of offsets (one per
    hash level, top level first) and the second is the size of the
    tree, in bytes. For an image of at most one block the offset list
    is empty and the tree size is 0.
  """
  level_offsets = []
  level_sizes = []
  tree_size = 0

  # Walk from the data level upwards until a level fits in one block.
  num_levels = 0
  size = image_size
  while size > block_size:
    # Ceiling division; use '//' so the result stays an integer under
    # both Python 2 and Python 3 (plain '/' would yield a float on 3).
    num_blocks = (size + block_size - 1) // block_size
    level_size = round_to_multiple(num_blocks * digest_size, block_size)

    level_sizes.append(level_size)
    tree_size += level_size
    num_levels += 1

    size = level_size

  # Levels are stored "upside down": level n is preceded in the tree
  # by all levels above it, so its offset is the sum of their sizes.
  for n in range(0, num_levels):
    offset = 0
    for m in range(n + 1, num_levels):
      offset += level_sizes[m]
    level_offsets.append(offset)

  return level_offsets, tree_size
David Zeuthen21e95262016-07-27 17:58:40 -04002305
2306
def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt,
                       digest_padding, hash_level_offsets, tree_size):
  """Generates a Merkle-tree for a file.

  Each level hashes block_size chunks of the level below (the image
  itself for the lowest level), salting every hash with |salt| and
  zero-padding the last chunk to a full block. Levels are stored
  "upside down" at the offsets from calc_hash_level_offsets().

  NOTE(review): hashlib.new(name=..., string=...) uses the Python 2
  keyword for the initial data argument. Also, if image_size <=
  block_size the while loop never runs and |level_output| is unbound
  at the final hasher.update() — presumably callers only pass images
  larger than one block; confirm.

  Args:
    image: The image, as a file.
    image_size: The size of the image.
    block_size: The block size, e.g. 4096.
    hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'.
    salt: The salt to use.
    digest_padding: The padding for each digest.
    hash_level_offsets: The offsets from calc_hash_level_offsets().
    tree_size: The size of the tree, in number of bytes.

  Returns:
    A tuple where the first element is the top-level hash and the
    second element is the hash-tree.
  """
  # Pre-sized output buffer; levels are written into it in place.
  hash_ret = bytearray(tree_size)
  hash_src_offset = 0
  hash_src_size = image_size
  level_num = 0
  # Keep building levels until one fits in a single block; that last
  # level is hashed once more below to produce the root digest.
  while hash_src_size > block_size:
    level_output = ''
    remaining = hash_src_size
    while remaining > 0:
      hasher = hashlib.new(name=hash_alg_name, string=salt)
      # Only read from the file for the first level - for subsequent
      # levels, access the array we're building.
      if level_num == 0:
        image.seek(hash_src_offset + hash_src_size - remaining)
        data = image.read(min(remaining, block_size))
      else:
        offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining
        data = hash_ret[offset:offset + block_size]
      hasher.update(data)

      remaining -= len(data)
      # Zero-pad a short tail chunk so every hash covers a full block.
      if len(data) < block_size:
        hasher.update('\0' * (block_size - len(data)))
      level_output += hasher.digest()
      # Pad each digest up to a power-of-two entry size.
      if digest_padding > 0:
        level_output += '\0' * digest_padding

    # Pad the level itself out to a whole number of blocks.
    padding_needed = (round_to_multiple(
        len(level_output), block_size) - len(level_output))
    level_output += '\0' * padding_needed

    # Copy level-output into resulting tree.
    offset = hash_level_offsets[level_num]
    hash_ret[offset:offset + len(level_output)] = level_output

    # Continue on to the next level.
    hash_src_size = len(level_output)
    level_num += 1

  # Root digest: salted hash of the final (single-block) level.
  hasher = hashlib.new(name=hash_alg_name, string=salt)
  hasher.update(level_output)
  return hasher.digest(), hash_ret
David Zeuthen21e95262016-07-27 17:58:40 -04002366
2367
class AvbTool(object):
  """Object for avbtool command-line tool.

  Builds the argparse command tree and dispatches each sub-command to
  the corresponding method on an Avb instance.
  """

  def __init__(self):
    """Initializer method."""
    # All real work is delegated to this Avb instance.
    self.avb = Avb()

  def _add_common_args(self, sub_parser):
    """Adds arguments used by several sub-commands.

    These are the signing/descriptor options shared by
    'make_vbmeta_image', 'add_hash_footer' and 'add_hashtree_footer'.

    Arguments:
      sub_parser: The parser to add arguments to.
    """
    sub_parser.add_argument('--algorithm',
                            help='Algorithm to use (default: NONE)',
                            metavar='ALGORITHM',
                            default='NONE')
    sub_parser.add_argument('--key',
                            help='Path to RSA private key file',
                            metavar='KEY',
                            required=False)
    sub_parser.add_argument('--rollback_index',
                            help='Rollback Index',
                            type=parse_number,
                            default=0)
    # Repeatable options use action='append' and arrive as lists (or
    # None when never given).
    sub_parser.add_argument('--prop',
                            help='Add property',
                            metavar='KEY:VALUE',
                            action='append')
    sub_parser.add_argument('--prop_from_file',
                            help='Add property from file',
                            metavar='KEY:PATH',
                            action='append')
    sub_parser.add_argument('--kernel_cmdline',
                            help='Add kernel cmdline',
                            metavar='CMDLINE',
                            action='append')
    sub_parser.add_argument('--generate_dm_verity_cmdline_from_hashtree',
                            metavar='IMAGE',
                            help='Generate kernel cmdline for dm-verity',
                            type=argparse.FileType('rb'))
    sub_parser.add_argument('--include_descriptors_from_image',
                            help='Include descriptors from image',
                            metavar='IMAGE',
                            action='append',
                            type=argparse.FileType('rb'))

  def run(self, argv):
    """Command-line processor.

    Parses argv, then dispatches to the handler each sub-parser
    registered via set_defaults(func=...). AvbError is reported on
    stderr and exits with status 1.

    Arguments:
      argv: Pass sys.argv from main.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands')

    sub_parser = subparsers.add_parser('version',
                                       help='Prints version of avbtool.')
    sub_parser.set_defaults(func=self.version)

    sub_parser = subparsers.add_parser('extract_public_key',
                                       help='Extract public key.')
    sub_parser.add_argument('--key',
                            help='Path to RSA private key file',
                            required=True)
    sub_parser.add_argument('--output',
                            help='Output file name',
                            type=argparse.FileType('wb'),
                            required=True)
    sub_parser.set_defaults(func=self.extract_public_key)

    sub_parser = subparsers.add_parser('make_vbmeta_image',
                                       help='Makes a vbmeta image.')
    sub_parser.add_argument('--output',
                            help='Output file name',
                            type=argparse.FileType('wb'),
                            required=True)
    self._add_common_args(sub_parser)
    sub_parser.add_argument('--chain_partition',
                            help='Allow signed integrity-data for partition',
                            metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
                            action='append')
    sub_parser.set_defaults(func=self.make_vbmeta_image)

    sub_parser = subparsers.add_parser('add_hash_footer',
                                       help='Add hashes and footer to image.')
    # NOTE(review): 'rab+' is an unusual open-mode string (Python 2's
    # open() tolerates it); the handler only uses args.image.name and
    # reopens via ImageHandler — confirm the mode is intentional.
    sub_parser.add_argument('--image',
                            help='Image to add hashes to',
                            type=argparse.FileType('rab+'))
    sub_parser.add_argument('--partition_size',
                            help='Partition size',
                            type=parse_number,
                            required=True)
    sub_parser.add_argument('--partition_name',
                            help='Partition name',
                            required=True)
    sub_parser.add_argument('--hash_algorithm',
                            help='Hash algorithm to use (default: sha256)',
                            default='sha256')
    sub_parser.add_argument('--salt',
                            help='Salt in hex (default: /dev/urandom)')
    self._add_common_args(sub_parser)
    sub_parser.set_defaults(func=self.add_hash_footer)

    sub_parser = subparsers.add_parser('add_hashtree_footer',
                                       help='Add hashtree and footer to image.')
    sub_parser.add_argument('--image',
                            help='Image to add hashtree to',
                            type=argparse.FileType('rab+'))
    sub_parser.add_argument('--partition_size',
                            help='Partition size',
                            type=parse_number,
                            required=True)
    sub_parser.add_argument('--partition_name',
                            help='Partition name',
                            required=True)
    sub_parser.add_argument('--hash_algorithm',
                            help='Hash algorithm to use (default: sha1)',
                            default='sha1')
    sub_parser.add_argument('--salt',
                            help='Salt in hex (default: /dev/urandom)')
    sub_parser.add_argument('--block_size',
                            help='Block size (default: 4096)',
                            type=parse_number,
                            default=4096)
    self._add_common_args(sub_parser)
    sub_parser.set_defaults(func=self.add_hashtree_footer)

    sub_parser = subparsers.add_parser('erase_footer',
                                       help='Erase footer from an image.')
    sub_parser.add_argument('--image',
                            help='Image with a footer',
                            type=argparse.FileType('rwb+'),
                            required=True)
    sub_parser.add_argument('--keep_hashtree',
                            help='Keep the hashtree in the image',
                            action='store_true')
    sub_parser.set_defaults(func=self.erase_footer)

    sub_parser = subparsers.add_parser(
        'info_image',
        help='Show information about vbmeta or footer.')
    sub_parser.add_argument('--image',
                            help='Image to show information about',
                            type=argparse.FileType('rb'),
                            required=True)
    sub_parser.add_argument('--output',
                            help='Write info to file',
                            type=argparse.FileType('wt'),
                            default=sys.stdout)
    sub_parser.set_defaults(func=self.info_image)

    sub_parser = subparsers.add_parser('set_ab_metadata',
                                       help='Set A/B metadata.')
    sub_parser.add_argument('--misc_image',
                            help=('The misc image to modify. If the image does '
                                  'not exist, it will be created.'),
                            type=argparse.FileType('r+b'),
                            required=True)
    sub_parser.add_argument('--slot_data',
                            help=('Slot data of the form "priority", '
                                  '"tries_remaining", "sucessful_boot" for '
                                  'slot A followed by the same for slot B, '
                                  'separated by colons. The default value '
                                  'is 15:7:0:14:7:0.'),
                            default='15:7:0:14:7:0')
    sub_parser.set_defaults(func=self.set_ab_metadata)

    args = parser.parse_args(argv[1:])
    try:
      args.func(args)
    except AvbError as e:
      # e.message is Python 2 only; argv[0] is the program name.
      sys.stderr.write('{}: {}\n'.format(argv[0], e.message))
      sys.exit(1)

  def version(self, _):
    """Implements the 'version' sub-command."""
    # Python 2 print statement.
    print '{}.{}'.format(AVB_VERSION_MAJOR, AVB_VERSION_MINOR)

  def extract_public_key(self, args):
    """Implements the 'extract_public_key' sub-command."""
    self.avb.extract_public_key(args.key, args.output)

  def make_vbmeta_image(self, args):
    """Implements the 'make_vbmeta_image' sub-command."""
    self.avb.make_vbmeta_image(args.output, args.chain_partition,
                               args.algorithm, args.key, args.rollback_index,
                               args.prop, args.prop_from_file,
                               args.kernel_cmdline,
                               args.generate_dm_verity_cmdline_from_hashtree,
                               args.include_descriptors_from_image)

  def add_hash_footer(self, args):
    """Implements the 'add_hash_footer' sub-command."""
    # Pass the file name; the implementation reopens it via ImageHandler.
    self.avb.add_hash_footer(args.image.name, args.partition_size,
                             args.partition_name, args.hash_algorithm,
                             args.salt, args.algorithm, args.key,
                             args.rollback_index, args.prop,
                             args.prop_from_file, args.kernel_cmdline,
                             args.generate_dm_verity_cmdline_from_hashtree,
                             args.include_descriptors_from_image)

  def add_hashtree_footer(self, args):
    """Implements the 'add_hashtree_footer' sub-command."""
    self.avb.add_hashtree_footer(args.image.name, args.partition_size,
                                 args.partition_name, args.hash_algorithm,
                                 args.block_size, args.salt, args.algorithm,
                                 args.key, args.rollback_index, args.prop,
                                 args.prop_from_file, args.kernel_cmdline,
                                 args.generate_dm_verity_cmdline_from_hashtree,
                                 args.include_descriptors_from_image)

  def erase_footer(self, args):
    """Implements the 'erase_footer' sub-command."""
    self.avb.erase_footer(args.image.name, args.keep_hashtree)

  def set_ab_metadata(self, args):
    """Implements the 'set_ab_metadata' sub-command."""
    self.avb.set_ab_metadata(args.misc_image, args.slot_data)

  def info_image(self, args):
    """Implements the 'info_image' sub-command."""
    self.avb.info_image(args.image.name, args.output)
David Zeuthen21e95262016-07-27 17:58:40 -04002591
2592
# Script entry point: construct the tool and hand it the raw argv.
if __name__ == '__main__':
  AvbTool().run(sys.argv)