blob: 4a0ad79cb79a19c98d229ec5b21ed8e581cc3982 [file] [log] [blame]
David Zeuthen21e95262016-07-27 17:58:40 -04001#!/usr/bin/env python
2
3# Copyright 2016, The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17"""Command-line tool for working with Brillo Verified Boot images."""
18
19import argparse
David Zeuthena4fee8b2016-08-22 15:20:43 -040020import bisect
David Zeuthen21e95262016-07-27 17:58:40 -040021import hashlib
22import os
23import struct
24import subprocess
25import sys
26
27import Crypto.PublicKey.RSA
28
# Keep in sync with avb_vbmeta_header.h.
# NOTE(review): presumably these are the version numbers written into the
# vbmeta header this tool emits - confirm at the use sites.
AVB_VERSION_MAJOR = 1
AVB_VERSION_MINOR = 0
32
33
class AvbError(Exception):
  """Application-specific error type.

  Raised for problems that should be reported to the user as a plain
  message rather than with a Python stack trace.

  Attributes:
    message: Error message.
  """

  def __init__(self, message):
    super(AvbError, self).__init__(message)
46
47
class Algorithm(object):
  """Holds the parameters describing one signing algorithm.

  See the avb_vbmeta_header.h file for more details about
  algorithms.

  The constant |ALGORITHMS| is a dictionary from human-readable
  names (e.g 'SHA256_RSA2048') to instances of this class.

  Attributes:
    algorithm_type: Integer code corresponding to |AvbAlgorithmType|.
    hash_num_bytes: Number of bytes used to store the hash.
    signature_num_bytes: Number of bytes used to store the signature.
    public_key_num_bytes: Number of bytes used to store the public key.
    padding: Padding used for signature, if any.
  """

  def __init__(self, algorithm_type, hash_num_bytes, signature_num_bytes,
               public_key_num_bytes, padding):
    """Simply records every parameter as an instance attribute."""
    (self.algorithm_type,
     self.hash_num_bytes,
     self.signature_num_bytes,
     self.public_key_num_bytes,
     self.padding) = (algorithm_type, hash_num_bytes, signature_num_bytes,
                      public_key_num_bytes, padding)
72
# This must be kept in sync with the avb_crypto.h file.
#
# The PKCS #1 v1.5 padding is a blob of binary DER of ASN.1 and is
# obtained from section 5.2.2 of RFC 4880.
#
# public_key_num_bytes is the 8-byte key header plus the modulus and
# R^2 values (each as wide as the key).  Integer division (//) is used
# so the value stays an int on both Python 2 and Python 3; plain '/'
# would yield a float under Python 3.
ALGORITHMS = {
    'NONE': Algorithm(
        algorithm_type=0,        # AVB_ALGORITHM_TYPE_NONE
        hash_num_bytes=0,
        signature_num_bytes=0,
        public_key_num_bytes=0,
        padding=[]),
    'SHA256_RSA2048': Algorithm(
        algorithm_type=1,        # AVB_ALGORITHM_TYPE_SHA256_RSA2048
        hash_num_bytes=32,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*202 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA4096': Algorithm(
        algorithm_type=2,        # AVB_ALGORITHM_TYPE_SHA256_RSA4096
        hash_num_bytes=32,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*458 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA8192': Algorithm(
        algorithm_type=3,        # AVB_ALGORITHM_TYPE_SHA256_RSA8192
        hash_num_bytes=32,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*970 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA512_RSA2048': Algorithm(
        algorithm_type=4,        # AVB_ALGORITHM_TYPE_SHA512_RSA2048
        hash_num_bytes=64,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*170 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA4096': Algorithm(
        algorithm_type=5,        # AVB_ALGORITHM_TYPE_SHA512_RSA4096
        hash_num_bytes=64,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*426 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA8192': Algorithm(
        algorithm_type=6,        # AVB_ALGORITHM_TYPE_SHA512_RSA8192
        hash_num_bytes=64,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192//8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*938 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
}
163
164
def round_to_multiple(number, size):
  """Rounds |number| up to the nearest multiple of |size|.

  Arguments:
    number: The number to round up.
    size: The multiple to round up to.

  Returns:
    |number| itself when it is already a multiple of |size|, otherwise
    the next multiple of |size| above |number|.
  """
  leftover = number % size
  if not leftover:
    return number
  return number + (size - leftover)
180
181
def round_to_pow2(number):
  """Rounds |number| up to the next power of 2.

  Arguments:
    number: The number to round up.

  Returns:
    |number| itself when it is already a power of 2, otherwise the
    smallest power of 2 greater than |number|.
  """
  # bit_length() of (number - 1) is exactly the exponent needed.
  return 1 << (number - 1).bit_length()
194
195
def write_long(output, num_bits, value):
  """Writes a big-endian unsigned integer of |num_bits| bits to a stream.

  The most significant octet is emitted first.

  Arguments:
    output: The object to write the output to.
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.
  """
  for bit_pos in range(num_bits, 0, -8):
    output.write(struct.pack('!B', (value >> (bit_pos - 8)) & 0xff))
210
211
def encode_long(num_bits, value):
  """Encodes a big-endian unsigned integer of |num_bits| bits.

  The most significant octet comes first in the result.

  Arguments:
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.

  Returns:
    A bytearray() with the encoded long.
  """
  encoded = bytearray()
  for bit_pos in range(num_bits, 0, -8):
    encoded.append((value >> (bit_pos - 8)) & 0xff)
  return encoded
230
231
def egcd(a, b):
  """Computes the extended greatest common divisor of two numbers.

  Recursive extended Euclidean algorithm.

  Arguments:
    a: First number.
    b: Second number.

  Returns:
    A tuple (gcd, x, y) where |gcd| is the greatest common divisor of
    |a| and |b| and |a|*|x| + |b|*|y| = |gcd|.
  """
  if a == 0:
    # Base case: gcd(0, b) = b = 0*0 + b*1.
    return (b, 0, 1)
  gcd_val, coeff_y, coeff_x = egcd(b % a, a)
  return (gcd_val, coeff_x - (b // a) * coeff_y, coeff_y)
251
252
def modinv(a, m):
  """Computes the modular multiplicative inverse of |a| modulo |m|.

  Finds |x| such that |a| * |x| == 1 (modulo |m|). Such an |x| exists
  only when |a| and |m| are co-prime.

  Arguments:
    a: The number to calculate a modular inverse of.
    m: The modulo to use.

  Returns:
    The modular multiplicative inverse of |a| and |m|, or |None| when
    |a| and |m| are not co-prime.
  """
  gcd, x, _ = egcd(a, m)
  if gcd == 1:
    return x % m
  # No inverse exists unless gcd(a, m) == 1.
  return None
273
274
def parse_number(string):
  """Parses a string as an integer, honoring base prefixes.

  Shorthand for int(string, 0), intended for the |type| parameter of
  |ArgumentParser.add_argument()|. Unlike plain type=int, this accepts
  numbers in other bases, e.g. "0x1234".

  Arguments:
    string: The string to parse.

  Returns:
    The parsed integer.

  Raises:
    ValueError: If the number could not be parsed.
  """
  return int(string, 0)
293
294
def write_rsa_key(output, key):
  """Writes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This writes the |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    output: The object to write the output to.
    key: A Crypto.PublicKey.RSA object.
  """
  # Delegate to encode_rsa_key() so the header/Montgomery-parameter
  # serialization logic lives in exactly one place instead of being
  # duplicated in both functions.
  output.write(encode_rsa_key(key))
317
318
def encode_rsa_key(key):
  """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This creates a |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it: the modulus and the
  Montgomery parameter R^2 mod N used by the bootloader for fast
  modular multiplication.

  Note: this code is Python 2 only (|2L| long literals) and relies on
  PyCrypto's RSA object API (|key.size()| returns bits minus one).

  Arguments:
    key: A Crypto.PublicKey.RSA object.

  Returns:
    A bytearray() with the |AvbRSAPublicKeyHeader|.
  """
  ret = bytearray()
  # key.e is exponent
  # key.n is modulus
  # PyCrypto's size() is the bit size minus one, hence the +1.
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32), the Montgomery n0' word.
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  ret.extend(struct.pack('!II', key_num_bits, n0inv))
  ret.extend(encode_long(key_num_bits, key.n))
  ret.extend(encode_long(key_num_bits, rrmodn))
  return ret
345
346
def lookup_algorithm_by_type(alg_type):
  """Finds the |ALGORITHMS| entry with the given integer type code.

  Arguments:
    alg_type: The integer representing the type.

  Returns:
    A tuple with the algorithm name and an |Algorithm| instance.

  Raises:
    AvbError: If no algorithm has the given type.
  """
  for alg_name, alg_data in ALGORITHMS.items():
    if alg_data.algorithm_type == alg_type:
      return (alg_name, alg_data)
  raise AvbError('Unknown algorithm type {}'.format(alg_type))
364
365
class ImageChunk(object):
  """Data structure used for representing chunks in Android sparse files.

  Attributes:
    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
    chunk_offset: Offset in the sparse file where this chunk begins.
    output_offset: Offset in de-sparsified file where output begins.
    output_size: Number of bytes in output.
    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
  """

  FORMAT = '<2H2I'
  TYPE_RAW = 0xcac1
  TYPE_FILL = 0xcac2
  TYPE_DONT_CARE = 0xcac3
  TYPE_CRC32 = 0xcac4

  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
               input_offset, fill_data):
    """Initializes an ImageChunk object.

    Arguments:
      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
      chunk_offset: Offset in the sparse file where this chunk begins.
      output_offset: Offset in de-sparsified file.
      output_size: Number of bytes in output.
      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
      fill_data: Blob with data to fill if TYPE_FILL otherwise None.

    Raises:
      ValueError: If data is not well-formed.
    """
    self.chunk_type = chunk_type
    self.chunk_offset = chunk_offset
    self.output_offset = output_offset
    self.output_size = output_size
    self.input_offset = input_offset
    self.fill_data = fill_data
    self._validate()

  def _validate(self):
    """Checks the per-type invariants, raising ValueError on violation."""
    if self.chunk_type == self.TYPE_RAW:
      if self.fill_data is not None:
        raise ValueError('RAW chunk cannot have fill_data set.')
      if not self.input_offset:
        raise ValueError('RAW chunk must have input_offset set.')
    elif self.chunk_type == self.TYPE_FILL:
      if self.fill_data is None:
        raise ValueError('FILL chunk must have fill_data set.')
      if self.input_offset:
        raise ValueError('FILL chunk cannot have input_offset set.')
    elif self.chunk_type == self.TYPE_DONT_CARE:
      if self.fill_data is not None:
        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
      if self.input_offset:
        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
    else:
      # TYPE_CRC32 chunks are never represented as ImageChunk objects.
      raise ValueError('Invalid chunk type')
423
424
class ImageHandler(object):
  """Abstraction for image I/O with support for Android sparse images.

  This class provides an interface for working with image files that
  may be using the Android Sparse Image format. When an instance is
  constructed, we test whether it's an Android sparse file. If so,
  operations will be on the sparse file by interpreting the sparse
  format, otherwise they will be directly on the file. Either way the
  operations do the same.

  For reading, this interface mimics a file object - it has seek(),
  tell(), and read() methods. For writing, only truncation
  (truncate()) and appending is supported (append_raw() and
  append_dont_care()). Additionally, data can only be written in units
  of the block size.

  Attributes:
    is_sparse: Whether the file being operated on is sparse.
    block_size: The block size, typically 4096.
    image_size: The size of the unsparsified file.
    care_size: Position in the unsparsified file where only
               DONT_CARE data follows.
  """
  # See system/core/libsparse/sparse_format.h for details.
  MAGIC = 0xed26ff3a
  HEADER_FORMAT = '<I4H4I'

  # These are formats and offset of just the |total_chunks| and
  # |total_blocks| fields so they can be patched in-place when appending.
  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16

  def __init__(self, image_filename):
    """Initializes an image handler.

    Arguments:
      image_filename: The name of the file to operate on.

    Raises:
      ValueError: If data in the file is invalid.
    """
    self._image_filename = image_filename
    self._read_header()

  def _read_header(self):
    """Initializes internal data structures used for reading file.

    This may be called multiple times and is typically called after
    modifying the file (e.g. appending, truncation).

    Raises:
      ValueError: If data in the file is invalid.
    """
    # Defaults for the non-sparse case; overwritten below if the file
    # turns out to be an Android sparse image.
    self.is_sparse = False
    self.block_size = 4096
    self._file_pos = 0
    self._image = open(self._image_filename, 'r+b')
    self._image.seek(0, os.SEEK_END)
    self.care_size = self._image.tell()
    self.image_size = self._image.tell()

    self._image.seek(0, os.SEEK_SET)
    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
     block_size, self._num_total_blocks, self._num_total_chunks,
     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
    if magic != self.MAGIC:
      # Not a sparse image, our job here is done.
      return
    if not (major_version == 1 and minor_version == 0):
      raise ValueError('Encountered sparse image format version {}.{} but '
                       'only 1.0 is supported'.format(major_version,
                                                      minor_version))
    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
      raise ValueError('Unexpected file_hdr_sz value {}.'.
                       format(file_hdr_sz))
    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
                       format(chunk_hdr_sz))

    self.block_size = block_size

    # Build a list of chunks by parsing the file.
    self._chunks = []

    # Find the smallest offset where only "Don't care" chunks
    # follow. This will be the size of the content in the sparse
    # image.
    offset = 0
    output_offset = 0
    last_dont_care_section_output_offset = None
    last_section_was_dont_care = False
    for _ in xrange(1, self._num_total_chunks + 1):
      chunk_offset = self._image.tell()

      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
                                                          header_bin)
      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)

      last_section_was_dont_care = False

      if chunk_type == ImageChunk.TYPE_RAW:
        if data_sz != (chunk_sz * self.block_size):
          raise ValueError('Raw chunk input size ({}) does not match output '
                           'size ({})'.
                           format(data_sz, chunk_sz*self.block_size))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       self._image.tell(),
                                       None))
        # Skip over the raw payload; only its offset was recorded above.
        self._image.read(data_sz)

      elif chunk_type == ImageChunk.TYPE_FILL:
        if data_sz != 4:
          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
                           'has {}'.format(data_sz))
        fill_data = self._image.read(4)
        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       fill_data))
      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
        if data_sz != 0:
          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
                           format(data_sz))
        else:
          # Remember where the current run of DONT_CARE chunks started,
          # in case the file ends with it (used for |care_size|).
          if not last_section_was_dont_care:
            last_dont_care_section_output_offset = output_offset
            last_section_was_dont_care = True
        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       None))
      elif chunk_type == ImageChunk.TYPE_CRC32:
        if data_sz != 4:
          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
                           'this has {}'.format(data_sz))
        # The CRC is not verified, just skipped.
        self._image.read(4)
      else:
        raise ValueError('Unknown chunk type {}'.format(chunk_type))

      offset += chunk_sz
      output_offset += chunk_sz*self.block_size

    # Record where sparse data ends.
    self._sparse_end = self._image.tell()

    # Now that we've traversed all chunks, sanity check.
    if self._num_total_blocks != offset:
      raise ValueError('The header said we should have {} output blocks, '
                       'but we saw {}'.format(self._num_total_blocks, offset))
    junk_len = len(self._image.read())
    if junk_len > 0:
      raise ValueError('There were {} bytes of extra data at the end of the '
                       'file.'.format(junk_len))

    # Assign |image_size| and |care_size| attributes.
    self.image_size = output_offset
    if last_section_was_dont_care:
      self.care_size = last_dont_care_section_output_offset
    else:
      self.care_size = output_offset

    # This is used when bisecting in read() to find the initial slice.
    self._chunk_output_offsets = [i.output_offset for i in self._chunks]

    self.is_sparse = True

  def _update_chunks_and_blocks(self):
    """Helper function to update the image header.

    The |total_chunks| and |total_blocks| fields in the header
    will be set to the values of the |_num_total_blocks| and
    |_num_total_chunks| attributes.
    """
    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
                                  self._num_total_blocks,
                                  self._num_total_chunks))

  def append_dont_care(self, num_bytes):
    """Appends a DONT_CARE chunk to the sparse file.

    The given number of bytes must be a multiple of the block size.

    Arguments:
      num_bytes: Size in number of bytes of the DONT_CARE chunk.
    """
    assert num_bytes % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # This is more efficient than writing NUL bytes since it'll add
      # a hole on file systems that support sparse files (native
      # sparse, not Android sparse).
      self._image.truncate(self._image.tell() + num_bytes)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += num_bytes / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_DONT_CARE,
                                  0,  # Reserved
                                  num_bytes / self.block_size,
                                  struct.calcsize(ImageChunk.FORMAT)))
    # Re-parse so the in-memory chunk list reflects the new file contents.
    self._read_header()

  def append_raw(self, data):
    """Appends a RAW chunk to the sparse file.

    The length of the given data must be a multiple of the block size.

    Arguments:
      data: Data to append.
    """
    assert len(data) % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(data)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += len(data) / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_RAW,
                                  0,  # Reserved
                                  len(data) / self.block_size,
                                  len(data) +
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(data)
    # Re-parse so the in-memory chunk list reflects the new file contents.
    self._read_header()

  def append_fill(self, fill_data, size):
    """Appends a FILL chunk to the sparse file.

    The total length of the fill data must be a multiple of the block size.

    Arguments:
      fill_data: Fill data to append - must be four bytes.
      size: Number of bytes of fill - must be a multiple of four and the
            block size.
    """
    assert len(fill_data) == 4
    assert size % 4 == 0
    assert size % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # Non-sparse files get the fill pattern written out literally.
      self._image.write(fill_data * (size/4))
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += size / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_FILL,
                                  0,  # Reserved
                                  size / self.block_size,
                                  4 + struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(fill_data)
    # Re-parse so the in-memory chunk list reflects the new file contents.
    self._read_header()

  def seek(self, offset):
    """Sets the cursor position for reading from unsparsified file.

    Arguments:
      offset: Offset to seek to from the beginning of the file.
    """
    self._file_pos = offset

  def read(self, size):
    """Reads data from the unsparsified file.

    This method may return fewer than |size| bytes of data if the end
    of the file was encountered.

    The file cursor for reading is advanced by the number of bytes
    read.

    Arguments:
      size: Number of bytes to read.

    Returns:
      The data.
    """
    if not self.is_sparse:
      self._image.seek(self._file_pos)
      data = self._image.read(size)
      self._file_pos += len(data)
      return data

    # Iterate over all chunks, starting from the one covering |_file_pos|.
    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
                                    self._file_pos) - 1
    data = bytearray()
    to_go = size
    while to_go > 0:
      chunk = self._chunks[chunk_idx]
      chunk_pos_offset = self._file_pos - chunk.output_offset
      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)

      if chunk.chunk_type == ImageChunk.TYPE_RAW:
        self._image.seek(chunk.input_offset + chunk_pos_offset)
        data.extend(self._image.read(chunk_pos_to_go))
      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
        # Replicate the 4-byte fill pattern enough times (+2 covers the
        # partial words at both ends), then slice at the right phase.
        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
        offset_mod = chunk_pos_offset % len(chunk.fill_data)
        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
      else:
        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
        data.extend('\0' * chunk_pos_to_go)

      to_go -= chunk_pos_to_go
      self._file_pos += chunk_pos_to_go
      chunk_idx += 1
      # Generate partial read in case of EOF.
      if chunk_idx >= len(self._chunks):
        break

    return data

  def tell(self):
    """Returns the file cursor position for reading from unsparsified file.

    Returns:
      The file cursor position for reading.
    """
    return self._file_pos

  def truncate(self, size):
    """Truncates the unsparsified file.

    Arguments:
      size: Desired size of unsparsified file.

    Raises:
      ValueError: If desired size isn't a multiple of the block size.
    """
    if not self.is_sparse:
      self._image.truncate(size)
      self._read_header()
      return

    if size % self.block_size != 0:
      raise ValueError('Cannot truncate to a size which is not a multiple '
                       'of the block size')

    if size == self.image_size:
      # Trivial case where there's nothing to do.
      return
    elif size < self.image_size:
      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
      chunk = self._chunks[chunk_idx]
      if chunk.output_offset != size:
        # Truncation in the middle of a chunk - need to keep the chunk
        # and modify it.
        chunk_idx_for_update = chunk_idx + 1
        num_to_keep = size - chunk.output_offset
        assert num_to_keep % self.block_size == 0
        if chunk.chunk_type == ImageChunk.TYPE_RAW:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
          data_sz = num_to_keep
        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + 4)
          data_sz = 4
        else:
          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
          data_sz = 0
        chunk_sz = num_to_keep/self.block_size
        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
        # Rewrite the chunk header in place with the reduced size.
        self._image.seek(chunk.chunk_offset)
        self._image.write(struct.pack(ImageChunk.FORMAT,
                                      chunk.chunk_type,
                                      0,  # Reserved
                                      chunk_sz,
                                      total_sz))
        chunk.output_size = num_to_keep
      else:
        # Truncation at chunk boundary.
        truncate_at = chunk.chunk_offset
        chunk_idx_for_update = chunk_idx

      self._num_total_chunks = chunk_idx_for_update
      self._num_total_blocks = 0
      for i in range(0, chunk_idx_for_update):
        self._num_total_blocks += self._chunks[i].output_size / self.block_size
      self._update_chunks_and_blocks()
      self._image.truncate(truncate_at)

      # We've modified the file so re-read all data.
      self._read_header()
    else:
      # Truncating to grow - just add a DONT_CARE section.
      self.append_dont_care(size - self.image_size)
842
843
class AvbDescriptor(object):
  """Base class for AVB descriptors.

  See the |AvbDescriptor| C struct for more information.

  Attributes:
    tag: The tag identifying what kind of descriptor this is.
    data: The data in the descriptor.
  """

  SIZE = 16
  FORMAT_STRING = ('!QQ')  # tag, num_bytes_following (descriptor header)

  def __init__(self, data):
    """Initializes a new descriptor, parsing |data| when given.

    Arguments:
      data: If not None, must be a bytearray().

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.tag = None
      self.data = None
      return
    self.tag, num_bytes_following = struct.unpack(
        self.FORMAT_STRING, data[0:self.SIZE])
    self.data = data[self.SIZE:self.SIZE + num_bytes_following]

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Unknown descriptor:\n')
    o.write('      Tag: {}\n'.format(self.tag))
    if len(self.data) >= 256:
      o.write('      Data: {} bytes\n'.format(len(self.data)))
    else:
      o.write('      Data: {} ({} bytes)\n'.format(
          repr(str(self.data)), len(self.data)))

  def encode(self):
    """Serializes the descriptor, padding the payload to 8 bytes.

    Returns:
      A bytearray() with the descriptor data.
    """
    nbf_with_padding = round_to_multiple(len(self.data), 8)
    padding_size = nbf_with_padding - len(self.data)
    header = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding)
    padding = struct.pack(str(padding_size) + 'x')
    return bytearray(header + self.data + padding)
903
904
class AvbPropertyDescriptor(AvbDescriptor):
  """A class for property descriptors.

  See the |AvbPropertyDescriptor| C struct for more information.

  Attributes:
    key: The key.
    value: The value.
  """

  TAG = 0
  SIZE = 32
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'    # key size (bytes)
                   'Q')   # value size (bytes)

  def __init__(self, data=None):
    """Initializes a new property descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, key_size,
       value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      # On disk both key and value are NUL-terminated (hence the two +1),
      # and the payload is padded to a multiple of 8 bytes.
      expected_size = round_to_multiple(
          self.SIZE - 16 + key_size + 1 + value_size + 1, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a property '
                          'descriptor.')
      self.key = data[self.SIZE:(self.SIZE + key_size)]
      # Skip the NUL byte separating key and value.
      self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 +
                                                    value_size)]
    else:
      self.key = ''
      self.value = ''

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    if len(self.value) < 256:
      o.write('    Prop: {} -> {}\n'.format(self.key, repr(str(self.value))))
    else:
      o.write('    Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value)))

  def encode(self):
    """Serializes the descriptor.

    Note: relies on Python 2 semantics - |key|/|value| are byte strings
    concatenated with the '\0' terminators and the packed header.

    Returns:
      A bytearray() with the descriptor data.
    """
    # The +2 accounts for the NUL terminators after key and value.
    num_bytes_following = self.SIZE + len(self.key) + len(self.value) + 2 - 16
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       len(self.key), len(self.value))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + self.key + '\0' + self.value + '\0' + padding
    return bytearray(ret)
973
974
class AvbHashtreeDescriptor(AvbDescriptor):
  """A class for hashtree descriptors.

  See the |AvbHashtreeDescriptor| C struct for more information.

  Attributes:
    dm_verity_version: dm-verity version used.
    image_size: Size of the image, after rounding up to |block_size|.
    tree_offset: Offset of the hash tree in the file.
    tree_size: Size of the tree.
    data_block_size: Data block size
    hash_block_size: Hash block size
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    root_digest: Root digest.
  """

  TAG = 1
  SIZE = 96
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # dm-verity version used
                   'Q'  # image size (bytes)
                   'Q'  # tree offset (bytes)
                   'Q'  # tree size (bytes)
                   'L'  # data block size (bytes)
                   'L'  # hash block size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # root digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hashtree descriptor.

    Arguments:
      data: If not None, must be a bytearray holding the encoded
          descriptor: the |SIZE|-byte header immediately followed by the
          partition name, salt and root digest.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.dm_verity_version, self.image_size,
       self.tree_offset, self.tree_size, self.data_block_size,
       self.hash_block_size, self.hash_algorithm, partition_name_len, salt_len,
       root_digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      # num_bytes_following counts everything after the 16-byte
      # tag/num_bytes_following pair, rounded up to a multiple of 8.
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hashtree '
                          'descriptor.')
      # Nuke NUL-bytes at the end.
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      # Variable-length payload follows the fixed-size header in this
      # order: partition name, salt, root digest.
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
      # Cross-check: the stored digest must be exactly as long as the
      # digest produced by the named hash algorithm.
      if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('root_digest_len doesn\'t match hash algorithm')

    else:
      # No data given - start out with an empty descriptor that the
      # caller fills in before calling encode().
      self.dm_verity_version = 0
      self.image_size = 0
      self.tree_offset = 0
      self.tree_size = 0
      self.data_block_size = 0
      self.hash_block_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.root_digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Hashtree descriptor:\n')
    o.write('      Version of dm-verity:  {}\n'.format(self.dm_verity_version))
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Tree Offset:           {}\n'.format(self.tree_offset))
    o.write('      Tree Size:             {} bytes\n'.format(self.tree_size))
    o.write('      Data Block Size:       {} bytes\n'.format(
        self.data_block_size))
    o.write('      Hash Block Size:       {} bytes\n'.format(
        self.hash_block_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    # .encode('hex') is the Python 2 bytes-to-hex-string conversion.
    o.write('      Salt:                  {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write('      Root Digest:           {}\n'.format(str(
        self.root_digest).encode('hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header and is
    # padded up to the next multiple of 8 bytes.
    num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) +
                           len(self.root_digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.dm_verity_version, self.image_size,
                       self.tree_offset, self.tree_size, self.data_block_size,
                       self.hash_block_size, self.hash_algorithm,
                       len(encoded_name), len(self.salt), len(self.root_digest))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.root_digest + padding
    return bytearray(ret)
1096
1097
class AvbHashDescriptor(AvbDescriptor):
  """A class for hash descriptors.

  See the |AvbHashDescriptor| C struct for more information.

  Attributes:
    image_size: Image size, in bytes.
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    digest: The hash value of salt and data combined.
  """

  TAG = 2
  SIZE = 68
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'  # image size (bytes)
                   '32s'  # hash algorithm used
                   'L'  # partition name (bytes)
                   'L'  # salt length (bytes)
                   'L')  # digest length (bytes)

  def __init__(self, data=None):
    """Initializes a new hash descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      # Start out empty; the caller fills in the fields before encode().
      self.image_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.digest = bytearray()
      return

    (tag, num_bytes_following, self.image_size, self.hash_algorithm,
     partition_name_len, salt_len,
     digest_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
    expected_size = round_to_multiple(
        self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a hash descriptor.')
    # Strip the NUL padding from the fixed-size algorithm field.
    self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
    # The variable-length payload follows the header in this order:
    # partition name, salt, digest.
    offset = self.SIZE
    self.partition_name = str(data[offset:(offset + partition_name_len)])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.partition_name.decode('utf-8')
    offset += partition_name_len
    self.salt = data[offset:(offset + salt_len)]
    offset += salt_len
    self.digest = data[offset:(offset + digest_len)]
    # The stored digest must be as long as the named algorithm produces.
    if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
      raise LookupError('digest_len doesn\'t match hash algorithm')

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    # .encode('hex') is the Python 2 bytes-to-hex-string conversion.
    salt_hex = str(self.salt).encode('hex')
    digest_hex = str(self.digest).encode('hex')
    o.write('    Hash descriptor:\n')
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Salt:                  {}\n'.format(salt_hex))
    o.write('      Digest:                {}\n'.format(digest_hex))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    name_bytes = self.partition_name.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header and is
    # padded up to the next multiple of 8 bytes.
    payload_len = (
        self.SIZE + len(name_bytes) + len(self.salt) + len(self.digest) - 16)
    padded_len = round_to_multiple(payload_len, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_len,
                         self.image_size, self.hash_algorithm, len(name_bytes),
                         len(self.salt), len(self.digest))
    padding = struct.pack(str(padded_len - payload_len) + 'x')
    return bytearray(header + name_bytes + self.salt + self.digest + padding)
1193
1194
class AvbKernelCmdlineDescriptor(AvbDescriptor):
  """A class for kernel command-line descriptors.

  See the |AvbKernelCmdlineDescriptor| C struct for more information.

  Attributes:
    kernel_cmdline: The kernel command-line.
  """

  TAG = 3
  SIZE = 20
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L')  # cmdline length (bytes)

  def __init__(self, data=None):
    """Initializes a new kernel cmdline descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      self.kernel_cmdline = ''
      return

    (tag, num_bytes_following, cmdline_len) = struct.unpack(
        self.FORMAT_STRING, data[0:self.SIZE])
    expected_size = round_to_multiple(self.SIZE - 16 + cmdline_len, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a kernel cmdline '
                        'descriptor.')
    # The command-line directly follows the fixed-size header; the
    # stored length excludes any trailing padding.
    self.kernel_cmdline = str(data[self.SIZE:(self.SIZE + cmdline_len)])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.kernel_cmdline.decode('utf-8')

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    cmdline_repr = repr(self.kernel_cmdline)
    o.write('    Kernel Cmdline descriptor:\n')
    o.write('      Kernel Cmdline:        {}\n'.format(cmdline_repr))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    cmdline_bytes = self.kernel_cmdline.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header and is
    # padded up to the next multiple of 8 bytes.
    payload_len = self.SIZE + len(cmdline_bytes) - 16
    padded_len = round_to_multiple(payload_len, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_len,
                         len(cmdline_bytes))
    padding = struct.pack(str(padded_len - payload_len) + 'x')
    return bytearray(header + cmdline_bytes + padding)
1262
1263
class AvbChainPartitionDescriptor(AvbDescriptor):
  """A class for chained partition descriptors.

  See the |AvbChainPartitionDescriptor| C struct for more information.

  Attributes:
    rollback_index_slot: The rollback index slot to use.
    partition_name: Partition name.
    public_key: Bytes for the public key.
  """

  TAG = 4
  SIZE = 28
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'  # rollback_index_slot
                   'L'  # partition_name_size (bytes)
                   'L')  # public_key_size (bytes)

  def __init__(self, data=None):
    """Initializes a new chain partition descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      # Start out empty; the caller fills in the fields before encode().
      self.rollback_index_slot = 0
      self.partition_name = ''
      self.public_key = bytearray()
      return

    (tag, num_bytes_following, self.rollback_index_slot, partition_name_len,
     public_key_len) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
    expected_size = round_to_multiple(
        self.SIZE - 16 + partition_name_len + public_key_len, 8)
    if tag != self.TAG or num_bytes_following != expected_size:
      raise LookupError('Given data does not look like a chain partition '
                        'descriptor.')
    # The variable-length payload follows the header in this order:
    # partition name, public key.
    offset = self.SIZE
    self.partition_name = str(data[offset:(offset + partition_name_len)])
    # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
    self.partition_name.decode('utf-8')
    offset += partition_name_len
    self.public_key = data[offset:(offset + public_key_len)]

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    # Just show the SHA1 of the key, for size reasons.
    key_sha1 = hashlib.sha1(self.public_key).hexdigest()
    o.write('    Chain Partition descriptor:\n')
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Rollback Index Slot:   {}\n'.format(
        self.rollback_index_slot))
    o.write('      Public key (sha1):     {}\n'.format(key_sha1))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    name_bytes = self.partition_name.encode('utf-8')
    # num_bytes_following excludes the 16-byte descriptor header and is
    # padded up to the next multiple of 8 bytes.
    payload_len = self.SIZE + len(name_bytes) + len(self.public_key) - 16
    padded_len = round_to_multiple(payload_len, 8)
    header = struct.pack(self.FORMAT_STRING, self.TAG, padded_len,
                         self.rollback_index_slot, len(name_bytes),
                         len(self.public_key))
    padding = struct.pack(str(padded_len - payload_len) + 'x')
    return bytearray(header + name_bytes + self.public_key + padding)
1346
1347
# Table mapping a descriptor tag to the class that parses it. The tag
# value is used directly as an index into this list (see
# parse_descriptors()), so the order here must match each class's
# |TAG| attribute.
DESCRIPTOR_CLASSES = [
    AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor,
    AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor
]
1352
1353
def parse_descriptors(data):
  """Parses a blob of data into descriptors.

  Arguments:
    data: A bytearray() with encoded descriptors.

  Returns:
    A list of instances of objects derived from AvbDescriptor. For
    unknown descriptors, the class AvbDescriptor is used.
  """
  descriptors = []
  offset = 0
  while offset < len(data):
    # Each descriptor starts with a 16-byte tag/num_bytes_following pair.
    (tag, num_bytes_following) = struct.unpack('!2Q',
                                               data[offset:offset + 16])
    try:
      desc_class = DESCRIPTOR_CLASSES[tag]
    except IndexError:
      # Unknown tag: fall back to the opaque base class.
      desc_class = AvbDescriptor
    end = offset + 16 + num_bytes_following
    descriptors.append(desc_class(bytearray(data[offset:end])))
    offset = end
  return descriptors
1375
1376
class AvbFooter(object):
  """A class for parsing and writing footers.

  Footers are stored at the end of partitions and point to where the
  AvbVBMeta blob is located. They also contain the original size of
  the image before AVB information was added.

  Attributes:
    magic: Magic for identifying the footer, see |MAGIC|.
    version_major: The major version of avbtool that wrote the footer.
    version_minor: The minor version of avbtool that wrote the footer.
    original_image_size: Original image size.
    vbmeta_offset: Offset of where the AvbVBMeta blob is stored.
    vbmeta_size: Size of the AvbVBMeta blob.
  """

  MAGIC = 'AVBf'
  SIZE = 64
  RESERVED = 28
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version.
                   'Q'  # Original image size.
                   'Q'  # Offset of VBMeta blob.
                   'Q' +  # Size of VBMeta blob.
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new footer object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given footer is malformed.
      struct.error: If the given data has no footer.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if not data:
      # Fresh footer for the current avbtool version; the caller fills
      # in the sizes and offsets before encoding.
      self.magic = self.MAGIC
      self.version_major = AVB_VERSION_MAJOR
      self.version_minor = AVB_VERSION_MINOR
      self.original_image_size = 0
      self.vbmeta_offset = 0
      self.vbmeta_size = 0
      return

    (self.magic, self.version_major, self.version_minor,
     self.original_image_size, self.vbmeta_offset,
     self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data)
    if self.magic != self.MAGIC:
      raise LookupError('Given data does not look like a Brillo footer.')

  def encode(self):
    """Gets a string representing the binary encoding of the footer.

    Returns:
      A bytearray() with a binary representation of the footer.
    """
    return struct.pack(self.FORMAT_STRING, self.magic, self.version_major,
                       self.version_minor, self.original_image_size,
                       self.vbmeta_offset, self.vbmeta_size)
David Zeuthen21e95262016-07-27 17:58:40 -04001437
1438
class AvbVBMetaHeader(object):
  """A class for parsing and writing Brillo Verified Boot vbmeta images.

  Attributes:
    The attributes correspond to the |AvbVBMetaHeader| struct
    defined in avb_vbmeta_header.h.
  """

  SIZE = 256

  # Keep in sync with |reserved| field of |AvbVBMetaImageHeader|.
  RESERVED = 152

  # Keep in sync with |AvbVBMetaImageHeader|.
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version
                   '2Q'  # 2 x block size
                   'L'  # algorithm type
                   '2Q'  # offset, size (hash)
                   '2Q'  # offset, size (signature)
                   '2Q'  # offset, size (public key)
                   '2Q'  # offset, size (descriptors)
                   'Q' +  # rollback_index
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new header object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE| (256 bytes)
          holding an encoded header.

    Raises:
      AvbError: If the given data is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.magic, self.header_version_major, self.header_version_minor,
       self.authentication_data_block_size, self.auxiliary_data_block_size,
       self.algorithm_type, self.hash_offset, self.hash_size,
       self.signature_offset, self.signature_size, self.public_key_offset,
       self.public_key_size, self.descriptors_offset, self.descriptors_size,
       self.rollback_index) = struct.unpack(self.FORMAT_STRING, data)
      # Only the magic is validated here; the offsets and sizes are
      # trusted as-is and surface errors later when used.
      if self.magic != 'AVB0':
        raise AvbError('Given image does not look like a Brillo boot image')
    else:
      # Start from an empty header for the current avbtool version; the
      # offsets/sizes are filled in once the blocks have been laid out.
      self.magic = 'AVB0'
      self.header_version_major = AVB_VERSION_MAJOR
      self.header_version_minor = AVB_VERSION_MINOR
      self.authentication_data_block_size = 0
      self.auxiliary_data_block_size = 0
      self.algorithm_type = 0
      self.hash_offset = 0
      self.hash_size = 0
      self.signature_offset = 0
      self.signature_size = 0
      self.public_key_offset = 0
      self.public_key_size = 0
      self.descriptors_offset = 0
      self.descriptors_size = 0
      self.rollback_index = 0

  def save(self, output):
    """Serializes the header (256 bytes) to disk.

    Arguments:
      output: The object to write the output to.
    """
    output.write(struct.pack(
        self.FORMAT_STRING, self.magic, self.header_version_major,
        self.header_version_minor, self.authentication_data_block_size,
        self.auxiliary_data_block_size, self.algorithm_type, self.hash_offset,
        self.hash_size, self.signature_offset, self.signature_size,
        self.public_key_offset, self.public_key_size, self.descriptors_offset,
        self.descriptors_size, self.rollback_index))

  def encode(self):
    """Serializes the header (256 bytes) to a bytearray().

    Returns:
      A bytearray() with the encoded header.
    """
    return struct.pack(self.FORMAT_STRING, self.magic,
                       self.header_version_major, self.header_version_minor,
                       self.authentication_data_block_size,
                       self.auxiliary_data_block_size, self.algorithm_type,
                       self.hash_offset, self.hash_size, self.signature_offset,
                       self.signature_size, self.public_key_offset,
                       self.public_key_size, self.descriptors_offset,
                       self.descriptors_size, self.rollback_index)
1529
1530
1531class Avb(object):
1532 """Business logic for avbtool command-line tool."""
1533
David Zeuthena4fee8b2016-08-22 15:20:43 -04001534 def erase_footer(self, image_filename, keep_hashtree):
David Zeuthen21e95262016-07-27 17:58:40 -04001535 """Implements the 'erase_footer' command.
1536
1537 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001538 image_filename: File to erase a footer from.
David Zeuthen21e95262016-07-27 17:58:40 -04001539 keep_hashtree: If True, keep the hashtree around.
1540
1541 Raises:
1542 AvbError: If there's no footer in the image.
1543 """
1544
David Zeuthena4fee8b2016-08-22 15:20:43 -04001545 image = ImageHandler(image_filename)
1546
David Zeuthen21e95262016-07-27 17:58:40 -04001547 (footer, _, descriptors, _) = self._parse_image(image)
1548
1549 if not footer:
1550 raise AvbError('Given image does not have a footer.')
1551
1552 new_image_size = None
1553 if not keep_hashtree:
1554 new_image_size = footer.original_image_size
1555 else:
1556 # If requested to keep the hashtree, search for a hashtree
1557 # descriptor to figure out the location and size of the hashtree.
1558 for desc in descriptors:
1559 if isinstance(desc, AvbHashtreeDescriptor):
1560 # The hashtree is always just following the main data so the
1561 # new size is easily derived.
1562 new_image_size = desc.tree_offset + desc.tree_size
1563 break
1564 if not new_image_size:
1565 raise AvbError('Requested to keep hashtree but no hashtree '
1566 'descriptor was found.')
1567
1568 # And cut...
1569 image.truncate(new_image_size)
1570
David Zeuthena4fee8b2016-08-22 15:20:43 -04001571 def info_image(self, image_filename, output):
David Zeuthen21e95262016-07-27 17:58:40 -04001572 """Implements the 'info_image' command.
1573
1574 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001575 image_filename: Image file to get information from (file object).
David Zeuthen21e95262016-07-27 17:58:40 -04001576 output: Output file to write human-readable information to (file object).
1577 """
1578
David Zeuthena4fee8b2016-08-22 15:20:43 -04001579 image = ImageHandler(image_filename)
1580
David Zeuthen21e95262016-07-27 17:58:40 -04001581 o = output
1582
1583 (footer, header, descriptors, image_size) = self._parse_image(image)
1584
1585 if footer:
1586 o.write('Footer version: {}.{}\n'.format(footer.version_major,
1587 footer.version_minor))
1588 o.write('Image size: {} bytes\n'.format(image_size))
1589 o.write('Original image size: {} bytes\n'.format(
1590 footer.original_image_size))
1591 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset))
1592 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size))
1593 o.write('--\n')
1594
1595 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type)
1596
David Zeuthena4fee8b2016-08-22 15:20:43 -04001597 o.write('VBMeta image version: {}.{}{}\n'.format(
1598 header.header_version_major, header.header_version_minor,
1599 ' (Sparse)' if image.is_sparse else ''))
David Zeuthen21e95262016-07-27 17:58:40 -04001600 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE))
1601 o.write('Authentication Block: {} bytes\n'.format(
1602 header.authentication_data_block_size))
1603 o.write('Auxiliary Block: {} bytes\n'.format(
1604 header.auxiliary_data_block_size))
1605 o.write('Algorithm: {}\n'.format(alg_name))
1606 o.write('Rollback Index: {}\n'.format(header.rollback_index))
1607
1608 # Print descriptors.
1609 num_printed = 0
1610 o.write('Descriptors:\n')
1611 for desc in descriptors:
1612 desc.print_desc(o)
1613 num_printed += 1
1614 if num_printed == 0:
1615 o.write(' (none)\n')
1616
1617 def _parse_image(self, image):
1618 """Gets information about an image.
1619
1620 The image can either be a vbmeta or an image with a footer.
1621
1622 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001623 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.
David Zeuthen21e95262016-07-27 17:58:40 -04001624
1625 Returns:
1626 A tuple where the first argument is a AvbFooter (None if there
1627 is no footer on the image), the second argument is a
1628 AvbVBMetaHeader, the third argument is a list of
1629 AvbDescriptor-derived instances, and the fourth argument is the
1630 size of |image|.
1631 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04001632 assert isinstance(image, ImageHandler)
David Zeuthen21e95262016-07-27 17:58:40 -04001633 footer = None
David Zeuthena4fee8b2016-08-22 15:20:43 -04001634 image_size = image.care_size
David Zeuthen21e95262016-07-27 17:58:40 -04001635 image.seek(image_size - AvbFooter.SIZE)
1636 try:
1637 footer = AvbFooter(image.read(AvbFooter.SIZE))
1638 except (LookupError, struct.error):
1639 # Nope, just seek back to the start.
1640 image.seek(0)
1641
1642 vbmeta_offset = 0
1643 if footer:
1644 vbmeta_offset = footer.vbmeta_offset
1645
1646 image.seek(vbmeta_offset)
1647 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE))
1648
1649 auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE
1650 aux_block_offset = auth_block_offset + h.authentication_data_block_size
1651 desc_start_offset = aux_block_offset + h.descriptors_offset
1652 image.seek(desc_start_offset)
1653 descriptors = parse_descriptors(image.read(h.descriptors_size))
1654
1655 return footer, h, descriptors, image_size
1656
1657 def _get_cmdline_descriptor_for_dm_verity(self, image):
1658 """Generate kernel cmdline descriptor for dm-verity.
1659
1660 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001661 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.
David Zeuthen21e95262016-07-27 17:58:40 -04001662
1663 Returns:
1664 A AvbKernelCmdlineDescriptor with dm-verity kernel cmdline
1665 instructions for the hashtree.
1666
1667 Raises:
1668 AvbError: If |image| doesn't have a hashtree descriptor.
1669
1670 """
1671
1672 (_, _, descriptors, _) = self._parse_image(image)
1673
1674 ht = None
1675 for desc in descriptors:
1676 if isinstance(desc, AvbHashtreeDescriptor):
1677 ht = desc
1678 break
1679
1680 if not ht:
1681 raise AvbError('No hashtree descriptor in given image')
1682
1683 c = 'dm="1 vroot none ro 1,'
1684 c += '0 ' # start
1685 c += '{} '.format((ht.image_size / 512)) # size (# sectors)
1686 c += 'verity {} '.format(ht.dm_verity_version) # type and version
1687 c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) ' # data_dev
1688 c += 'PARTUUID=$(ANDROID_SYSTEM_PARTUUID) ' # hash_dev
1689 c += '{} '.format(ht.data_block_size) # data_block
1690 c += '{} '.format(ht.hash_block_size) # hash_block
1691 c += '{} '.format(ht.image_size / ht.data_block_size) # #blocks
1692 c += '{} '.format(ht.image_size / ht.data_block_size) # hash_offset
1693 c += '{} '.format(ht.hash_algorithm) # hash_alg
1694 c += '{} '.format(str(ht.root_digest).encode('hex')) # root_digest
1695 c += '{}'.format(str(ht.salt).encode('hex')) # salt
1696 c += '"'
1697
1698 desc = AvbKernelCmdlineDescriptor()
1699 desc.kernel_cmdline = c
1700 return desc
1701
1702 def make_vbmeta_image(self, output, chain_partitions, algorithm_name,
1703 key_path, rollback_index, props, props_from_file,
1704 kernel_cmdlines,
1705 generate_dm_verity_cmdline_from_hashtree,
1706 include_descriptors_from_image):
1707 """Implements the 'make_vbmeta_image' command.
1708
1709 Arguments:
1710 output: File to write the image to.
1711 chain_partitions: List of partitions to chain.
1712 algorithm_name: Name of algorithm to use.
1713 key_path: Path to key to use or None.
1714 rollback_index: The rollback index to use.
1715 props: Properties to insert (list of strings of the form 'key:value').
1716 props_from_file: Properties to insert (list of strings 'key:<path>').
1717 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1718 generate_dm_verity_cmdline_from_hashtree: None or file to generate from.
1719 include_descriptors_from_image: List of file objects with descriptors.
1720
1721 Raises:
1722 AvbError: If a chained partition is malformed.
1723 """
1724
1725 descriptors = []
1726
1727 # Insert chained partition descriptors.
1728 if chain_partitions:
1729 for cp in chain_partitions:
1730 cp_tokens = cp.split(':')
1731 if len(cp_tokens) != 3:
1732 raise AvbError('Malformed chained partition "{}".'.format(cp))
1733 desc = AvbChainPartitionDescriptor()
1734 desc.partition_name = cp_tokens[0]
1735 desc.rollback_index_slot = int(cp_tokens[1])
1736 if desc.rollback_index_slot < 1:
1737 raise AvbError('Rollback index slot must be 1 or larger.')
1738 file_path = cp_tokens[2]
1739 desc.public_key = open(file_path, 'rb').read()
1740 descriptors.append(desc)
1741
1742 vbmeta_blob = self._generate_vbmeta_blob(
1743 algorithm_name, key_path, descriptors, rollback_index, props,
1744 props_from_file, kernel_cmdlines,
1745 generate_dm_verity_cmdline_from_hashtree,
1746 include_descriptors_from_image)
1747
1748 # Write entire vbmeta blob (header, authentication, auxiliary).
1749 output.seek(0)
1750 output.write(vbmeta_blob)
1751
1752 def _generate_vbmeta_blob(self, algorithm_name, key_path, descriptors,
1753 rollback_index, props, props_from_file,
1754 kernel_cmdlines,
1755 generate_dm_verity_cmdline_from_hashtree,
1756 include_descriptors_from_image):
1757 """Generates a VBMeta blob.
1758
1759 This blob contains the header (struct AvbVBMetaHeader), the
1760 authentication data block (which contains the hash and signature
1761 for the header and auxiliary block), and the auxiliary block
1762 (which contains descriptors, the public key used, and other data).
1763
1764 The |key| parameter can |None| only if the |algorithm_name| is
1765 'NONE'.
1766
1767 Arguments:
1768 algorithm_name: The algorithm name as per the ALGORITHMS dict.
1769 key_path: The path to the .pem file used to sign the blob.
1770 descriptors: A list of descriptors to insert or None.
1771 rollback_index: The rollback index to use.
1772 props: Properties to insert (List of strings of the form 'key:value').
1773 props_from_file: Properties to insert (List of strings 'key:<path>').
1774 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1775 generate_dm_verity_cmdline_from_hashtree: None or file to generate
1776 dm-verity kernel cmdline from.
1777 include_descriptors_from_image: List of file objects for which
1778 to insert descriptors from.
1779
1780 Returns:
1781 A bytearray() with the VBMeta blob.
1782
1783 Raises:
1784 Exception: If the |algorithm_name| is not found, if no key has
1785 been given and the given algorithm requires one, or the key is
1786 of the wrong size.
1787
1788 """
1789 try:
1790 alg = ALGORITHMS[algorithm_name]
1791 except KeyError:
1792 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name))
1793
1794 # Descriptors.
1795 encoded_descriptors = bytearray()
1796 if descriptors:
1797 for desc in descriptors:
1798 encoded_descriptors.extend(desc.encode())
1799
1800 # Add properties.
1801 if props:
1802 for prop in props:
1803 idx = prop.find(':')
1804 if idx == -1:
1805 raise AvbError('Malformed property "{}".'.format(prop))
1806 desc = AvbPropertyDescriptor()
1807 desc.key = prop[0:idx]
1808 desc.value = prop[(idx + 1):]
1809 encoded_descriptors.extend(desc.encode())
1810 if props_from_file:
1811 for prop in props_from_file:
1812 idx = prop.find(':')
1813 if idx == -1:
1814 raise AvbError('Malformed property "{}".'.format(prop))
1815 desc = AvbPropertyDescriptor()
1816 desc.key = prop[0:idx]
1817 desc.value = prop[(idx + 1):]
1818 file_path = prop[(idx + 1):]
1819 desc.value = open(file_path, 'rb').read()
1820 encoded_descriptors.extend(desc.encode())
1821
1822 # Add AvbKernelCmdline descriptor for dm-verity, if requested.
1823 if generate_dm_verity_cmdline_from_hashtree:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001824 image_handler = ImageHandler(
1825 generate_dm_verity_cmdline_from_hashtree.name)
David Zeuthen21e95262016-07-27 17:58:40 -04001826 encoded_descriptors.extend(self._get_cmdline_descriptor_for_dm_verity(
David Zeuthena4fee8b2016-08-22 15:20:43 -04001827 image_handler).encode())
David Zeuthen21e95262016-07-27 17:58:40 -04001828
1829 # Add kernel command-lines.
1830 if kernel_cmdlines:
1831 for i in kernel_cmdlines:
1832 desc = AvbKernelCmdlineDescriptor()
1833 desc.kernel_cmdline = i
1834 encoded_descriptors.extend(desc.encode())
1835
1836 # Add descriptors from other images.
1837 if include_descriptors_from_image:
1838 for image in include_descriptors_from_image:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001839 image_handler = ImageHandler(image.name)
1840 (_, _, image_descriptors, _) = self._parse_image(image_handler)
David Zeuthen21e95262016-07-27 17:58:40 -04001841 for desc in image_descriptors:
1842 encoded_descriptors.extend(desc.encode())
1843
1844 key = None
1845 encoded_key = bytearray()
1846 if alg.public_key_num_bytes > 0:
1847 if not key_path:
1848 raise AvbError('Key is required for algorithm {}'.format(
1849 algorithm_name))
1850 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1851 encoded_key = encode_rsa_key(key)
1852 if len(encoded_key) != alg.public_key_num_bytes:
1853 raise AvbError('Key is wrong size for algorithm {}'.format(
1854 algorithm_name))
1855
1856 h = AvbVBMetaHeader()
1857
1858 # For the Auxiliary data block, descriptors are stored at offset 0
1859 # and the public key is immediately after that.
1860 h.auxiliary_data_block_size = round_to_multiple(
1861 len(encoded_descriptors) + len(encoded_key), 64)
1862 h.descriptors_offset = 0
1863 h.descriptors_size = len(encoded_descriptors)
1864 h.public_key_offset = h.descriptors_size
1865 h.public_key_size = len(encoded_key)
1866
1867 # For the Authentication data block, the hash is first and then
1868 # the signature.
1869 h.authentication_data_block_size = round_to_multiple(
1870 alg.hash_num_bytes + alg.public_key_num_bytes, 64)
1871 h.algorithm_type = alg.algorithm_type
1872 h.hash_offset = 0
1873 h.hash_size = alg.hash_num_bytes
1874 # Signature offset and size - it's stored right after the hash
1875 # (in Authentication data block).
1876 h.signature_offset = alg.hash_num_bytes
1877 h.signature_size = alg.signature_num_bytes
1878
1879 h.rollback_index = rollback_index
1880
1881 # Generate Header data block.
1882 header_data_blob = h.encode()
1883
1884 # Generate Auxiliary data block.
1885 aux_data_blob = bytearray()
1886 aux_data_blob.extend(encoded_descriptors)
1887 aux_data_blob.extend(encoded_key)
1888 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob)
1889 aux_data_blob.extend('\0' * padding_bytes)
1890
1891 # Calculate the hash.
1892 binary_hash = bytearray()
1893 binary_signature = bytearray()
1894 if algorithm_name != 'NONE':
1895 if algorithm_name[0:6] == 'SHA256':
1896 ha = hashlib.sha256()
1897 elif algorithm_name[0:6] == 'SHA512':
1898 ha = hashlib.sha512()
1899 else:
1900 raise AvbError('Unsupported algorithm {}.'.format(algorithm_name))
1901 ha.update(header_data_blob)
1902 ha.update(aux_data_blob)
1903 binary_hash.extend(ha.digest())
1904
1905 # Calculate the signature.
1906 p = subprocess.Popen(
1907 ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'],
1908 stdin=subprocess.PIPE,
1909 stdout=subprocess.PIPE,
1910 stderr=subprocess.PIPE)
1911 padding_and_hash = str(bytearray(alg.padding)) + binary_hash
1912 (pout, perr) = p.communicate(padding_and_hash)
1913 retcode = p.wait()
1914 if retcode != 0:
1915 raise AvbError('Error signing: {}'.format(perr))
1916 binary_signature.extend(pout)
1917
1918 # Generate Authentication data block.
1919 auth_data_blob = bytearray()
1920 auth_data_blob.extend(binary_hash)
1921 auth_data_blob.extend(binary_signature)
1922 padding_bytes = h.authentication_data_block_size - len(auth_data_blob)
1923 auth_data_blob.extend('\0' * padding_bytes)
1924
1925 return header_data_blob + auth_data_blob + aux_data_blob
1926
1927 def extract_public_key(self, key_path, output):
1928 """Implements the 'extract_public_key' command.
1929
1930 Arguments:
1931 key_path: The path to a RSA private key file.
1932 output: The file to write to.
1933 """
1934 key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
1935 write_rsa_key(output, key)
1936
David Zeuthena4fee8b2016-08-22 15:20:43 -04001937 def add_hash_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04001938 hash_algorithm, salt, algorithm_name, key_path,
1939 rollback_index, props, props_from_file, kernel_cmdlines,
1940 generate_dm_verity_cmdline_from_hashtree,
1941 include_descriptors_from_image):
David Zeuthena4fee8b2016-08-22 15:20:43 -04001942 """Implementation of the add_hash_footer on unsparse images.
David Zeuthen21e95262016-07-27 17:58:40 -04001943
1944 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04001945 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04001946 partition_size: Size of partition.
1947 partition_name: Name of partition (without A/B suffix).
1948 hash_algorithm: Hash algorithm to use.
1949 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
1950 algorithm_name: Name of algorithm to use.
1951 key_path: Path to key to use or None.
1952 rollback_index: Rollback index.
1953 props: Properties to insert (List of strings of the form 'key:value').
1954 props_from_file: Properties to insert (List of strings 'key:<path>').
1955 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
1956 generate_dm_verity_cmdline_from_hashtree: None or file to generate
1957 dm-verity kernel cmdline from.
1958 include_descriptors_from_image: List of file objects for which
1959 to insert descriptors from.
David Zeuthena4fee8b2016-08-22 15:20:43 -04001960
1961 Raises:
1962 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04001963 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04001964 image = ImageHandler(image_filename)
1965
1966 if partition_size % image.block_size != 0:
1967 raise AvbError('Partition size of {} is not a multiple of the image '
1968 'block size {}.'.format(partition_size,
1969 image.block_size))
1970
David Zeuthen21e95262016-07-27 17:58:40 -04001971 # If there's already a footer, truncate the image to its original
1972 # size. This way 'avbtool add_hash_footer' is idempotent (modulo
1973 # salts).
David Zeuthena4fee8b2016-08-22 15:20:43 -04001974 image_size = image.care_size
David Zeuthen21e95262016-07-27 17:58:40 -04001975 image.seek(image_size - AvbFooter.SIZE)
1976 try:
1977 footer = AvbFooter(image.read(AvbFooter.SIZE))
1978 # Existing footer found. Just truncate.
1979 original_image_size = footer.original_image_size
1980 image_size = footer.original_image_size
1981 image.truncate(image_size)
1982 except (LookupError, struct.error):
1983 original_image_size = image_size
1984
1985 # If anything goes wrong from here-on, restore the image back to
1986 # its original size.
1987 try:
1988 digest_size = len(hashlib.new(name=hash_algorithm).digest())
1989 if salt:
1990 salt = salt.decode('hex')
1991 else:
1992 if salt is None:
1993 # If salt is not explicitly specified, choose a hash
1994 # that's the same size as the hash size.
1995 hash_size = digest_size
1996 salt = open('/dev/urandom').read(hash_size)
1997 else:
1998 salt = ''
1999
2000 hasher = hashlib.new(name=hash_algorithm, string=salt)
2001 # TODO(zeuthen): might want to read this in chunks to avoid
2002 # memory pressure, then again, this is only supposed to be used
2003 # on kernel/initramfs partitions. Possible optimization.
2004 image.seek(0)
2005 hasher.update(image.read(image_size))
2006 digest = hasher.digest()
2007
2008 h_desc = AvbHashDescriptor()
2009 h_desc.image_size = image_size
2010 h_desc.hash_algorithm = hash_algorithm
2011 h_desc.partition_name = partition_name
2012 h_desc.salt = salt
2013 h_desc.digest = digest
2014
2015 # Generate the VBMeta footer.
David Zeuthen21e95262016-07-27 17:58:40 -04002016 vbmeta_blob = self._generate_vbmeta_blob(
2017 algorithm_name, key_path, [h_desc], rollback_index, props,
2018 props_from_file, kernel_cmdlines,
2019 generate_dm_verity_cmdline_from_hashtree,
2020 include_descriptors_from_image)
2021
David Zeuthena4fee8b2016-08-22 15:20:43 -04002022 # We might have a DONT_CARE hole at the end (in which case
2023 # |image.care_size| < |image.image_size|) so truncate here.
2024 image.truncate(image.care_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002025
David Zeuthena4fee8b2016-08-22 15:20:43 -04002026 # If the image isn't sparse, its size might not be a multiple of
2027 # the block size. This will screw up padding later so just grow it.
2028 if image.care_size % image.block_size != 0:
2029 assert not image.is_sparse
2030 padding_needed = image.block_size - (image.care_size%image.block_size)
2031 image.truncate(image.care_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002032
David Zeuthena4fee8b2016-08-22 15:20:43 -04002033 # The append_raw() method requires content with size being a
2034 # multiple of |block_size| so add padding as needed. Also record
2035 # where this is written to since we'll need to put that in the
2036 # footer.
2037 vbmeta_offset = image.care_size
2038 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2039 len(vbmeta_blob))
2040 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2041 image.append_raw(vbmeta_blob_with_padding)
2042 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding)
2043
2044 # Now insert a DONT_CARE chunk with enough bytes such that the
2045 # final Footer block is at the end of partition_size..
2046 image.append_dont_care(partition_size - vbmeta_end_offset -
2047 1*image.block_size)
2048
2049 # Generate the Footer that tells where the VBMeta footer
2050 # is. Also put enough padding in the front of the footer since
2051 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002052 footer = AvbFooter()
2053 footer.original_image_size = original_image_size
2054 footer.vbmeta_offset = vbmeta_offset
2055 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002056 footer_blob = footer.encode()
2057 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2058 footer_blob)
2059 image.append_raw(footer_blob_with_padding)
2060
David Zeuthen21e95262016-07-27 17:58:40 -04002061 except:
2062 # Truncate back to original size, then re-raise
2063 image.truncate(original_image_size)
2064 raise
2065
David Zeuthena4fee8b2016-08-22 15:20:43 -04002066 def add_hashtree_footer(self, image_filename, partition_size, partition_name,
David Zeuthen21e95262016-07-27 17:58:40 -04002067 hash_algorithm, block_size, salt, algorithm_name,
2068 key_path, rollback_index, props, props_from_file,
2069 kernel_cmdlines,
2070 generate_dm_verity_cmdline_from_hashtree,
2071 include_descriptors_from_image):
2072 """Implements the 'add_hashtree_footer' command.
2073
2074 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for
2075 more information about dm-verity and these hashes.
2076
2077 Arguments:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002078 image_filename: File to add the footer to.
David Zeuthen21e95262016-07-27 17:58:40 -04002079 partition_size: Size of partition.
2080 partition_name: Name of partition (without A/B suffix).
2081 hash_algorithm: Hash algorithm to use.
2082 block_size: Block size to use.
2083 salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
2084 algorithm_name: Name of algorithm to use.
2085 key_path: Path to key to use or None.
2086 rollback_index: Rollback index.
2087 props: Properties to insert (List of strings of the form 'key:value').
2088 props_from_file: Properties to insert (List of strings 'key:<path>').
2089 kernel_cmdlines: Kernel cmdlines to insert (list of strings).
2090 generate_dm_verity_cmdline_from_hashtree: None or file to generate
2091 dm-verity kernel cmdline from.
2092 include_descriptors_from_image: List of file objects for which
2093 to insert descriptors from.
David Zeuthena4fee8b2016-08-22 15:20:43 -04002094
2095 Raises:
2096 AvbError: If an argument is incorrect.
David Zeuthen21e95262016-07-27 17:58:40 -04002097 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002098 image = ImageHandler(image_filename)
2099
2100 if partition_size % image.block_size != 0:
2101 raise AvbError('Partition size of {} is not a multiple of the image '
2102 'block size {}.'.format(partition_size,
2103 image.block_size))
2104
David Zeuthen21e95262016-07-27 17:58:40 -04002105 # If there's already a footer, truncate the image to its original
2106 # size. This way 'avbtool add_hashtree_footer' is idempotent
2107 # (modulo salts).
David Zeuthena4fee8b2016-08-22 15:20:43 -04002108 image_size = image.care_size
David Zeuthen21e95262016-07-27 17:58:40 -04002109 image.seek(image_size - AvbFooter.SIZE)
2110 try:
2111 footer = AvbFooter(image.read(AvbFooter.SIZE))
2112 # Existing footer found. Just truncate.
2113 original_image_size = footer.original_image_size
2114 image_size = footer.original_image_size
2115 image.truncate(image_size)
2116 except (LookupError, struct.error):
2117 original_image_size = image_size
2118
2119 # If anything goes wrong from here-on, restore the image back to
2120 # its original size.
2121 try:
2122 # Ensure image is multiple of block_size.
2123 rounded_image_size = round_to_multiple(image_size, block_size)
2124 if rounded_image_size > image_size:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002125 image.append_raw('\0' * (rounded_image_size - image_size))
David Zeuthen21e95262016-07-27 17:58:40 -04002126 image_size = rounded_image_size
2127
David Zeuthen21e95262016-07-27 17:58:40 -04002128 digest_size = len(hashlib.new(name=hash_algorithm).digest())
2129 digest_padding = round_to_pow2(digest_size) - digest_size
2130
2131 if salt:
2132 salt = salt.decode('hex')
2133 else:
2134 if salt is None:
2135 # If salt is not explicitly specified, choose a hash
2136 # that's the same size as the hash size.
2137 hash_size = digest_size
2138 salt = open('/dev/urandom').read(hash_size)
2139 else:
2140 salt = ''
2141
David Zeuthena4fee8b2016-08-22 15:20:43 -04002142 # Hashes are stored upside down so we need to calculate hash
David Zeuthen21e95262016-07-27 17:58:40 -04002143 # offsets in advance.
2144 (hash_level_offsets, tree_size) = calc_hash_level_offsets(
2145 image_size, block_size, digest_size + digest_padding)
2146
David Zeuthena4fee8b2016-08-22 15:20:43 -04002147 # We might have a DONT_CARE hole at the end (in which case
2148 # |image.care_size| < |image.image_size|) so truncate here.
2149 image.truncate(image.care_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002150
David Zeuthena4fee8b2016-08-22 15:20:43 -04002151 # If the image isn't sparse, its size might not be a multiple of
2152 # the block size. This will screw up padding later so just grow it.
2153 if image.care_size % image.block_size != 0:
2154 assert not image.is_sparse
2155 padding_needed = image.block_size - (image.care_size%image.block_size)
2156 image.truncate(image.care_size + padding_needed)
David Zeuthen21e95262016-07-27 17:58:40 -04002157
David Zeuthena4fee8b2016-08-22 15:20:43 -04002158 # Generate the tree and add padding as needed.
2159 tree_offset = image.care_size
2160 root_digest, hash_tree = generate_hash_tree(image, image_size,
2161 block_size,
2162 hash_algorithm, salt,
2163 digest_padding,
2164 hash_level_offsets,
2165 tree_size)
2166 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) -
2167 len(hash_tree))
2168 hash_tree_with_padding = hash_tree + '\0'*padding_needed
2169 image.append_raw(hash_tree_with_padding)
2170
2171 # Generate HashtreeDescriptor with details about the tree we
2172 # just generated.
David Zeuthen21e95262016-07-27 17:58:40 -04002173 ht_desc = AvbHashtreeDescriptor()
2174 ht_desc.dm_verity_version = 1
2175 ht_desc.image_size = image_size
2176 ht_desc.tree_offset = tree_offset
2177 ht_desc.tree_size = tree_size
2178 ht_desc.data_block_size = block_size
2179 ht_desc.hash_block_size = block_size
2180 ht_desc.hash_algorithm = hash_algorithm
2181 ht_desc.partition_name = partition_name
2182 ht_desc.salt = salt
2183 ht_desc.root_digest = root_digest
2184
David Zeuthena4fee8b2016-08-22 15:20:43 -04002185 # Generate the VBMeta footer and add padding as needed.
2186 vbmeta_offset = tree_offset + len(hash_tree_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002187 vbmeta_blob = self._generate_vbmeta_blob(
2188 algorithm_name, key_path, [ht_desc], rollback_index, props,
2189 props_from_file, kernel_cmdlines,
2190 generate_dm_verity_cmdline_from_hashtree,
2191 include_descriptors_from_image)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002192 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
2193 len(vbmeta_blob))
2194 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed
2195 image.append_raw(vbmeta_blob_with_padding)
David Zeuthen21e95262016-07-27 17:58:40 -04002196
David Zeuthena4fee8b2016-08-22 15:20:43 -04002197 # Now insert a DONT_CARE chunk with enough bytes such that the
2198 # final Footer block is at the end of partition_size..
2199 image.append_dont_care(partition_size - image.care_size -
2200 1*image.block_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002201
David Zeuthena4fee8b2016-08-22 15:20:43 -04002202 # Generate the Footer that tells where the VBMeta footer
2203 # is. Also put enough padding in the front of the footer since
2204 # we'll write out an entire block.
David Zeuthen21e95262016-07-27 17:58:40 -04002205 footer = AvbFooter()
2206 footer.original_image_size = original_image_size
2207 footer.vbmeta_offset = vbmeta_offset
2208 footer.vbmeta_size = len(vbmeta_blob)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002209 footer_blob = footer.encode()
2210 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) +
2211 footer_blob)
2212 image.append_raw(footer_blob_with_padding)
2213
David Zeuthen21e95262016-07-27 17:58:40 -04002214 except:
2215 # Truncate back to original size, then re-raise
2216 image.truncate(original_image_size)
2217 raise
2218
2219
def calc_hash_level_offsets(image_size, block_size, digest_size):
  """Calculate the offsets of all the hash-levels in a Merkle-tree.

  Arguments:
    image_size: The size of the image to calculate a Merkle-tree for.
    block_size: The block size, e.g. 4096.
    digest_size: The size of each hash, e.g. 32 for SHA-256.

  Returns:
    A tuple where the first argument is an array of offsets and the
    second is size of the tree, in bytes.
  """
  level_offsets = []
  level_sizes = []
  tree_size = 0

  # Walk up the tree: each level holds one digest per block of the
  # level below, rounded up to whole blocks, until one block suffices.
  num_levels = 0
  size = image_size
  while size > block_size:
    # Explicit floor division (//): identical to '/' on Python 2 ints
    # but stays integral if this tool is ever run under Python 3.
    num_blocks = (size + block_size - 1) // block_size
    level_size = round_to_multiple(num_blocks * digest_size, block_size)

    level_sizes.append(level_size)
    tree_size += level_size
    num_levels += 1

    size = level_size

  # Levels are stored top-down ("upside down"), so the offset of level
  # n is the combined size of all levels above it (n+1 .. num_levels-1).
  for n in range(0, num_levels):
    offset = 0
    for m in range(n + 1, num_levels):
      offset += level_sizes[m]
    level_offsets.append(offset)

  return level_offsets, tree_size
David Zeuthen21e95262016-07-27 17:58:40 -04002255
2256
2257def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt,
David Zeuthena4fee8b2016-08-22 15:20:43 -04002258 digest_padding, hash_level_offsets, tree_size):
David Zeuthen21e95262016-07-27 17:58:40 -04002259 """Generates a Merkle-tree for a file.
2260
2261 Args:
2262 image: The image, as a file.
2263 image_size: The size of the image.
2264 block_size: The block size, e.g. 4096.
2265 hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'.
2266 salt: The salt to use.
2267 digest_padding: The padding for each digest.
David Zeuthen21e95262016-07-27 17:58:40 -04002268 hash_level_offsets: The offsets from calc_hash_level_offsets().
David Zeuthena4fee8b2016-08-22 15:20:43 -04002269 tree_size: The size of the tree, in number of bytes.
David Zeuthen21e95262016-07-27 17:58:40 -04002270
2271 Returns:
David Zeuthena4fee8b2016-08-22 15:20:43 -04002272 A tuple where the first element is the top-level hash and the
2273 second element is the hash-tree.
David Zeuthen21e95262016-07-27 17:58:40 -04002274 """
David Zeuthena4fee8b2016-08-22 15:20:43 -04002275 hash_ret = bytearray(tree_size)
David Zeuthen21e95262016-07-27 17:58:40 -04002276 hash_src_offset = 0
2277 hash_src_size = image_size
2278 level_num = 0
2279 while hash_src_size > block_size:
2280 level_output = ''
David Zeuthen21e95262016-07-27 17:58:40 -04002281 remaining = hash_src_size
2282 while remaining > 0:
2283 hasher = hashlib.new(name=hash_alg_name, string=salt)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002284 # Only read from the file for the first level - for subsequent
2285 # levels, access the array we're building.
2286 if level_num == 0:
2287 image.seek(hash_src_offset + hash_src_size - remaining)
2288 data = image.read(min(remaining, block_size))
2289 else:
2290 offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining
2291 data = hash_ret[offset:offset + block_size]
David Zeuthen21e95262016-07-27 17:58:40 -04002292 hasher.update(data)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002293
2294 remaining -= len(data)
David Zeuthen21e95262016-07-27 17:58:40 -04002295 if len(data) < block_size:
2296 hasher.update('\0' * (block_size - len(data)))
2297 level_output += hasher.digest()
2298 if digest_padding > 0:
2299 level_output += '\0' * digest_padding
2300
2301 padding_needed = (round_to_multiple(
2302 len(level_output), block_size) - len(level_output))
2303 level_output += '\0' * padding_needed
2304
David Zeuthena4fee8b2016-08-22 15:20:43 -04002305 # Copy level-output into resulting tree.
2306 offset = hash_level_offsets[level_num]
2307 hash_ret[offset:offset + len(level_output)] = level_output
David Zeuthen21e95262016-07-27 17:58:40 -04002308
David Zeuthena4fee8b2016-08-22 15:20:43 -04002309 # Continue on to the next level.
David Zeuthen21e95262016-07-27 17:58:40 -04002310 hash_src_size = len(level_output)
David Zeuthen21e95262016-07-27 17:58:40 -04002311 level_num += 1
2312
2313 hasher = hashlib.new(name=hash_alg_name, string=salt)
2314 hasher.update(level_output)
David Zeuthena4fee8b2016-08-22 15:20:43 -04002315 return hasher.digest(), hash_ret
David Zeuthen21e95262016-07-27 17:58:40 -04002316
2317
class AvbTool(object):
  """Object for avbtool command-line tool.

  Thin argparse front-end: each sub-command is wired to a method that
  unpacks the parsed arguments and delegates to the Avb() instance.
  """

  def __init__(self):
    """Initializer method."""
    # Single backend instance shared by all sub-command handlers.
    self.avb = Avb()

  def _add_common_args(self, sub_parser):
    """Adds arguments used by several sub-commands.

    These are the signing/vbmeta-content options shared by
    make_vbmeta_image, add_hash_footer and add_hashtree_footer.

    Arguments:
      sub_parser: The parser to add arguments to.
    """
    sub_parser.add_argument('--algorithm',
                            help='Algorithm to use (default: NONE)',
                            metavar='ALGORITHM',
                            default='NONE')
    sub_parser.add_argument('--key',
                            help='Path to RSA private key file',
                            metavar='KEY',
                            required=False)
    sub_parser.add_argument('--rollback_index',
                            help='Rollback Index',
                            type=parse_number,
                            default=0)
    # Repeatable options use action='append' and arrive as lists (or
    # None when never given).
    sub_parser.add_argument('--prop',
                            help='Add property',
                            metavar='KEY:VALUE',
                            action='append')
    sub_parser.add_argument('--prop_from_file',
                            help='Add property from file',
                            metavar='KEY:PATH',
                            action='append')
    sub_parser.add_argument('--kernel_cmdline',
                            help='Add kernel cmdline',
                            metavar='CMDLINE',
                            action='append')
    sub_parser.add_argument('--generate_dm_verity_cmdline_from_hashtree',
                            metavar='IMAGE',
                            help='Generate kernel cmdline for dm-verity',
                            type=argparse.FileType('rb'))
    sub_parser.add_argument('--include_descriptors_from_image',
                            help='Include descriptors from image',
                            metavar='IMAGE',
                            action='append',
                            type=argparse.FileType('rb'))

  def run(self, argv):
    """Command-line processor.

    Builds the sub-command parsers, dispatches to the handler bound via
    set_defaults(func=...), and converts AvbError into a one-line
    message on stderr plus exit code 1.

    Arguments:
      argv: Pass sys.argv from main.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands')

    sub_parser = subparsers.add_parser('version',
                                       help='Prints version of avbtool.')
    sub_parser.set_defaults(func=self.version)

    sub_parser = subparsers.add_parser('extract_public_key',
                                       help='Extract public key.')
    sub_parser.add_argument('--key',
                            help='Path to RSA private key file',
                            required=True)
    sub_parser.add_argument('--output',
                            help='Output file name',
                            type=argparse.FileType('wb'),
                            required=True)
    sub_parser.set_defaults(func=self.extract_public_key)

    sub_parser = subparsers.add_parser('make_vbmeta_image',
                                       help='Makes a vbmeta image.')
    sub_parser.add_argument('--output',
                            help='Output file name',
                            type=argparse.FileType('wb'),
                            required=True)
    self._add_common_args(sub_parser)
    sub_parser.add_argument('--chain_partition',
                            help='Allow signed integrity-data for partition',
                            metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
                            action='append')
    sub_parser.set_defaults(func=self.make_vbmeta_image)

    sub_parser = subparsers.add_parser('add_hash_footer',
                                       help='Add hashes and footer to image.')
    # NOTE(review): 'rab+' is not a standard open() mode string; it
    # appears to rely on lenient Python 2 / C-library mode parsing and
    # would raise ValueError on Python 3 -- confirm before porting.
    sub_parser.add_argument('--image',
                            help='Brillo boot image to add hashes to',
                            type=argparse.FileType('rab+'))
    sub_parser.add_argument('--partition_size',
                            help='Partition size',
                            type=parse_number,
                            required=True)
    sub_parser.add_argument('--partition_name',
                            help='Partition name',
                            required=True)
    sub_parser.add_argument('--hash_algorithm',
                            help='Hash algorithm to use (default: sha256)',
                            default='sha256')
    sub_parser.add_argument('--salt',
                            help='Salt in hex (default: /dev/urandom)')
    self._add_common_args(sub_parser)
    sub_parser.set_defaults(func=self.add_hash_footer)

    sub_parser = subparsers.add_parser('add_hashtree_footer',
                                       help='Add hashtree and footer to image.')
    # NOTE(review): same non-standard 'rab+' mode as add_hash_footer.
    sub_parser.add_argument('--image',
                            help='Brillo boot image to add hashes to',
                            type=argparse.FileType('rab+'))
    sub_parser.add_argument('--partition_size',
                            help='Partition size',
                            type=parse_number,
                            required=True)
    sub_parser.add_argument('--partition_name',
                            help='Partition name',
                            required=True)
    sub_parser.add_argument('--hash_algorithm',
                            help='Hash algorithm to use (default: sha1)',
                            default='sha1')
    sub_parser.add_argument('--salt',
                            help='Salt in hex (default: /dev/urandom)')
    sub_parser.add_argument('--block_size',
                            help='Block size (default: 4096)',
                            type=parse_number,
                            default=4096)
    self._add_common_args(sub_parser)
    sub_parser.set_defaults(func=self.add_hashtree_footer)

    sub_parser = subparsers.add_parser('erase_footer',
                                       help='Erase footer from an image.')
    # NOTE(review): 'rwb+' is likewise a non-standard mode string.
    sub_parser.add_argument('--image',
                            help='Brillo image with a footer',
                            type=argparse.FileType('rwb+'),
                            required=True)
    sub_parser.add_argument('--keep_hashtree',
                            help='Keep the hashtree in the image',
                            action='store_true')
    sub_parser.set_defaults(func=self.erase_footer)

    sub_parser = subparsers.add_parser(
        'info_image',
        help='Show information about vbmeta or footer.')
    sub_parser.add_argument('--image',
                            help='Brillo boot image to use',
                            type=argparse.FileType('rb'),
                            required=True)
    sub_parser.add_argument('--output',
                            help='Write info to file',
                            type=argparse.FileType('wt'),
                            default=sys.stdout)
    sub_parser.set_defaults(func=self.info_image)

    args = parser.parse_args(argv[1:])
    try:
      args.func(args)
    except AvbError as e:
      # Application-level errors print a friendly message, no traceback.
      sys.stderr.write('{}: {}\n'.format(argv[0], e.message))
      sys.exit(1)

  def version(self, _):
    """Implements the 'version' sub-command."""
    # Python 2 print statement.
    print '{}.{}'.format(AVB_VERSION_MAJOR, AVB_VERSION_MINOR)

  def extract_public_key(self, args):
    """Implements the 'extract_public_key' sub-command."""
    self.avb.extract_public_key(args.key, args.output)

  def make_vbmeta_image(self, args):
    """Implements the 'make_vbmeta_image' sub-command."""
    self.avb.make_vbmeta_image(args.output, args.chain_partition,
                               args.algorithm, args.key, args.rollback_index,
                               args.prop, args.prop_from_file,
                               args.kernel_cmdline,
                               args.generate_dm_verity_cmdline_from_hashtree,
                               args.include_descriptors_from_image)

  def add_hash_footer(self, args):
    """Implements the 'add_hash_footer' sub-command."""
    # Only the file *name* is forwarded; the backend re-opens it via
    # ImageHandler.
    self.avb.add_hash_footer(args.image.name, args.partition_size,
                             args.partition_name, args.hash_algorithm,
                             args.salt, args.algorithm, args.key,
                             args.rollback_index, args.prop,
                             args.prop_from_file, args.kernel_cmdline,
                             args.generate_dm_verity_cmdline_from_hashtree,
                             args.include_descriptors_from_image)

  def add_hashtree_footer(self, args):
    """Implements the 'add_hashtree_footer' sub-command."""
    # Only the file *name* is forwarded; the backend re-opens it via
    # ImageHandler.
    self.avb.add_hashtree_footer(args.image.name, args.partition_size,
                                 args.partition_name, args.hash_algorithm,
                                 args.block_size, args.salt, args.algorithm,
                                 args.key, args.rollback_index, args.prop,
                                 args.prop_from_file, args.kernel_cmdline,
                                 args.generate_dm_verity_cmdline_from_hashtree,
                                 args.include_descriptors_from_image)

  def erase_footer(self, args):
    """Implements the 'erase_footer' sub-command."""
    self.avb.erase_footer(args.image.name, args.keep_hashtree)

  def info_image(self, args):
    """Implements the 'info_image' sub-command."""
    self.avb.info_image(args.image.name, args.output)
David Zeuthen21e95262016-07-27 17:58:40 -04002521
2522
if __name__ == '__main__':
  # Script entry point: hand the raw argv straight to the CLI driver.
  AvbTool().run(sys.argv)