Updated calls to hashlib.new() to be py3 compatible.

The keyword argument 'string' was renamed to 'data' in Python 3.
Changed the calls to pass both arguments positionally so they are
compatible with both Python 2 and Python 3.
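For example, the old keyword form fails under Python 3 while the
positional form works on both (a minimal sketch; 'sha256' and the
sample inputs are illustrative, not taken from avbtool):

    import hashlib

    # Python 2 accepted hashlib.new(name='sha256', string=b'salt');
    # Python 3 renamed 'string' to 'data', so that call raises TypeError.
    # Passing both arguments positionally works on either version:
    hasher = hashlib.new('sha256', b'salt')
    hasher.update(b'some data')
    print(hasher.hexdigest())
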
Bug: 151336743
Test: atest --host libavb_host_unittest
Test: atest --host aftltool_test
Test: ./aftltool_integration_test.py
Change-Id: I93bd18eec5eac9d8f5ccf265efac6f8d46865b5c
diff --git a/avbtool b/avbtool
index 0e89f33..840741c 100755
--- a/avbtool
+++ b/avbtool
@@ -1350,7 +1350,7 @@
self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
o += salt_len
self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
- if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
+ if root_digest_len != len(hashlib.new(self.hash_algorithm).digest()):
if root_digest_len != 0:
raise LookupError('root_digest_len doesn\'t match hash algorithm')
@@ -1441,7 +1441,7 @@
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename)
# Generate the hashtree and check that it matches what's in the file.
- digest_size = len(hashlib.new(name=self.hash_algorithm).digest())
+ digest_size = len(hashlib.new(self.hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
(hash_level_offsets, tree_size) = calc_hash_level_offsets(
self.image_size, self.data_block_size, digest_size + digest_padding)
@@ -1537,7 +1537,7 @@
self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
o += salt_len
self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)]
- if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
+ if digest_len != len(hashlib.new(self.hash_algorithm).digest()):
if digest_len != 0:
raise LookupError('digest_len doesn\'t match hash algorithm')
@@ -2513,7 +2513,7 @@
image.seek(offset)
vbmeta_blob = image.read(size)
- hasher = hashlib.new(name=hash_algorithm)
+ hasher = hashlib.new(hash_algorithm)
hasher.update(vbmeta_blob)
for desc in descriptors:
@@ -3264,7 +3264,7 @@
'size of {}.'.format(image.image_size, max_image_size,
partition_size))
- digest_size = len(hashlib.new(name=hash_algorithm).digest())
+ digest_size = len(hashlib.new(hash_algorithm).digest())
if salt:
salt = binascii.unhexlify(salt)
elif salt is None and not use_persistent_digest:
@@ -3276,7 +3276,7 @@
else:
salt = ''
- hasher = hashlib.new(name=hash_algorithm, string=salt)
+ hasher = hashlib.new(hash_algorithm, salt)
# TODO(zeuthen): might want to read this in chunks to avoid
# memory pressure, then again, this is only supposed to be used
# on kernel/initramfs partitions. Possible optimization.
@@ -3426,7 +3426,7 @@
print('1.{}'.format(required_libavb_version_minor))
return
- digest_size = len(hashlib.new(name=hash_algorithm).digest())
+ digest_size = len(hashlib.new(hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
# If |partition_size| is given (e.g. not 0), calculate the maximum image
@@ -3918,7 +3918,7 @@
level_output_list = []
remaining = hash_src_size
while remaining > 0:
- hasher = hashlib.new(name=hash_alg_name, string=salt)
+ hasher = hashlib.new(hash_alg_name, salt)
# Only read from the file for the first level - for subsequent
# levels, access the array we're building.
if level_num == 0:
@@ -3950,7 +3950,7 @@
hash_src_size = len(level_output)
level_num += 1
- hasher = hashlib.new(name=hash_alg_name, string=salt)
+ hasher = hashlib.new(hash_alg_name, salt)
hasher.update(level_output)
return hasher.digest(), hash_ret
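
On the TODO(zeuthen) note in the @@ -3276 hunk above: reading the
partition in fixed-size chunks keeps memory usage flat regardless of
image size. A minimal sketch of that approach (the function name, chunk
size, and file handling are illustrative, not avbtool code):

    import hashlib

    def hash_file_in_chunks(path, hash_name, salt=b'', chunk_size=1 << 20):
        # Seed the hasher with the salt, then stream the file through it
        # one chunk at a time instead of reading it into memory at once.
        hasher = hashlib.new(hash_name, salt)
        with open(path, 'rb') as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                hasher.update(chunk)
        return hasher.digest()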