/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "EncryptInplace.h"

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <ext4_utils/ext4.h>
#include <ext4_utils/ext4_utils.h>
#include <f2fs_sparseblock.h>

#include <algorithm>

#include "cutils/properties.h"
#define LOG_TAG "EncryptInplace"
#include "cutils/log.h"
#include "CheckBattery.h"

// HORRIBLE HACK, FIXME
#include "cryptfs.h"

// FIXME horrible cut-and-paste code
static inline int unix_read(int fd, void* buff, int len)
{
    return TEMP_FAILURE_RETRY(read(fd, buff, len));
}

static inline int unix_write(int fd, const void* buff, int len)
{
    return TEMP_FAILURE_RETRY(write(fd, buff, len));
}

#define CRYPT_SECTORS_PER_BUFSIZE (CRYPT_INPLACE_BUFSIZE / CRYPT_SECTOR_SIZE)

/* Aligned 32K writes tend to make flash happy.
 * The SD Card Association recommends it.
 */
#ifndef CONFIG_HW_DISK_ENCRYPTION
#define BLOCKS_AT_A_TIME 8
#else
#define BLOCKS_AT_A_TIME 1024
#endif

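/* Bookkeeping for one in-place encryption pass: file descriptors for the raw
 * and dm-crypt block devices, progress counters used to drive the
 * vold.encrypt_progress / vold.encrypt_time_remaining properties, and the
 * buffered run of blocks (offset/count/buffer) waiting to be flushed. */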
struct encryptGroupsData
{
    int realfd;
    int cryptofd;
    off64_t numblocks;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;
    off64_t used_blocks_already_done, tot_used_blocks;
    char* real_blkdev, * crypto_blkdev;
    int count;
    off64_t offset;
    char* buffer;
    off64_t last_written_sector;
    int completed;
    time_t time_started;
    int remaining_time;
};

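/* Called once per block processed. Updates the percentage published through
 * the vold.encrypt_progress property and, once past 5%, estimates the time
 * left and publishes it via vold.encrypt_time_remaining. */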
static void update_progress(struct encryptGroupsData* data, int is_used)
{
    data->blocks_already_done++;

    if (is_used) {
        data->used_blocks_already_done++;
    }
    if (data->tot_used_blocks) {
        data->new_pct = data->used_blocks_already_done / data->one_pct;
    } else {
        data->new_pct = data->blocks_already_done / data->one_pct;
    }

    if (data->new_pct > data->cur_pct) {
        char buf[8];
        data->cur_pct = data->new_pct;
        snprintf(buf, sizeof(buf), "%" PRId64, data->cur_pct);
        property_set("vold.encrypt_progress", buf);
    }

    if (data->cur_pct >= 5) {
        struct timespec time_now;
        if (clock_gettime(CLOCK_MONOTONIC, &time_now)) {
            SLOGW("Error getting time");
        } else {
            double elapsed_time = difftime(time_now.tv_sec, data->time_started);
            off64_t remaining_blocks = data->tot_used_blocks
                                       - data->used_blocks_already_done;
            int remaining_time = (int)(elapsed_time * remaining_blocks
                                       / data->used_blocks_already_done);

            // Change time only if not yet set, lower, or a lot higher for
            // best user experience
            if (data->remaining_time == -1
                || remaining_time < data->remaining_time
                || remaining_time > data->remaining_time + 60) {
                char buf[8];
                snprintf(buf, sizeof(buf), "%d", remaining_time);
                property_set("vold.encrypt_time_remaining", buf);
                data->remaining_time = remaining_time;
            }
        }
    }
}

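/* Log progress as contiguous ranges: one "Encrypting from" line when a run of
 * sectors starts and one "Encrypted to" line when it ends, rather than one
 * log line per block. */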
static void log_progress(struct encryptGroupsData const* data, bool completed)
{
    // Precondition: data is null when completed is true, non-null otherwise

    // Track progress so we can skip logging blocks
    static off64_t offset = -1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (offset != -1 && data->offset != offset)) {
        SLOGI("Encrypted to sector %" PRId64,
              offset / info.block_size * CRYPT_SECTOR_SIZE);
        offset = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && offset != data->offset) {
        SLOGI("Encrypting from sector %" PRId64,
              data->offset / info.block_size * CRYPT_SECTOR_SIZE);
    }

    // Update offset
    if (!completed) {
        offset = data->offset + (off64_t)data->count * info.block_size;
    }
}

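/* Read the data->count pending blocks starting at data->offset from the raw
 * block device and write them back at the same offset through the dm-crypt
 * device, encrypting them in place. Resets the pending count. Returns 0 on
 * success, -1 on I/O error. */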
static int flush_outstanding_data(struct encryptGroupsData* data)
{
    if (data->count == 0) {
        return 0;
    }

    SLOGV("Copying %d blocks at offset %" PRIx64, data->count, data->offset);

    if (pread64(data->realfd, data->buffer,
                info.block_size * data->count, data->offset)
        <= 0) {
        SLOGE("Error reading real_blkdev %s for inplace encrypt",
              data->real_blkdev);
        return -1;
    }

    if (pwrite64(data->cryptofd, data->buffer,
                 info.block_size * data->count, data->offset)
        <= 0) {
        SLOGE("Error writing crypto_blkdev %s for inplace encrypt",
              data->crypto_blkdev);
        return -1;
    } else {
        log_progress(data, false);
    }

    data->count = 0;
    data->last_written_sector = (data->offset + data->count)
                                / info.block_size * CRYPT_SECTOR_SIZE - 1;
    return 0;
}

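/* Walk every ext4 block group, read its block bitmap, and encrypt only the
 * blocks marked in use (groups flagged EXT4_BG_BLOCK_UNINIT are treated as
 * entirely free). Writes are batched up to BLOCKS_AT_A_TIME blocks; the pass
 * stops early, still returning 0, if the battery gets too low. */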
static int encrypt_groups(struct encryptGroupsData* data)
{
    unsigned int i;
    u8 *block_bitmap = 0;
    unsigned int block;
    off64_t ret;
    int rc = -1;

    data->buffer = (char*) malloc(info.block_size * BLOCKS_AT_A_TIME);
    if (!data->buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    block_bitmap = (u8*) malloc(info.block_size);
    if (!block_bitmap) {
        SLOGE("failed to allocate block bitmap");
        goto errout;
    }

    for (i = 0; i < aux_info.groups; ++i) {
        SLOGI("Encrypting group %d", i);

        u32 first_block = aux_info.first_data_block + i * info.blocks_per_group;
        u32 block_count = std::min(info.blocks_per_group,
                                   (u32)(aux_info.len_blocks - first_block));

        off64_t offset = (u64)info.block_size
                         * aux_info.bg_desc[i].bg_block_bitmap;

        ret = pread64(data->realfd, block_bitmap, info.block_size, offset);
        if (ret != (int)info.block_size) {
            SLOGE("failed to read all of block group bitmap %d", i);
            goto errout;
        }

        offset = (u64)info.block_size * first_block;

        data->count = 0;

        for (block = 0; block < block_count; block++) {
            int used = (aux_info.bg_desc[i].bg_flags & EXT4_BG_BLOCK_UNINIT) ?
                    0 : bitmap_get_bit(block_bitmap, block);
            update_progress(data, used);
            if (used) {
                if (data->count == 0) {
                    data->offset = offset;
                }
                data->count++;
            } else {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            offset += info.block_size;

            /* Write data if we are aligned or buffer size reached */
            if (offset % (info.block_size * BLOCKS_AT_A_TIME) == 0
                || data->count == BLOCKS_AT_A_TIME) {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            if (!is_battery_ok_to_continue()) {
                SLOGE("Stopping encryption due to low battery");
                rc = 0;
                goto errout;
            }

        }
        if (flush_outstanding_data(data)) {
            goto errout;
        }
    }

    data->completed = 1;
    rc = 0;

errout:
    log_progress(0, true);
    free(data->buffer);
    free(block_bitmap);
    return rc;
}

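/* Encrypt an ext4 filesystem in place by copying only its in-use blocks from
 * real_blkdev through the dm-crypt device crypto_blkdev. Returns 0 on success
 * (or a clean early stop), ENABLE_INPLACE_ERR_DEV if the crypto device never
 * appears, and -1 if the device cannot be parsed as ext4, letting the caller
 * fall back to another strategy. Advances *size_already_done as it goes. */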
static int cryptfs_enable_inplace_ext4(char *crypto_blkdev,
                                       char *real_blkdev,
                                       off64_t size,
                                       off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    u32 i;
    struct encryptGroupsData data;
    int rc; // Can't initialize without causing warning -Wclobbered
    int retries = RETRY_MOUNT_ATTEMPTS;
    struct timespec time_started = {0};

    if (previously_encrypted_upto > *size_already_done) {
        SLOGD("Not fast encrypting since resuming part way through");
        return -1;
    }

    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;

    if ( (data.realfd = open(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for inplace encrypt. err=%d(%s)\n",
              real_blkdev, errno, strerror(errno));
        rc = -1;
        goto errout;
    }

    // Wait until the block device appears.  Re-use the mount retry values since it is reasonable.
    while ((data.cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        if (--retries) {
            SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s), retrying\n",
                  crypto_blkdev, errno, strerror(errno));
            sleep(RETRY_MOUNT_DELAY_SECONDS);
        } else {
            SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s)\n",
                  crypto_blkdev, errno, strerror(errno));
            rc = ENABLE_INPLACE_ERR_DEV;
            goto errout;
        }
    }

    if (setjmp(setjmp_env)) { // NOLINT
        SLOGE("Reading ext4 extent caused an exception\n");
        rc = -1;
        goto errout;
    }

    if (read_ext(data.realfd, 0) != 0) {
        SLOGE("Failed to read ext4 extent\n");
        rc = -1;
        goto errout;
    }

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    SLOGI("Encrypting ext4 filesystem in place...");

    data.tot_used_blocks = data.numblocks;
    for (i = 0; i < aux_info.groups; ++i) {
        data.tot_used_blocks -= aux_info.bg_desc[i].bg_free_blocks_count;
    }

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;

    if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
        SLOGW("Error getting time at start");
        // Note - continue anyway - we'll run with 0
    }
    data.time_started = time_started.tv_sec;
    data.remaining_time = -1;

    rc = encrypt_groups(&data);
    if (rc) {
        SLOGE("Error encrypting groups");
        goto errout;
    }

    *size_already_done += data.completed ? size : data.last_written_sector;
    rc = 0;

errout:
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

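/* f2fs variant of log_progress(): logs contiguous runs of encrypted blocks as
 * a single "Encrypting from" / "Encrypted to" pair rather than per block. */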
static void log_progress_f2fs(u64 block, bool completed)
{
    // Precondition: when completed is true, block is ignored

    // Track progress so we can skip logging blocks
    static u64 last_block = (u64)-1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (last_block != (u64)-1 && block != last_block + 1)) {
        SLOGI("Encrypted to block %" PRId64, last_block);
        last_block = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && (last_block == (u64)-1 || block != last_block + 1)) {
        SLOGI("Encrypting from block %" PRId64, block);
    }

    // Update offset
    if (!completed) {
        last_block = block;
    }
}

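/* Callback invoked by run_on_used_blocks() for each in-use f2fs block: copies
 * one block from the raw device to the dm-crypt device at the same offset and
 * updates the progress properties. Returns 0 on success, -1 on I/O error. */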
static int encrypt_one_block_f2fs(u64 pos, void *data)
{
    struct encryptGroupsData *priv_dat = (struct encryptGroupsData *)data;

    priv_dat->blocks_already_done = pos - 1;
    update_progress(priv_dat, 1);

    off64_t offset = pos * CRYPT_INPLACE_BUFSIZE;

    if (pread64(priv_dat->realfd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        SLOGE("Error reading real_blkdev %s for f2fs inplace encrypt", priv_dat->real_blkdev);
        return -1;
    }

    if (pwrite64(priv_dat->cryptofd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        SLOGE("Error writing crypto_blkdev %s for f2fs inplace encrypt", priv_dat->crypto_blkdev);
        return -1;
    } else {
        log_progress_f2fs(pos, false);
    }

    return 0;
}

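/* Encrypt an f2fs filesystem in place: parse the f2fs metadata from
 * real_blkdev and let run_on_used_blocks() feed every in-use block to
 * encrypt_one_block_f2fs(). Returns 0 on success, ENABLE_INPLACE_ERR_DEV if
 * the crypto device cannot be opened, and ENABLE_INPLACE_ERR_OTHER for any
 * other failure. */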
static int cryptfs_enable_inplace_f2fs(char *crypto_blkdev,
                                       char *real_blkdev,
                                       off64_t size,
                                       off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    struct encryptGroupsData data;
    struct f2fs_info *f2fs_info = NULL;
    int rc = ENABLE_INPLACE_ERR_OTHER;
    if (previously_encrypted_upto > *size_already_done) {
        SLOGD("Not fast encrypting since resuming part way through");
        return ENABLE_INPLACE_ERR_OTHER;
    }
    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    data.realfd = -1;
    data.cryptofd = -1;
    if ( (data.realfd = open64(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for f2fs inplace encrypt\n",
              real_blkdev);
        goto errout;
    }
    if ( (data.cryptofd = open64(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening crypto_blkdev %s for f2fs inplace encrypt. err=%d(%s)\n",
              crypto_blkdev, errno, strerror(errno));
        rc = ENABLE_INPLACE_ERR_DEV;
        goto errout;
    }

    f2fs_info = generate_f2fs_info(data.realfd);
    if (!f2fs_info)
        goto errout;

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    data.tot_used_blocks = get_num_blocks_used(f2fs_info);

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;
    data.time_started = time(NULL);
    data.remaining_time = -1;

    data.buffer = (char*) malloc(f2fs_info->block_size);
    if (!data.buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    data.count = 0;

    /* Currently, this either runs to completion, or hits a nonrecoverable error */
    rc = run_on_used_blocks(data.blocks_already_done, f2fs_info, &encrypt_one_block_f2fs, &data);

    if (rc) {
        SLOGE("Error in running over f2fs blocks");
        rc = ENABLE_INPLACE_ERR_OTHER;
        goto errout;
    }

    *size_already_done += size;
    rc = 0;

errout:
    if (rc)
        SLOGE("Failed to encrypt f2fs filesystem on %s", real_blkdev);

    log_progress_f2fs(0, true);
    free(f2fs_info);
    free(data.buffer);
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

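/* Last-resort path: encrypt every sector of the partition, used or not, by
 * copying it from real_blkdev through crypto_blkdev. Handles a leading
 * unaligned run of 512-byte sectors, then whole CRYPT_INPLACE_BUFSIZE blocks,
 * then any trailing remainder; stops cleanly if the battery gets too low. */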
static int cryptfs_enable_inplace_full(char *crypto_blkdev, char *real_blkdev,
                                       off64_t size, off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    int realfd, cryptofd;
    char buf[CRYPT_INPLACE_BUFSIZE];
    int rc = ENABLE_INPLACE_ERR_OTHER;
    off64_t numblocks, i, remainder;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;

    if ( (realfd = open(real_blkdev, O_RDONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for inplace encrypt\n", real_blkdev);
        return ENABLE_INPLACE_ERR_OTHER;
    }

    if ( (cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening crypto_blkdev %s for inplace encrypt. err=%d(%s)\n",
              crypto_blkdev, errno, strerror(errno));
        close(realfd);
        return ENABLE_INPLACE_ERR_DEV;
    }

    /* This is pretty much a simple loop of reading 4K, and writing 4K.
     * The size passed in is the number of 512 byte sectors in the filesystem.
     * So compute the number of whole 4K blocks we should read/write,
     * and the remainder.
     */
    numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    remainder = size % CRYPT_SECTORS_PER_BUFSIZE;
    tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    SLOGE("Encrypting filesystem in place...");

    i = previously_encrypted_upto + 1 - *size_already_done;

    if (lseek64(realfd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        SLOGE("Cannot seek to previously encrypted point on %s", real_blkdev);
        goto errout;
    }

    if (lseek64(cryptofd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        SLOGE("Cannot seek to previously encrypted point on %s", crypto_blkdev);
        goto errout;
    }

    for (;i < size && i % CRYPT_SECTORS_PER_BUFSIZE != 0; ++i) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error reading initial sectors from real_blkdev %s for "
                  "inplace encrypt\n", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error writing initial sectors to crypto_blkdev %s for "
                  "inplace encrypt\n", crypto_blkdev);
            goto errout;
        } else {
            SLOGI("Encrypted 1 block at %" PRId64, i);
        }
    }

    one_pct = tot_numblocks / 100;
    cur_pct = 0;
    /* process the majority of the filesystem in blocks */
    for (i/=CRYPT_SECTORS_PER_BUFSIZE; i<numblocks; i++) {
        new_pct = (i + blocks_already_done) / one_pct;
        if (new_pct > cur_pct) {
            char buf[8];

            cur_pct = new_pct;
            snprintf(buf, sizeof(buf), "%" PRId64, cur_pct);
            property_set("vold.encrypt_progress", buf);
        }
        if (unix_read(realfd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            SLOGE("Error reading real_blkdev %s for inplace encrypt", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            SLOGE("Error writing crypto_blkdev %s for inplace encrypt", crypto_blkdev);
            goto errout;
        } else {
            SLOGD("Encrypted %d block at %" PRId64,
                  CRYPT_SECTORS_PER_BUFSIZE,
                  i * CRYPT_SECTORS_PER_BUFSIZE);
        }

        if (!is_battery_ok_to_continue()) {
            SLOGE("Stopping encryption due to low battery");
            *size_already_done += (i + 1) * CRYPT_SECTORS_PER_BUFSIZE - 1;
            rc = 0;
            goto errout;
        }
    }

    /* Do any remaining sectors */
    for (i=0; i<remainder; i++) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error reading final sectors from real_blkdev %s for inplace encrypt", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error writing final sectors to crypto_blkdev %s for inplace encrypt", crypto_blkdev);
            goto errout;
        } else {
            SLOGI("Encrypted 1 block at next location");
        }
    }

    *size_already_done += size;
    rc = 0;

errout:
    close(realfd);
    close(cryptofd);

    return rc;
}

/* Returns one of the ENABLE_INPLACE_* return codes */
int cryptfs_enable_inplace(char *crypto_blkdev, char *real_blkdev,
                           off64_t size, off64_t *size_already_done,
                           off64_t tot_size,
                           off64_t previously_encrypted_upto)
{
    int rc_ext4, rc_f2fs, rc_full;
    if (previously_encrypted_upto) {
        SLOGD("Continuing encryption from %" PRId64, previously_encrypted_upto);
    }

    if (*size_already_done + size < previously_encrypted_upto) {
        *size_already_done += size;
        return 0;
    }

    /* TODO: identify filesystem type.
     * As is, cryptfs_enable_inplace_ext4 will fail on an f2fs partition, and
     * then we will drop down to cryptfs_enable_inplace_f2fs.
     */
    if ((rc_ext4 = cryptfs_enable_inplace_ext4(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    SLOGD("cryptfs_enable_inplace_ext4()=%d\n", rc_ext4);

    if ((rc_f2fs = cryptfs_enable_inplace_f2fs(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    SLOGD("cryptfs_enable_inplace_f2fs()=%d\n", rc_f2fs);

    rc_full = cryptfs_enable_inplace_full(crypto_blkdev, real_blkdev,
                                          size, size_already_done, tot_size,
                                          previously_encrypted_upto);
    SLOGD("cryptfs_enable_inplace_full()=%d\n", rc_full);

    /* Hack for b/17898962: if every path failed because the crypto block
     * device could not be opened, report that specific error. */
    if (rc_ext4 == ENABLE_INPLACE_ERR_DEV
        && rc_f2fs == ENABLE_INPLACE_ERR_DEV
        && rc_full == ENABLE_INPLACE_ERR_DEV) {
        return ENABLE_INPLACE_ERR_DEV;
    }
    return rc_full;
}