/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.app.backup;

import android.os.ParcelFileDescriptor;
import android.util.ArrayMap;
import android.util.Log;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

/**
 * Utility class for writing BackupHelpers whose underlying data is a
 * fixed set of byte-array blobs. The helper manages diff detection
 * and compression on the wire.
 *
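 * <p>A minimal sketch of how a subclass might wire this up; the {@code "widgets"} key and
 * the {@code WidgetStore} data source are hypothetical names used only for illustration:
 * <pre>{@code
 * public class WidgetBackupHelper extends BlobBackupHelper {
 *     private static final int BLOB_VERSION = 1;
 *     private static final String KEY_WIDGETS = "widgets";
 *
 *     public WidgetBackupHelper() {
 *         super(BLOB_VERSION, KEY_WIDGETS);
 *     }
 *
 *     protected byte[] getBackupPayload(String key) {
 *         // Return the serialized current state for this key, or null if there is none.
 *         return KEY_WIDGETS.equals(key) ? WidgetStore.serialize() : null;
 *     }
 *
 *     protected void applyRestoredPayload(String key, byte[] payload) {
 *         if (KEY_WIDGETS.equals(key) && payload != null) {
 *             WidgetStore.apply(payload);
 *         }
 *     }
 * }
 * }</pre>
 *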
 * @hide
 */
public abstract class BlobBackupHelper implements BackupHelper {
    private static final String TAG = "BlobBackupHelper";
    private static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);

    private final int mCurrentBlobVersion;
    private final String[] mKeys;

    public BlobBackupHelper(int currentBlobVersion, String... keys) {
        mCurrentBlobVersion = currentBlobVersion;
        mKeys = keys;
    }

    // Client interface

    /**
     * Generate and return the byte array containing the backup payload describing
     * the current data state. During a backup operation this method is called once
     * per key that was supplied to the helper's constructor.
     *
     * @param key The key, as supplied to the constructor, identifying the data set
     *     whose payload is being requested.
     * @return A byte array containing the data blob that the caller wishes to store,
     *     or {@code null} if the current state is empty or undefined.
     */
    abstract protected byte[] getBackupPayload(String key);

    /**
     * Given a byte array that was restored from backup, do whatever is appropriate
     * to apply that described state in the live system. This method is called once
     * per key/value payload that was delivered for restore. Typically data is delivered
     * for restore in lexical order by key, <i>not</i> in the order in which the keys
     * were supplied in the constructor.
     *
     * @param key The key, as supplied to the constructor, identifying the data set
     *     being restored.
     * @param payload The byte array that was passed to {@link #getBackupPayload(String)}
     *     on the ancestral device.
     */
    abstract protected void applyRestoredPayload(String key, byte[] payload);


    // Internal implementation

    /*
     * State on-disk format:
     * [Int]    : overall blob version number
     * [Int=N]  : number of keys represented in the state blob
     * N* :
     *     [String] key
     *     [Long]   blob checksum, calculated after compression
     */
    @SuppressWarnings("resource")
    private ArrayMap<String, Long> readOldState(ParcelFileDescriptor oldStateFd) {
        final ArrayMap<String, Long> state = new ArrayMap<String, Long>();

        FileInputStream fis = new FileInputStream(oldStateFd.getFileDescriptor());
        BufferedInputStream bis = new BufferedInputStream(fis);
        DataInputStream in = new DataInputStream(bis);

        try {
            int version = in.readInt();
            if (version <= mCurrentBlobVersion) {
                final int numKeys = in.readInt();
                for (int i = 0; i < numKeys; i++) {
                    String key = in.readUTF();
                    long checksum = in.readLong();
                    state.put(key, checksum);
                }
            } else {
                Log.w(TAG, "Prior state from unrecognized version " + version);
            }
        } catch (EOFException e) {
            // Empty file is expected on first backup, so carry on. If the state
            // is truncated we just treat it the same way.
            state.clear();
        } catch (Exception e) {
            Log.e(TAG, "Error examining prior backup state " + e.getMessage());
            state.clear();
        }

        return state;
    }

    /**
     * Write the new overall state record to the given state file.
     */
    private void writeBackupState(ArrayMap<String, Long> state, ParcelFileDescriptor stateFile) {
        try {
            FileOutputStream fos = new FileOutputStream(stateFile.getFileDescriptor());

            // We explicitly don't close 'out' because we must not close the backing fd.
            // The FileOutputStream will not close it implicitly.
            @SuppressWarnings("resource")
            DataOutputStream out = new DataOutputStream(fos);

            out.writeInt(mCurrentBlobVersion);

            final int N = (state != null) ? state.size() : 0;
            out.writeInt(N);
            for (int i = 0; i < N; i++) {
                out.writeUTF(state.keyAt(i));
                out.writeLong(state.valueAt(i).longValue());
            }
        } catch (IOException e) {
            Log.e(TAG, "Unable to write updated state", e);
        }
    }

    // Also versions the deflated blob internally in case we need to revise it
    private byte[] deflate(byte[] data) {
        byte[] result = null;
        if (data != null) {
            try {
                ByteArrayOutputStream sink = new ByteArrayOutputStream();
                DataOutputStream headerOut = new DataOutputStream(sink);

                // write the header directly to the sink ahead of the deflated payload
                headerOut.writeInt(mCurrentBlobVersion);

                DeflaterOutputStream out = new DeflaterOutputStream(sink);
                out.write(data);
                out.close();  // finishes and commits the compression run
                result = sink.toByteArray();
                if (DEBUG) {
                    Log.v(TAG, "Deflated " + data.length + " bytes to " + result.length);
                }
            } catch (IOException e) {
                Log.w(TAG, "Unable to process payload: " + e.getMessage());
            }
        }
        return result;
    }

    // Returns null if inflation failed
    private byte[] inflate(byte[] compressedData) {
        byte[] result = null;
        if (compressedData != null) {
            try {
                ByteArrayInputStream source = new ByteArrayInputStream(compressedData);
                DataInputStream headerIn = new DataInputStream(source);
                int version = headerIn.readInt();
                if (version > mCurrentBlobVersion) {
                    Log.w(TAG, "Saved payload from unrecognized version " + version);
                    return null;
                }

                InflaterInputStream in = new InflaterInputStream(source);
                ByteArrayOutputStream inflated = new ByteArrayOutputStream();
                byte[] buffer = new byte[4096];
                int nRead;
                while ((nRead = in.read(buffer)) > 0) {
                    inflated.write(buffer, 0, nRead);
                }
                in.close();
                inflated.flush();
                result = inflated.toByteArray();
                if (DEBUG) {
                    Log.v(TAG, "Inflated " + compressedData.length + " bytes to " + result.length);
                }
            } catch (IOException e) {
                // result is still null here
                Log.w(TAG, "Unable to process restored payload: " + e.getMessage());
            }
        }
        return result;
    }

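    // Computes a CRC32 checksum over the deflated payload bytes; -1 stands in for a
    // null payload or a checksum failure.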
    private long checksum(byte[] buffer) {
        if (buffer != null) {
            try {
                CRC32 crc = new CRC32();
                ByteArrayInputStream bis = new ByteArrayInputStream(buffer);
                byte[] buf = new byte[4096];
                int nRead = 0;
                while ((nRead = bis.read(buf)) >= 0) {
                    crc.update(buf, 0, nRead);
                }
                return crc.getValue();
            } catch (Exception e) {
                // whoops; fall through with an explicitly bogus checksum
            }
        }
        return -1;
    }

    // BackupHelper interface

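    // Backup pass: deflate each key's current payload and write it only when its checksum
    // differs from the one recorded in the prior state file.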
    @Override
    public void performBackup(ParcelFileDescriptor oldStateFd, BackupDataOutput data,
            ParcelFileDescriptor newStateFd) {

        final ArrayMap<String, Long> oldState = readOldState(oldStateFd);
        final ArrayMap<String, Long> newState = new ArrayMap<String, Long>();

        try {
            for (String key : mKeys) {
                final byte[] payload = deflate(getBackupPayload(key));
                final long checksum = checksum(payload);
                newState.put(key, checksum);

                Long oldChecksum = oldState.get(key);
                if (oldChecksum == null || checksum != oldChecksum) {
                    if (DEBUG) {
                        Log.i(TAG, "State has changed for key " + key + ", writing");
                    }
                    if (payload != null) {
                        data.writeEntityHeader(key, payload.length);
                        data.writeEntityData(payload, payload.length);
                    } else {
                        // state's changed but there's no current payload => delete
                        data.writeEntityHeader(key, -1);
                    }
                } else {
                    if (DEBUG) {
                        Log.i(TAG, "No change under key " + key + " => not writing");
                    }
                }
            }
        } catch (Exception e) {
            Log.w(TAG, "Unable to record backup state: " + e.getMessage());
            newState.clear();
        } finally {
            // Always recommit the state even if nothing changed
            writeBackupState(newState, newStateFd);
        }
    }

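    // Restore pass: entities arrive one at a time; unknown keys are ignored, known ones are
    // inflated and handed to the subclass via applyRestoredPayload().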
    @Override
    public void restoreEntity(BackupDataInputStream data) {
        final String key = data.getKey();
        try {
            // known key?
            int which;
            for (which = 0; which < mKeys.length; which++) {
                if (key.equals(mKeys[which])) {
                    break;
                }
            }
            if (which >= mKeys.length) {
                Log.e(TAG, "Unrecognized key " + key + ", ignoring");
                return;
            }

            byte[] compressed = new byte[data.size()];
            data.read(compressed);
            byte[] payload = inflate(compressed);
            applyRestoredPayload(key, payload);
        } catch (Exception e) {
            Log.e(TAG, "Exception restoring entity " + key + " : " + e.getMessage());
        }
    }

    @Override
    public void writeNewStateDescription(ParcelFileDescriptor newState) {
        // Just ensure that we do a full backup the first time after a restore
        writeBackupState(null, newState);
    }
}