// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "bsdiff/endsley_patch_writer.h"

#include <string.h>

#include <algorithm>

#include "bsdiff/brotli_compressor.h"
#include "bsdiff/bz2_compressor.h"
#include "bsdiff/logging.h"

namespace {

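// Patches produced by this writer start with this 16-byte magic followed by
// the encoded size of the new file; after that, each control entry is emitted
// immediately followed by the diff and extra data it describes. When a
// compressor is configured, the entire output, header included, goes through
// a single compression stream.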
constexpr uint8_t kEndsleyMagicHeader[] = "ENDSLEY/BSDIFF43";

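// Serializes |x| in the sign-magnitude, little-endian format used by bsdiff:
// the low 63 bits carry the absolute value and the most significant bit of
// the last byte carries the sign. For example, EncodeInt64(-2, buf) fills
// |buf| with 02 00 00 00 00 00 00 80.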
void EncodeInt64(int64_t x, uint8_t* buf) {
  uint64_t y = x < 0 ? (1ULL << 63ULL) - x : x;
  for (int i = 0; i < 8; ++i) {
    buf[i] = y & 0xff;
    y /= 256;
  }
}

// The minimum size that we would consider flushing out.
constexpr size_t kMinimumFlushSize = 1024 * 1024;  // 1 MiB

}  // namespace

namespace bsdiff {

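// Sets up the output (a compressor or a preallocated uncompressed buffer) and
// emits the fixed 24-byte header. The patch body is produced by the
// AddControlEntry(), WriteDiffStream() and WriteExtraStream() calls that
// follow, and the result is finalized in Close().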
bool EndsleyPatchWriter::Init(size_t new_size) {
  switch (compressor_type_) {
    case CompressorType::kNoCompression:
      // The patch is uncompressed and it will need exactly:
      //   new_size + 24 * len(control_entries) + sizeof(header)
      // We don't know the number of control entries yet, but we can reserve
      // enough space to hold at least |new_size|.
      patch_->clear();
      patch_->reserve(new_size);
      break;
    case CompressorType::kBrotli:
      compressor_.reset(new BrotliCompressor(quality_));
      if (!compressor_) {
        LOG(ERROR) << "Error creating brotli compressor.";
        return false;
      }
      break;
    case CompressorType::kBZ2:
      compressor_.reset(new BZ2Compressor());
      if (!compressor_) {
        LOG(ERROR) << "Error creating BZ2 compressor.";
        return false;
      }
      break;
  }

  // The header is the magic followed by the size of the new file.
  uint8_t header[24];
  memcpy(header, kEndsleyMagicHeader, 16);
  EncodeInt64(new_size, header + 16);
  EmitBuffer(header, sizeof(header));
  return true;
}

bool EndsleyPatchWriter::WriteDiffStream(const uint8_t* data, size_t size) {
  if (!size)
    return true;
  // Speed up the common case where the diff stream data is added right after
  // the control entry that refers to it.
  if (control_.empty() && pending_diff_ >= size) {
    pending_diff_ -= size;
    EmitBuffer(data, size);
    return true;
  }

  diff_data_.insert(diff_data_.end(), data, data + size);
  return true;
}

bool EndsleyPatchWriter::WriteExtraStream(const uint8_t* data, size_t size) {
  if (!size)
    return true;
  // Speed up the common case where the extra stream data is added right after
  // the diff stream data and the control entry that refers to it. Note that
  // the diff data comes first, so we need to make sure it has all been
  // emitted.
  if (control_.empty() && !pending_diff_ && pending_extra_ >= size) {
    pending_extra_ -= size;
    EmitBuffer(data, size);
    return true;
  }

  extra_data_.insert(extra_data_.end(), data, data + size);
  return true;
}

bool EndsleyPatchWriter::AddControlEntry(const ControlEntry& entry) {
  // Speed up the common case where the control entry is added when there's
  // nothing else pending.
  if (control_.empty() && diff_data_.empty() && extra_data_.empty() &&
      !pending_diff_ && !pending_extra_) {
    pending_diff_ = entry.diff_size;
    pending_extra_ = entry.extra_size;
    EmitControlEntry(entry);
    return true;
  }

  control_.push_back(entry);
  pending_control_data_ += entry.diff_size + entry.extra_size;

  // Check whether it is worth flushing the entries now that we have more
  // control entries. We need control entries to write enough output data, and
  // we need that output data to be at least 50% of the buffered diff and extra
  // data. This last requirement reduces the overhead of erasing the flushed
  // data from the front of those buffers.
  if (pending_control_data_ > kMinimumFlushSize &&
      (diff_data_.size() + extra_data_.size()) / 2 <= pending_control_data_) {
    Flush();
  }

  return true;
}

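// Emits whatever complete records are still buffered and then verifies that
// the control entries and the diff/extra streams matched up exactly; any
// leftover state means the caller's control entries and stream data were
// inconsistent.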
bool EndsleyPatchWriter::Close() {
  // Flush any pending data.
  Flush();

  if (pending_diff_ || pending_extra_ || !control_.empty()) {
    LOG(ERROR) << "Insufficient data sent to diff/extra streams";
    return false;
  }

  if (!diff_data_.empty() || !extra_data_.empty()) {
    LOG(ERROR) << "Pending data to diff/extra not flushed out on Close()";
    return false;
  }

  if (compressor_) {
    if (!compressor_->Finish())
      return false;
    *patch_ = compressor_->GetCompressedData();
  }

  return true;
}

void EndsleyPatchWriter::EmitControlEntry(const ControlEntry& entry) {
  // Generate the 24-byte control entry.
  uint8_t buf[24];
  EncodeInt64(entry.diff_size, buf);
  EncodeInt64(entry.extra_size, buf + 8);
  EncodeInt64(entry.offset_increment, buf + 16);
  EmitBuffer(buf, sizeof(buf));
}

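// Routes |size| bytes from |data| either through the compressor or straight
// into the uncompressed patch buffer.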
void EndsleyPatchWriter::EmitBuffer(const uint8_t* data, size_t size) {
  if (compressor_) {
    compressor_->Write(data, size);
  } else {
    patch_->insert(patch_->end(), data, data + size);
  }
}

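// Drains as many complete (control entry, diff data, extra data) records as
// the buffered data allows, emitting them in patch order, then erases the
// consumed prefix of each buffer. Anything still incomplete stays queued for
// the next Flush() or Close().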
void EndsleyPatchWriter::Flush() {
  size_t used_diff = 0;
  size_t used_extra = 0;
  size_t used_control = 0;
  do {
    if (!pending_diff_ && !pending_extra_ && used_control < control_.size()) {
      // We can emit a control entry in these conditions.
      const ControlEntry& entry = control_[used_control];
      used_control++;

      pending_diff_ = entry.diff_size;
      pending_extra_ = entry.extra_size;
      pending_control_data_ -= entry.extra_size + entry.diff_size;
      EmitControlEntry(entry);
    }

    if (pending_diff_) {
      size_t diff_size = std::min(diff_data_.size() - used_diff, pending_diff_);
      EmitBuffer(diff_data_.data() + used_diff, diff_size);
      pending_diff_ -= diff_size;
      used_diff += diff_size;
    }

    if (!pending_diff_ && pending_extra_) {
      size_t extra_size =
          std::min(extra_data_.size() - used_extra, pending_extra_);
      EmitBuffer(extra_data_.data() + used_extra, extra_size);
      pending_extra_ -= extra_size;
      used_extra += extra_size;
    }
  } while (!pending_diff_ && !pending_extra_ && used_control < control_.size());

  if (used_diff)
    diff_data_.erase(diff_data_.begin(), diff_data_.begin() + used_diff);
  if (used_extra)
    extra_data_.erase(extra_data_.begin(), extra_data_.begin() + used_extra);
  if (used_control)
    control_.erase(control_.begin(), control_.begin() + used_control);
}

}  // namespace bsdiff