blob: 3c3ec7daff631eaf9cb197223fe5f21464e65d2e [file] [log] [blame]
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +00001// Copyright (c) 2013 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/simple/simple_entry_impl.h"
6
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +01007#include <algorithm>
8#include <cstring>
9#include <vector>
10
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +000011#include "base/bind.h"
12#include "base/bind_helpers.h"
13#include "base/callback.h"
14#include "base/location.h"
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +010015#include "base/logging.h"
Torne (Richard Coles)868fa2f2013-06-11 10:57:03 +010016#include "base/message_loop/message_loop_proxy.h"
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +010017#include "base/metrics/histogram.h"
Torne (Richard Coles)7d4cd472013-06-19 11:58:07 +010018#include "base/task_runner.h"
Ben Murdocheb525c52013-07-10 11:40:50 +010019#include "base/time/time.h"
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +000020#include "net/base/io_buffer.h"
21#include "net/base/net_errors.h"
Ben Murdoch7dbb3d52013-07-17 14:55:54 +010022#include "net/disk_cache/net_log_parameters.h"
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +010023#include "net/disk_cache/simple/simple_backend_impl.h"
24#include "net/disk_cache/simple/simple_index.h"
Ben Murdoch7dbb3d52013-07-17 14:55:54 +010025#include "net/disk_cache/simple/simple_net_log_parameters.h"
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +000026#include "net/disk_cache/simple/simple_synchronous_entry.h"
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +010027#include "net/disk_cache/simple/simple_util.h"
28#include "third_party/zlib/zlib.h"
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +000029
30namespace {
31
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +010032// Used in histograms, please only add entries at the end.
33enum ReadResult {
34 READ_RESULT_SUCCESS = 0,
35 READ_RESULT_INVALID_ARGUMENT = 1,
36 READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
37 READ_RESULT_BAD_STATE = 3,
38 READ_RESULT_FAST_EMPTY_RETURN = 4,
39 READ_RESULT_SYNC_READ_FAILURE = 5,
40 READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
41 READ_RESULT_MAX = 7,
42};
43
44// Used in histograms, please only add entries at the end.
45enum WriteResult {
46 WRITE_RESULT_SUCCESS = 0,
47 WRITE_RESULT_INVALID_ARGUMENT = 1,
48 WRITE_RESULT_OVER_MAX_SIZE = 2,
49 WRITE_RESULT_BAD_STATE = 3,
50 WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
51 WRITE_RESULT_MAX = 5,
52};
53
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +010054// Used in histograms, please only add entries at the end.
55enum HeaderSizeChange {
56 HEADER_SIZE_CHANGE_INITIAL,
57 HEADER_SIZE_CHANGE_SAME,
58 HEADER_SIZE_CHANGE_INCREASE,
59 HEADER_SIZE_CHANGE_DECREASE,
60 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
61 HEADER_SIZE_CHANGE_MAX
62};
63
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +010064void RecordReadResult(ReadResult result) {
65 UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
66};
67
68void RecordWriteResult(WriteResult result) {
69 UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
70 result, WRITE_RESULT_MAX);
71};
72
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +010073// TODO(ttuttle): Consider removing this once we have a good handle on header
74// size changes.
75void RecordHeaderSizeChange(int old_size, int new_size) {
76 HeaderSizeChange size_change;
77
78 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSize", new_size);
79
80 if (old_size == 0) {
81 size_change = HEADER_SIZE_CHANGE_INITIAL;
82 } else if (new_size == old_size) {
83 size_change = HEADER_SIZE_CHANGE_SAME;
84 } else if (new_size > old_size) {
85 int delta = new_size - old_size;
86 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeIncreaseAbsolute",
87 delta);
88 UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeIncreasePercentage",
89 delta * 100 / old_size);
90 size_change = HEADER_SIZE_CHANGE_INCREASE;
91 } else { // new_size < old_size
92 int delta = old_size - new_size;
93 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeDecreaseAbsolute",
94 delta);
95 UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeDecreasePercentage",
96 delta * 100 / old_size);
97 size_change = HEADER_SIZE_CHANGE_DECREASE;
98 }
99
100 UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
101 size_change,
102 HEADER_SIZE_CHANGE_MAX);
103}
104
105void RecordUnexpectedStream0Write() {
106 UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
107 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
108 HEADER_SIZE_CHANGE_MAX);
109}
110
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100111// Short trampoline to take an owned input parameter and call a net completion
112// callback with its value.
113void CallCompletionCallback(const net::CompletionCallback& callback,
114 scoped_ptr<int> result) {
115 DCHECK(result);
116 if (!callback.is_null())
117 callback.Run(*result);
118}
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000119
Ben Murdoch558790d2013-07-30 15:19:42 +0100120int g_open_entry_count = 0;
121
122void AdjustOpenEntryCountBy(int offset) {
123 g_open_entry_count += offset;
124 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.GlobalOpenEntryCount",
125 g_open_entry_count);
126}
127
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000128} // namespace
129
130namespace disk_cache {
131
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100132using base::Closure;
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000133using base::FilePath;
134using base::MessageLoopProxy;
135using base::Time;
Torne (Richard Coles)7d4cd472013-06-19 11:58:07 +0100136using base::TaskRunner;
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000137
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100138// A helper class to insure that RunNextOperationIfNeeded() is called when
139// exiting the current stack frame.
140class SimpleEntryImpl::ScopedOperationRunner {
141 public:
142 explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
143 }
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000144
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100145 ~ScopedOperationRunner() {
146 entry_->RunNextOperationIfNeeded();
147 }
148
149 private:
150 SimpleEntryImpl* const entry_;
151};
152
// Constructs an entry bound to |backend|'s worker pool and net log. The entry
// starts in STATE_UNINITIALIZED; OpenEntry()/CreateEntry() drive it to a
// usable state. |operations_mode| selects whether optimistic (immediate-
// return) create/write paths are allowed.
SimpleEntryImpl::SimpleEntryImpl(const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) {
  // All per-stream bookkeeping arrays must stay the same length; enforce it
  // at compile time.
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  // Zero all stream state before anything can observe it.
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}
182
// Queues an asynchronous open of this entry. If the index is initialized and
// definitively does not contain the entry's hash, fails fast with ERR_FAILED
// without queueing anything; otherwise enqueues an open operation and returns
// ERR_IO_PENDING (|callback| fires with the eventual result).
int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  // INDEX_NOEXIST means the index itself is not ready, so we cannot tell
  // hit from miss and must attempt the open.
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If entry is not known to the index, initiate fast failover to the network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
220
// Queues creation of the entry. In optimistic mode with a fresh entry and an
// empty queue, the entry pointer is handed to the caller immediately
// (returning net::OK) and the real file creation happens later with a null
// callback; otherwise the operation is queued normally and ERR_IO_PENDING is
// returned.
int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    // Hand out the entry now; the queued create gets no callback and no
    // out-param since the caller already has its pointer.
    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst scenario is when we
  // have the entry in the index but we don't have the created files yet, this
  // way we never leak files. CreationOperationComplete will remove the entry
  // from the index if the creation fails.
  backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}
254
// Dooms the entry: removes it from the index/backend immediately and posts
// the on-disk deletion to the worker pool. |callback| is run on reply with
// the synchronous result; always returns ERR_IO_PENDING.
int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  // |result| is filled in by the worker-side task via the raw pointer, then
  // ownership moves (base::Passed) into the reply closure.
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
  return net::ERR_IO_PENDING;
}
268
Ben Murdoch2385ea32013-08-06 11:01:04 +0100269void SimpleEntryImpl::SetKey(const std::string& key) {
270 key_ = key;
271 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
272 net::NetLog::StringCallback("key", &key));
273}
274
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000275void SimpleEntryImpl::Doom() {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100276 DoomEntry(CompletionCallback());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000277}
278
// Drops one caller reference. Each ReturnEntryToCaller() bumped both
// |open_count_| and the refcount; Close() undoes one of each. Only the last
// close enqueues the real close operation that tears down the synchronous
// entry.
void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    // Other callers still hold the entry open; just drop this reference.
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  // Last close: queue the close operation before releasing, so the pending
  // operation (not the caller) keeps the entry alive until it runs.
  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}
296
297std::string SimpleEntryImpl::GetKey() const {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100298 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000299 return key_;
300}
301
302Time SimpleEntryImpl::GetLastUsed() const {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100303 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000304 return last_used_;
305}
306
307Time SimpleEntryImpl::GetLastModified() const {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100308 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000309 return last_modified_;
310}
311
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100312int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
313 DCHECK(io_thread_checker_.CalledOnValidThread());
314 DCHECK_LE(0, data_size_[stream_index]);
315 return data_size_[stream_index];
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000316}
317
// Queues a read of |buf_len| bytes from |stream_index| at |offset|.
// Returns ERR_INVALID_ARGUMENT for a bad stream index or negative length,
// 0 synchronously when the entry is idle and the request is trivially empty
// (offset past EOF, negative, or zero length), and ERR_IO_PENDING otherwise.
int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
            false));
  }

  // Note: a negative |offset| is not rejected here; it falls into the
  // empty-return check below (only when the queue is idle).
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  // Fast path: with no queued operations the data sizes are current, so a
  // read at/past EOF (or an empty read) can complete synchronously with 0.
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
      offset < 0 || !buf_len)) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
361
// Queues a write of |buf_len| bytes to |stream_index| at |offset|. Rejects
// bad arguments and writes that would exceed the backend's max file size.
// In optimistic mode with an idle, ready entry, the write "completes"
// immediately (returns |buf_len|) against a private copy of |buf|; otherwise
// returns ERR_IO_PENDING and |callback| fires later.
int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  // Ensures the queue is kicked on every return path below.
  ScopedOperationRunner operation_runner(this);

  // Currently, Simple Cache is only used for HTTP, which stores the headers in
  // stream 0 and always writes them with a single, truncating write. Detect
  // these writes and record the size and size changes of the headers. Also,
  // note writes to stream 0 that violate those assumptions.
  if (stream_index == 0) {
    if (offset == 0 && truncate)
      RecordHeaderSizeChange(data_size_[0], buf_len);
    else
      RecordUnexpectedStream0Write();
  }

  // We can only do optimistic Write if there is no pending operations, so
  // that we are sure that the next call to RunNextOperationIfNeeded will
  // actually run the write operation that sets the stream size. It also
  // prevents from previous possibly-conflicting writes that could be stacked
  // in the |pending_operations_|. We could optimize this for when we have
  // only read operations enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    // The caller may reuse |buf| as soon as we return, so the deferred write
    // must operate on a private copy.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}
452
453int SimpleEntryImpl::ReadSparseData(int64 offset,
454 net::IOBuffer* buf,
455 int buf_len,
456 const CompletionCallback& callback) {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100457 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000458 // TODO(gavinp): Determine if the simple backend should support sparse data.
459 NOTIMPLEMENTED();
460 return net::ERR_FAILED;
461}
462
463int SimpleEntryImpl::WriteSparseData(int64 offset,
464 net::IOBuffer* buf,
465 int buf_len,
466 const CompletionCallback& callback) {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100467 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000468 // TODO(gavinp): Determine if the simple backend should support sparse data.
469 NOTIMPLEMENTED();
470 return net::ERR_FAILED;
471}
472
473int SimpleEntryImpl::GetAvailableRange(int64 offset,
474 int len,
475 int64* start,
476 const CompletionCallback& callback) {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100477 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000478 // TODO(gavinp): Determine if the simple backend should support sparse data.
479 NOTIMPLEMENTED();
480 return net::ERR_FAILED;
481}
482
483bool SimpleEntryImpl::CouldBeSparse() const {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100484 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000485 // TODO(gavinp): Determine if the simple backend should support sparse data.
486 return false;
487}
488
489void SimpleEntryImpl::CancelSparseIO() {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100490 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000491 // TODO(gavinp): Determine if the simple backend should support sparse data.
492 NOTIMPLEMENTED();
493}
494
495int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100496 DCHECK(io_thread_checker_.CalledOnValidThread());
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +0000497 // TODO(gavinp): Determine if the simple backend should support sparse data.
498 NOTIMPLEMENTED();
499 return net::ERR_FAILED;
500}
501
// The entry may only be destroyed with an empty operation queue, no live
// synchronous entry, and in a terminal state (never created, or failed).
SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  // Closes the net-log event opened in the constructor.
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}
510
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100511void SimpleEntryImpl::MakeUninitialized() {
512 state_ = STATE_UNINITIALIZED;
513 std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
514 std::memset(crc32s_, 0, sizeof(crc32s_));
515 std::memset(have_written_, 0, sizeof(have_written_));
516 std::memset(data_size_, 0, sizeof(data_size_));
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +0100517 for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
518 crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
519 }
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100520}
521
522void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
523 DCHECK(out_entry);
524 ++open_count_;
525 AddRef(); // Balanced in Close()
526 *out_entry = this;
527}
528
529void SimpleEntryImpl::RemoveSelfFromBackend() {
Torne (Richard Coles)868fa2f2013-06-11 10:57:03 +0100530 if (!backend_.get())
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100531 return;
532 backend_->OnDeactivated(this);
533 backend_.reset();
534}
535
536void SimpleEntryImpl::MarkAsDoomed() {
Torne (Richard Coles)868fa2f2013-06-11 10:57:03 +0100537 if (!backend_.get())
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100538 return;
539 backend_->index()->Remove(key_);
540 RemoveSelfFromBackend();
541}
542
// Pops and dispatches the next queued operation, unless IO is already in
// flight. Called after every enqueue and from ScopedOperationRunner dtors.
void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    // Copy the front operation out before popping so it outlives the queue
    // slot while the dispatch below runs.
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Makes sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}
592
// Executes a queued open. Short-circuits if the entry is already open
// (STATE_READY) or permanently broken (STATE_FAILURE); otherwise posts
// SimpleSynchronousEntry::OpenEntry to the worker pool with the reply routed
// to CreationOperationComplete on this thread.
void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    // Already open: hand out another reference and report success
    // asynchronously to keep the callback contract.
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  // |results| is filled by the worker task through the raw pointer, then its
  // ownership moves (base::Passed) into the reply closure.
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
640
// Performs the create on the IO thread: validates state, marks the entry
// IO-pending, and posts SimpleSynchronousEntry::CreateEntry to the worker
// pool, with CreationOperationComplete as the reply on this thread.
// |callback| (if non-null) is invoked via PostTask with the final result.
void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));

    // PostTask rather than calling directly, so the caller never observes a
    // synchronous completion.
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  // |results| is filled in by the worker-pool task and handed back to
  // CreationOperationComplete via base::Passed.
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
692
// Closes the entry: collects per-stream CRC records to be written out (a
// record with have_crc32==true only when a full sequential CRC is available),
// then posts SimpleSynchronousEntry::Close to the worker pool. Also records
// the CRC-check histogram for streams that were never written.
void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          // The running CRC covers the whole stream, so it can be persisted.
          // crc32(0, Z_NULL, 0) is zlib's initial/empty-stream CRC value.
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          // Writes were not sequential start-to-end; no valid CRC to save.
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_),
                   base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    // Clear the pointer before the worker-pool task consumes the object.
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    // Report the outcome of CRC checking for streams that were only read.
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
                                  crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}
739
740void SimpleEntryImpl::ReadDataInternal(int stream_index,
741 int offset,
742 net::IOBuffer* buf,
743 int buf_len,
744 const CompletionCallback& callback) {
745 DCHECK(io_thread_checker_.CalledOnValidThread());
746 ScopedOperationRunner operation_runner(this);
747
Ben Murdoch2385ea32013-08-06 11:01:04 +0100748 if (net_log_.IsLoggingAllEvents()) {
749 net_log_.AddEvent(
750 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
751 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
752 false));
753 }
754
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100755 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
756 if (!callback.is_null()) {
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +0100757 RecordReadResult(READ_RESULT_BAD_STATE);
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100758 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
759 callback, net::ERR_FAILED));
760 }
Ben Murdoch2385ea32013-08-06 11:01:04 +0100761 if (net_log_.IsLoggingAllEvents()) {
762 net_log_.AddEvent(
763 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
764 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
765 }
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100766 return;
767 }
768 DCHECK_EQ(STATE_READY, state_);
Torne (Richard Coles)90dce4d2013-05-29 14:40:03 +0100769 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +0100770 RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100771 // If there is nothing to read, we bail out before setting state_ to
772 // STATE_IO_PENDING.
773 if (!callback.is_null())
774 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
775 callback, 0));
776 return;
777 }
Ben Murdoch7dbb3d52013-07-17 14:55:54 +0100778
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +0100779 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100780
781 state_ = STATE_IO_PENDING;
Torne (Richard Coles)868fa2f2013-06-11 10:57:03 +0100782 if (backend_.get())
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100783 backend_->index()->UseIfExists(key_);
784
785 scoped_ptr<uint32> read_crc32(new uint32());
786 scoped_ptr<int> result(new int());
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +0100787 scoped_ptr<base::Time> last_used(new base::Time());
788 Closure task = base::Bind(
789 &SimpleSynchronousEntry::ReadData,
790 base::Unretained(synchronous_entry_),
791 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
792 make_scoped_refptr(buf),
793 read_crc32.get(),
794 last_used.get(),
795 result.get());
796 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
797 this,
798 stream_index,
799 offset,
800 callback,
801 base::Passed(&read_crc32),
802 base::Passed(&last_used),
803 base::Passed(&result));
Torne (Richard Coles)7d4cd472013-06-19 11:58:07 +0100804 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +0100805}
806
// Performs a write on the IO thread: validates state, incrementally updates
// the running stream CRC when the write is sequential, adjusts |data_size_|
// (truncating if requested), and posts SimpleSynchronousEntry::WriteData to
// the worker pool with WriteOperationComplete as the reply.
void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      // We need to posttask so that we don't go in a loop when we call the
      // callback directly.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end to compute
  // the crc of the data. When we write to an entry and close without having
  // done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    // Non-truncating writes can only grow the stream.
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // Mark the stream dirty so CloseInternal persists it (see CloseInternal).
  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
892
// Reply (IO thread) for both OpenEntry and CreateEntry worker-pool tasks.
// On failure, dooms the entry (unless the file already existed), notifies the
// callback with ERR_FAILED, and resets to uninitialized. On success, adopts
// the SimpleSynchronousEntry, moves to STATE_READY, and hands the entry back
// to the caller when |out_entry| is non-NULL.
void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", in_results->result == net::OK);
  if (in_results->result != net::OK) {
    // ERR_FILE_EXISTS means a failed Create over an existing entry; the entry
    // on disk is still valid, so don't doom it.
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);

    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (key_.empty()) {
    // Open-by-hash path: the key only becomes known once the file is read.
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(1);

  net_log_.AddEvent(end_event_type);
  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}
943
// Shared completion for read/write/checksum operations: transitions back to
// STATE_READY (or STATE_FAILURE on error, dooming the entry and invalidating
// the running CRC for |stream_index|), notifies the callback, and pumps the
// next queued operation.
void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    // The sequential-CRC bookkeeping is no longer trustworthy after a failed
    // operation.
    crc32s_end_offset_[stream_index] = 0;
  } else {
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}
968
// Reply (IO thread) for SimpleSynchronousEntry::ReadData. Extends the running
// sequential CRC with the bytes just read; if that read reached the end of a
// never-written stream, kicks off an asynchronous EOF-record checksum check
// (ChecksumOperationComplete finishes the operation in that case). Otherwise
// records the read result and completes via EntryOperationComplete.
void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  // First successful read on this stream: downgrade "never read at all" to
  // "never read to end" for the CheckCRCResult histogram.
  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    // crc32_combine merges the prefix CRC with the CRC of the newly-read
    // |*result| bytes.
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                data_size_[stream_index],
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      // The checksum reply will call EntryOperationComplete; don't fall
      // through to the normal completion below.
      return;
    }
  }

  if (*result < 0) {
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SUCCESS);
    // This read reached EOF but without a full sequential CRC, so the check
    // could not be performed.
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(*last_used, last_modified_, data_size_),
      result.Pass());
}
1044
1045void SimpleEntryImpl::WriteOperationComplete(
1046 int stream_index,
1047 const CompletionCallback& completion_callback,
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +01001048 scoped_ptr<SimpleEntryStat> entry_stat,
Torne (Richard Coles)a93a17c2013-05-15 11:34:50 +01001049 scoped_ptr<int> result) {
1050 if (*result >= 0)
1051 RecordWriteResult(WRITE_RESULT_SUCCESS);
1052 else
1053 RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
Ben Murdoch2385ea32013-08-06 11:01:04 +01001054 if (net_log_.IsLoggingAllEvents()) {
1055 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
1056 CreateNetLogReadWriteCompleteCallback(*result));
1057 }
1058
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +01001059 EntryOperationComplete(
1060 stream_index, completion_callback, *entry_stat, result.Pass());
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +01001061}
1062
// Reply (IO thread) for SimpleSynchronousEntry::CheckEOFRecord. If the
// checksum matched (net::OK), restores the original read's byte count as the
// final result; otherwise the checksum-failure error code propagates to the
// caller. Always completes via EntryOperationComplete.
void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    // Checksum passed: report the original read outcome to the caller.
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(last_used_, last_modified_, data_size_),
      result.Pass());
}
1099
// Reply (IO thread) for SimpleSynchronousEntry::Close (or direct call from
// CloseInternal when there was no synchronous entry): logs the close, updates
// the open-entry count, resets the object to uninitialized, and pumps any
// queued operations.
void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(-1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}
1110
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +01001111void SimpleEntryImpl::UpdateDataFromEntryStat(
1112 const SimpleEntryStat& entry_stat) {
Torne (Richard Coles)c2e0dbd2013-05-09 18:35:53 +01001113 DCHECK(io_thread_checker_.CalledOnValidThread());
1114 DCHECK(synchronous_entry_);
1115 DCHECK_EQ(STATE_READY, state_);
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +01001116
1117 last_used_ = entry_stat.last_used;
1118 last_modified_ = entry_stat.last_modified;
1119 for (int i = 0; i < kSimpleEntryFileCount; ++i) {
1120 data_size_[i] = entry_stat.data_size[i];
1121 }
Torne (Richard Coles)868fa2f2013-06-11 10:57:03 +01001122 if (backend_.get())
Torne (Richard Coles)a36e5922013-08-05 13:57:33 +01001123 backend_->index()->UpdateEntrySize(key_, GetDiskUsage());
1124}
1125
1126int64 SimpleEntryImpl::GetDiskUsage() const {
1127 int64 file_size = 0;
1128 for (int i = 0; i < kSimpleEntryFileCount; ++i) {
1129 file_size +=
1130 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
1131 }
1132 return file_size;
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +00001133}
1134
Ben Murdochbb1529c2013-08-08 10:24:53 +01001135void SimpleEntryImpl::RecordReadIsParallelizable(
1136 const SimpleEntryOperation& operation) const {
1137 if (!executing_operation_)
1138 return;
1139 // TODO(clamy): The values of this histogram should be changed to something
1140 // more useful.
1141 bool parallelizable_read =
1142 !operation.alone_in_queue() &&
1143 executing_operation_->type() == SimpleEntryOperation::TYPE_READ;
1144 UMA_HISTOGRAM_BOOLEAN("SimpleCache.ReadIsParallelizable",
1145 parallelizable_read);
1146}
1147
// Classifies how a queued write relates to the currently-executing operation
// (optimistic, conflicting/non-conflicting read or write, etc.) and records
// the classification in a UMA histogram. No-op when nothing is executing.
void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  UMA_HISTOGRAM_ENUMERATION(
      "SimpleCache.WriteDependencyType", type, WRITE_DEPENDENCY_TYPE_MAX);
}
1186
Torne (Richard Coles)2a99a7e2013-03-28 15:31:22 +00001187} // namespace disk_cache