/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * If the operation is asynchronous, this gets its own ref on the operation
 * that it queues.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx\n",
		       op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
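
/*
 * Example (a minimal sketch): how an asynchronous operation might be set up
 * so that it can later be enqueued here.  This assumes the
 * fscache_operation_init() helper and the flag bits declared in
 * fscache-cache.h; my_op_processor() and my_op_release() are hypothetical
 * callbacks.
 *
 *	fscache_operation_init(op, my_op_processor, my_op_release);
 *	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
 *
 *	if (fscache_submit_op(object, op) < 0)
 *		goto nobufs;
 *
 * Once the op has been submitted and started (state IN_PROGRESS), anything
 * that wants my_op_processor() run again on the thread pool - for instance
 * because more data has become available - just calls:
 *
 *	fscache_enqueue_operation(op);
 */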

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return 0;
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - ops may be submitted only in the following object states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
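
/*
 * Example (a minimal sketch): the synchronous submission pattern used for
 * FSCACHE_OP_MYTHREAD operations, where the submitter drives the I/O itself.
 * It parks on the FSCACHE_OP_WAITING bit, which fscache_run_op() clears and
 * wakes once the op leaves the pending queue.  fscache_wait_bit() stands in
 * for whatever bit-wait action the caller passes to wait_on_bit();
 * my_op_release() is a hypothetical release callback.
 *
 *	fscache_operation_init(op, NULL, my_op_release);
 *	op->flags = FSCACHE_OP_MYTHREAD | (1UL << FSCACHE_OP_WAITING);
 *
 *	ret = fscache_submit_op(object, op);
 *	if (ret < 0)
 *		goto nobufs;
 *
 *	wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *		    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
 */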

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the operation */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Record the completion of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	op->state = FSCACHE_OP_ST_COMPLETE;

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
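
/*
 * Example (a minimal sketch): a backend's processor routine would typically
 * finish by marking the op complete, which is what lets the next pending op
 * on the object start.  my_op_processor() is a hypothetical callback of the
 * kind registered with fscache_operation_init().
 *
 *	static void my_op_processor(struct fscache_operation *op)
 *	{
 *		// ... perform or continue the cached I/O ...
 *
 *		fscache_op_complete(op);	// IN_PROGRESS -> COMPLETE
 *	}
 *
 * The ref taken when the op was queued is dropped afterwards by
 * fscache_op_work_func(), so the processor does not need to call
 * fscache_put_operation() for that ref itself.
 */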

/*
 * release an operation
 * - the object raises the CLEARED event when its last op has been released
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
	op->state = FSCACHE_OP_ST_DEAD;

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
		if (atomic_dec_and_test(&object->n_reads)) {
			clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
				  &object->cookie->flags);
			wake_up_bit(&object->cookie->flags,
				    FSCACHE_COOKIE_WAITING_ON_READS);
		}
	}

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}