blob: ab1d7f35f6c2b7b889565cc2f5d36f129ff6f14f [file] [log] [blame]
David Howells36c95592009-04-03 16:42:38 +01001/* FS-Cache object state machine handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * See Documentation/filesystems/caching/object.txt for a description of the
12 * object state machine and the in-kernel representations.
13 */
14
15#define FSCACHE_DEBUG_LEVEL COOKIE
16#include <linux/module.h>
David Howellsef778e72012-12-20 21:52:36 +000017#include <linux/slab.h>
David Howellscaaef692013-05-10 19:50:26 +010018#include <linux/prefetch.h>
David Howells36c95592009-04-03 16:42:38 +010019#include "internal.h"
20
David Howellscaaef692013-05-10 19:50:26 +010021static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
22static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
23static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
24static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
25static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
26static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
27static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
28static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
29static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
30static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
31static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
32static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
David Howells11696dc2017-05-23 21:54:04 -040033static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
David Howells36c95592009-04-03 16:42:38 +010034
/* Token-paste the symbol name of a state definition. */
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = {	\
		.name = #n,			\
		.short_name = sn,		\
		.work = f			\
	}

/*
 * Returns from work states.  transit_to() prefetches the next state's work
 * function pointer as we're about to dispatch through it.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.  The table is terminated by a { 0, NULL } sentinel.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = {	\
		.name = #n,			\
		.short_name = sn,		\
		.work = NULL,			\
		.transitions = { __VA_ARGS__, { 0, NULL } } \
	}

/* A single table entry: on any event in emask, go to the named state. */
#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }
76
/*
 * The object state machine.
 */
/* Bring-up phase: initialisation, waiting for/synchronising with the parent,
 * then lookup/creation in the cache backend.  Note that object creation
 * shares fscache_look_up_object() as its handler. */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);

/* Commands executed on a live object. */
static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);

/* Teardown phase. */
static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);

/* Initial parking state: wait to be told we have a new child to process. */
static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

/* Parked until the parent object finishes its own lookup/creation. */
static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
		  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

/* Steady state for a live object: wait for a command event. */
static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

/* During teardown: wait for outstanding ops/children to clear. */
static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
		  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
110
/*
 * Out-of-band event transition tables.  These are for handling unexpected
 * events, such as an I/O error.  If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
/* OOB table in force during initialisation (before lookup starts). */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table in force whilst the object is being looked up or created. */
static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table in force once the object is up and running. */
static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};
140
Tejun Heo8b8edef2010-07-20 22:09:01 +0200141static int fscache_get_object(struct fscache_object *);
142static void fscache_put_object(struct fscache_object *);
David Howellscaaef692013-05-10 19:50:26 +0100143static bool fscache_enqueue_dependents(struct fscache_object *, int);
David Howells36c95592009-04-03 16:42:38 +0100144static void fscache_dequeue_object(struct fscache_object *);
145
David Howells36c95592009-04-03 16:42:38 +0100146/*
147 * we need to notify the parent when an op completes that we had outstanding
148 * upon it
149 */
150static inline void fscache_done_parent_op(struct fscache_object *object)
151{
152 struct fscache_object *parent = object->parent;
153
154 _enter("OBJ%x {OBJ%x,%x}",
155 object->debug_id, parent->debug_id, parent->n_ops);
156
157 spin_lock_nested(&parent->lock, 1);
David Howells36c95592009-04-03 16:42:38 +0100158 parent->n_obj_ops--;
David Howells13627292013-05-10 19:50:26 +0100159 parent->n_ops--;
David Howells36c95592009-04-03 16:42:38 +0100160 if (parent->n_ops == 0)
161 fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
162 spin_unlock(&parent->lock);
163}
164
/*
 * Object state machine dispatcher.
 *
 * Drives the object through its states: out-of-band events (errors, kill
 * requests) are checked first and force a jump to their nominated work state;
 * a wait state consults its transition table; a work state has its handler
 * executed.  The loop parks when the object reaches a wait state with none of
 * its awaited events pending, or when a work state asks to be requeued.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	int event = -1;	/* Event number to hand to a work function; -1 if none */

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				/* An OOB event only fires once: disable it */
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		/* The work state wants to be re-run from the scheduler */
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		/* Work states chain directly into one another */
		goto restart_masked;
	}

	/* Transited to wait state */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	smp_mb(); /* Order the unmasking against the re-read of the event set
		   * so an event raised whilst masked isn't lost - presumably
		   * pairs with a barrier in the event raiser; TODO confirm. */
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
267
268/*
269 * execute an object
270 */
David Howells610be242013-05-10 19:50:25 +0100271static void fscache_object_work_func(struct work_struct *work)
David Howells36c95592009-04-03 16:42:38 +0100272{
273 struct fscache_object *object =
274 container_of(work, struct fscache_object, work);
275 unsigned long start;
276
277 _enter("{OBJ%x}", object->debug_id);
278
David Howells36c95592009-04-03 16:42:38 +0100279 start = jiffies;
David Howellscaaef692013-05-10 19:50:26 +0100280 fscache_object_sm_dispatcher(object);
David Howells36c95592009-04-03 16:42:38 +0100281 fscache_hist(fscache_objs_histogram, start);
Tejun Heo8b8edef2010-07-20 22:09:01 +0200282 fscache_put_object(object);
David Howells36c95592009-04-03 16:42:38 +0100283}
David Howells610be242013-05-10 19:50:25 +0100284
285/**
286 * fscache_object_init - Initialise a cache object description
287 * @object: Object description
288 * @cookie: Cookie object will be attached to
289 * @cache: Cache in which backing object will be found
290 *
291 * Initialise a cache object description to its basic values.
292 *
293 * See Documentation/filesystems/caching/backend-api.txt for a complete
294 * description.
295 */
296void fscache_object_init(struct fscache_object *object,
297 struct fscache_cookie *cookie,
298 struct fscache_cache *cache)
299{
David Howellscaaef692013-05-10 19:50:26 +0100300 const struct fscache_transition *t;
301
David Howells610be242013-05-10 19:50:25 +0100302 atomic_inc(&cache->object_count);
303
David Howellscaaef692013-05-10 19:50:26 +0100304 object->state = STATE(WAIT_FOR_INIT);
305 object->oob_table = fscache_osm_init_oob;
306 object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
David Howells610be242013-05-10 19:50:25 +0100307 spin_lock_init(&object->lock);
308 INIT_LIST_HEAD(&object->cache_link);
309 INIT_HLIST_NODE(&object->cookie_link);
310 INIT_WORK(&object->work, fscache_object_work_func);
311 INIT_LIST_HEAD(&object->dependents);
312 INIT_LIST_HEAD(&object->dep_link);
313 INIT_LIST_HEAD(&object->pending_ops);
314 object->n_children = 0;
315 object->n_ops = object->n_in_progress = object->n_exclusive = 0;
David Howellscaaef692013-05-10 19:50:26 +0100316 object->events = 0;
David Howells610be242013-05-10 19:50:25 +0100317 object->store_limit = 0;
318 object->store_limit_l = 0;
319 object->cache = cache;
320 object->cookie = cookie;
321 object->parent = NULL;
David Howells7026f192014-02-17 15:01:47 +0000322#ifdef CONFIG_FSCACHE_OBJECT_LIST
323 RB_CLEAR_NODE(&object->objlist_link);
324#endif
David Howellscaaef692013-05-10 19:50:26 +0100325
326 object->oob_event_mask = 0;
327 for (t = object->oob_table; t->events; t++)
328 object->oob_event_mask |= t->events;
329 object->event_mask = object->oob_event_mask;
330 for (t = object->state->transitions; t->events; t++)
331 object->event_mask |= t->events;
David Howells610be242013-05-10 19:50:25 +0100332}
333EXPORT_SYMBOL(fscache_object_init);
David Howells440f0af2009-11-19 18:11:01 +0000334
/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	/* Clearing the flag under the object lock ensures that anyone who saw
	 * it set whilst submitting an op has finished before we proceed -
	 * presumably op submission checks IS_LIVE under this same lock;
	 * TODO confirm against the op-submission path. */
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}
345
/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* Stop listening for further OOB events, take the object off its
	 * parent's dependents list and go straight to the kill state. */
	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}
358
359/*
David Howells36c95592009-04-03 16:42:38 +0100360 * initialise an object
361 * - check the specified object's parent to see if we can make use of it
362 * immediately to do a creation
363 * - we may need to start the process of creating a parent and we need to wait
364 * for the parent's lookup and creation to complete if it's not there yet
David Howells36c95592009-04-03 16:42:38 +0100365 */
David Howellscaaef692013-05-10 19:50:26 +0100366static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
367 int event)
David Howells36c95592009-04-03 16:42:38 +0100368{
369 struct fscache_object *parent;
David Howellscaaef692013-05-10 19:50:26 +0100370 bool success;
David Howells36c95592009-04-03 16:42:38 +0100371
David Howellscaaef692013-05-10 19:50:26 +0100372 _enter("{OBJ%x},%d", object->debug_id, event);
David Howells36c95592009-04-03 16:42:38 +0100373
David Howellscaaef692013-05-10 19:50:26 +0100374 ASSERT(list_empty(&object->dep_link));
David Howells36c95592009-04-03 16:42:38 +0100375
376 parent = object->parent;
377 if (!parent) {
David Howellscaaef692013-05-10 19:50:26 +0100378 _leave(" [no parent]");
David Howells13627292013-05-10 19:50:26 +0100379 return transit_to(DROP_OBJECT);
David Howells36c95592009-04-03 16:42:38 +0100380 }
381
David Howells13627292013-05-10 19:50:26 +0100382 _debug("parent: %s of:%lx", parent->state->name, parent->flags);
David Howellscaaef692013-05-10 19:50:26 +0100383
384 if (fscache_object_is_dying(parent)) {
385 _leave(" [bad parent]");
David Howells13627292013-05-10 19:50:26 +0100386 return transit_to(DROP_OBJECT);
David Howellscaaef692013-05-10 19:50:26 +0100387 }
388
389 if (fscache_object_is_available(parent)) {
390 _leave(" [ready]");
391 return transit_to(PARENT_READY);
392 }
393
394 _debug("wait");
395
396 spin_lock(&parent->lock);
397 fscache_stat(&fscache_n_cop_grab_object);
398 success = false;
399 if (fscache_object_is_live(parent) &&
400 object->cache->ops->grab_object(object)) {
401 list_add(&object->dep_link, &parent->dependents);
402 success = true;
403 }
404 fscache_stat_d(&fscache_n_cop_grab_object);
405 spin_unlock(&parent->lock);
406 if (!success) {
407 _leave(" [grab failed]");
David Howells13627292013-05-10 19:50:26 +0100408 return transit_to(DROP_OBJECT);
David Howellscaaef692013-05-10 19:50:26 +0100409 }
410
411 /* fscache_acquire_non_index_cookie() uses this
412 * to wake the chain up */
413 fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
414 _leave(" [wait]");
415 return transit_to(WAIT_FOR_PARENT);
416}
417
418/*
419 * Once the parent object is ready, we should kick off our lookup op.
420 */
421static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
422 int event)
423{
424 struct fscache_object *parent = object->parent;
425
426 _enter("{OBJ%x},%d", object->debug_id, event);
427
428 ASSERT(parent != NULL);
429
430 spin_lock(&parent->lock);
431 parent->n_ops++;
432 parent->n_obj_ops++;
433 object->lookup_jif = jiffies;
434 spin_unlock(&parent->lock);
435
David Howells36c95592009-04-03 16:42:38 +0100436 _leave("");
David Howellscaaef692013-05-10 19:50:26 +0100437 return transit_to(LOOK_UP_OBJECT);
David Howells36c95592009-04-03 16:42:38 +0100438}
439
/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* From here on, unexpected errors divert to LOOKUP_FAILURE */
	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	/* Give up if the parent is dying, the cache has seen an I/O error or
	 * the cookie can't be pinned for use */
	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	/* Drop the cookie pin taken above */
	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}
496
497/**
498 * fscache_object_lookup_negative - Note negative cookie lookup
499 * @object: Object pointing to cookie to mark
500 *
501 * Note negative lookup, permitting those waiting to read data from an already
502 * existing backing object to continue as there's no data for them to read.
503 */
504void fscache_object_lookup_negative(struct fscache_object *object)
505{
506 struct fscache_cookie *cookie = object->cookie;
507
David Howellscaaef692013-05-10 19:50:26 +0100508 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
David Howells36c95592009-04-03 16:42:38 +0100509
David Howellscaaef692013-05-10 19:50:26 +0100510 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
David Howells36c95592009-04-03 16:42:38 +0100511 fscache_stat(&fscache_n_object_lookups_negative);
512
David Howellscaaef692013-05-10 19:50:26 +0100513 /* Allow write requests to begin stacking up and read requests to begin
514 * returning ENODATA.
515 */
David Howells36c95592009-04-03 16:42:38 +0100516 set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
David Howells94d30ae2013-09-21 00:09:31 +0100517 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
David Howells36c95592009-04-03 16:42:38 +0100518
519 _debug("wake up lookup %p", &cookie->flags);
David Howellscaaef692013-05-10 19:50:26 +0100520 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
David Howells36c95592009-04-03 16:42:38 +0100521 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
David Howells36c95592009-04-03 16:42:38 +0100522 }
David Howells36c95592009-04-03 16:42:38 +0100523 _leave("");
524}
525EXPORT_SYMBOL(fscache_object_lookup_negative);
526
527/**
528 * fscache_obtained_object - Note successful object lookup or creation
529 * @object: Object pointing to cookie to mark
530 *
531 * Note successful lookup and/or creation, permitting those waiting to write
532 * data to a backing object to continue.
533 *
534 * Note that after calling this, an object's cookie may be relinquished by the
535 * netfs, and so must be accessed with object lock held.
536 */
537void fscache_obtained_object(struct fscache_object *object)
538{
539 struct fscache_cookie *cookie = object->cookie;
540
David Howellscaaef692013-05-10 19:50:26 +0100541 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
David Howells36c95592009-04-03 16:42:38 +0100542
543 /* if we were still looking up, then we must have a positive lookup
544 * result, in which case there may be data available */
David Howellscaaef692013-05-10 19:50:26 +0100545 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
David Howells36c95592009-04-03 16:42:38 +0100546 fscache_stat(&fscache_n_object_lookups_positive);
547
David Howellscaaef692013-05-10 19:50:26 +0100548 /* We do (presumably) have data */
549 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
David Howells94d30ae2013-09-21 00:09:31 +0100550 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
David Howells36c95592009-04-03 16:42:38 +0100551
David Howellscaaef692013-05-10 19:50:26 +0100552 /* Allow write requests to begin stacking up and read requests
553 * to begin shovelling data.
554 */
555 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
David Howells36c95592009-04-03 16:42:38 +0100556 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
David Howells36c95592009-04-03 16:42:38 +0100557 } else {
David Howells36c95592009-04-03 16:42:38 +0100558 fscache_stat(&fscache_n_object_created);
David Howells36c95592009-04-03 16:42:38 +0100559 }
560
David Howellscaaef692013-05-10 19:50:26 +0100561 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
David Howells36c95592009-04-03 16:42:38 +0100562 _leave("");
563}
564EXPORT_SYMBOL(fscache_obtained_object);
565
566/*
567 * handle an object that has just become available
568 */
David Howellscaaef692013-05-10 19:50:26 +0100569static const struct fscache_state *fscache_object_available(struct fscache_object *object,
570 int event)
David Howells36c95592009-04-03 16:42:38 +0100571{
David Howellscaaef692013-05-10 19:50:26 +0100572 _enter("{OBJ%x},%d", object->debug_id, event);
573
574 object->oob_table = fscache_osm_run_oob;
David Howells36c95592009-04-03 16:42:38 +0100575
576 spin_lock(&object->lock);
577
David Howells36c95592009-04-03 16:42:38 +0100578 fscache_done_parent_op(object);
579 if (object->n_in_progress == 0) {
580 if (object->n_ops > 0) {
581 ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
David Howells36c95592009-04-03 16:42:38 +0100582 fscache_start_operations(object);
583 } else {
584 ASSERT(list_empty(&object->pending_ops));
585 }
586 }
587 spin_unlock(&object->lock);
588
David Howells52bd75f2009-11-19 18:11:08 +0000589 fscache_stat(&fscache_n_cop_lookup_complete);
David Howells36c95592009-04-03 16:42:38 +0100590 object->cache->ops->lookup_complete(object);
David Howells52bd75f2009-11-19 18:11:08 +0000591 fscache_stat_d(&fscache_n_cop_lookup_complete);
David Howells36c95592009-04-03 16:42:38 +0100592
593 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
594 fscache_stat(&fscache_n_object_avail);
595
596 _leave("");
David Howellscaaef692013-05-10 19:50:26 +0100597 return transit_to(JUMPSTART_DEPS);
David Howells36c95592009-04-03 16:42:38 +0100598}
599
600/*
David Howellscaaef692013-05-10 19:50:26 +0100601 * Wake up this object's dependent objects now that we've become available.
David Howells36c95592009-04-03 16:42:38 +0100602 */
David Howellscaaef692013-05-10 19:50:26 +0100603static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
604 int event)
605{
606 _enter("{OBJ%x},%d", object->debug_id, event);
607
608 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
609 return NO_TRANSIT; /* Not finished; requeue */
610 return transit_to(WAIT_FOR_CMD);
611}
612
613/*
614 * Handle lookup or creation failute.
615 */
616static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
617 int event)
618{
619 struct fscache_cookie *cookie;
David Howellscaaef692013-05-10 19:50:26 +0100620
621 _enter("{OBJ%x},%d", object->debug_id, event);
622
623 object->oob_event_mask = 0;
624
625 fscache_stat(&fscache_n_cop_lookup_complete);
626 object->cache->ops->lookup_complete(object);
627 fscache_stat_d(&fscache_n_cop_lookup_complete);
628
David Howells6515d1d2015-02-25 11:53:57 +0000629 set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
630
David Howellscaaef692013-05-10 19:50:26 +0100631 cookie = object->cookie;
632 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
David Howells13627292013-05-10 19:50:26 +0100633 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
David Howellscaaef692013-05-10 19:50:26 +0100634 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
David Howellscaaef692013-05-10 19:50:26 +0100635
636 fscache_done_parent_op(object);
637 return transit_to(KILL_OBJECT);
638}
639
640/*
641 * Wait for completion of all active operations on this object and the death of
642 * all child objects of this object.
643 */
644static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
645 int event)
646{
647 _enter("{OBJ%x,%d,%d},%d",
648 object->debug_id, object->n_ops, object->n_children, event);
649
David Howellsf09b4432015-02-24 10:05:28 +0000650 fscache_mark_object_dead(object);
David Howells13627292013-05-10 19:50:26 +0100651 object->oob_event_mask = 0;
David Howellscaaef692013-05-10 19:50:26 +0100652
David Howells34f1a462017-05-23 21:54:05 -0400653 if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
654 /* Reject any new read/write ops and abort any that are pending. */
655 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
656 fscache_cancel_all_ops(object);
657 }
658
David Howellscaaef692013-05-10 19:50:26 +0100659 if (list_empty(&object->dependents) &&
660 object->n_ops == 0 &&
661 object->n_children == 0)
David Howells13627292013-05-10 19:50:26 +0100662 return transit_to(DROP_OBJECT);
David Howellscaaef692013-05-10 19:50:26 +0100663
David Howells13627292013-05-10 19:50:26 +0100664 if (object->n_in_progress == 0) {
665 spin_lock(&object->lock);
666 if (object->n_ops > 0 && object->n_in_progress == 0)
667 fscache_start_operations(object);
668 spin_unlock(&object->lock);
669 }
David Howellscaaef692013-05-10 19:50:26 +0100670
671 if (!list_empty(&object->dependents))
672 return transit_to(KILL_DEPENDENTS);
673
674 return transit_to(WAIT_FOR_CLEARANCE);
675}
676
677/*
678 * Kill dependent objects.
679 */
680static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
681 int event)
682{
683 _enter("{OBJ%x},%d", object->debug_id, event);
684
685 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
686 return NO_TRANSIT; /* Not finished */
687 return transit_to(WAIT_FOR_CLEARANCE);
688}
689
/*
 * Drop an object's attachments: detach it from its cookie, its cache and its
 * parent, hand the backend resources back to the cache, then release the
 * final state-machine reference.  Always transitions to OBJECT_DEAD.
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	/* If we were the last backing object of a cookie that is mid
	 * invalidation, the invalidation is over; remember to wake the waiter
	 * once the cookie lock is dropped.
	 */
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	/* Similarly, anyone waiting on a lookup of this object must be let go */
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);


	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	/* Let the backend release its resources; the cop stat pair brackets
	 * the in-flight backend call for the statistics file.
	 */
	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}
757
758/*
Tejun Heo8b8edef2010-07-20 22:09:01 +0200759 * get a ref on an object
David Howells36c95592009-04-03 16:42:38 +0100760 */
Tejun Heo8b8edef2010-07-20 22:09:01 +0200761static int fscache_get_object(struct fscache_object *object)
David Howells36c95592009-04-03 16:42:38 +0100762{
David Howells52bd75f2009-11-19 18:11:08 +0000763 int ret;
David Howells36c95592009-04-03 16:42:38 +0100764
David Howells52bd75f2009-11-19 18:11:08 +0000765 fscache_stat(&fscache_n_cop_grab_object);
766 ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
767 fscache_stat_d(&fscache_n_cop_grab_object);
768 return ret;
David Howells36c95592009-04-03 16:42:38 +0100769}
770
771/*
David Howellscaaef692013-05-10 19:50:26 +0100772 * Discard a ref on an object
David Howells36c95592009-04-03 16:42:38 +0100773 */
Tejun Heo8b8edef2010-07-20 22:09:01 +0200774static void fscache_put_object(struct fscache_object *object)
David Howells36c95592009-04-03 16:42:38 +0100775{
David Howells52bd75f2009-11-19 18:11:08 +0000776 fscache_stat(&fscache_n_cop_put_object);
777 object->cache->ops->put_object(object);
778 fscache_stat_d(&fscache_n_cop_put_object);
David Howells36c95592009-04-03 16:42:38 +0100779}
780
David Howells13627292013-05-10 19:50:26 +0100781/**
782 * fscache_object_destroy - Note that a cache object is about to be destroyed
783 * @object: The object to be destroyed
784 *
785 * Note the imminent destruction and deallocation of a cache object record.
786 */
787void fscache_object_destroy(struct fscache_object *object)
788{
789 fscache_objlist_remove(object);
790
791 /* We can get rid of the cookie now */
792 fscache_cookie_put(object->cookie);
793 object->cookie = NULL;
794}
795EXPORT_SYMBOL(fscache_object_destroy);
796
/*
 * enqueue an object for metadata-type processing
 *
 * A reference is taken on the object on behalf of the work item; if the work
 * item turns out to be queued already, the surplus reference is dropped.
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	/* Only queue the object if the backend will give us a reference to
	 * pin it with (fscache_get_object() returns -EAGAIN otherwise).
	 */
	if (fscache_get_object(object) >= 0) {
		/* get_cpu_var() pins us to this CPU (preemption off) until
		 * the matching put_cpu_var() below.
		 */
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			/* Rouse anyone sleeping in
			 * fscache_object_sleep_till_congested() now that the
			 * workqueue has become congested.
			 */
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			/* Already queued: drop the surplus reference */
			fscache_put_object(object);

		put_cpu_var(fscache_object_cong_wait);
	}
}
817
/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout; updated with the time remaining
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	/* Fast path: no need to sleep at all */
	if (fscache_object_congested())
		return true;

	/* Join the waitqueue, then recheck the condition to close the race
	 * with a wake-up arriving between the test above and the sleep.
	 */
	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
846
/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately.  We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
	struct fscache_object *dep;
	bool ret = true;

	_enter("{OBJ%x}", object->debug_id);

	/* Unlocked peek is fine: the caller retries until we report done */
	if (list_empty(&object->dependents))
		return true;

	spin_lock(&object->lock);

	while (!list_empty(&object->dependents)) {
		dep = list_entry(object->dependents.next,
				 struct fscache_object, dep_link);
		list_del_init(&dep->dep_link);

		fscache_raise_event(dep, event);
		/* Drop the reference that the dependents list was holding on
		 * this dependent.
		 */
		fscache_put_object(dep);

		/* Yield if there's more to do but the scheduler wants the
		 * CPU; the caller will requeue us to finish the rest.
		 */
		if (!list_empty(&object->dependents) && need_resched()) {
			ret = false;
			break;
		}
	}

	spin_unlock(&object->lock);
	return ret;
}
883
884/*
885 * remove an object from whatever queue it's waiting on
David Howells36c95592009-04-03 16:42:38 +0100886 */
David Howellscaaef692013-05-10 19:50:26 +0100887static void fscache_dequeue_object(struct fscache_object *object)
David Howells36c95592009-04-03 16:42:38 +0100888{
889 _enter("{OBJ%x}", object->debug_id);
890
891 if (!list_empty(&object->dep_link)) {
892 spin_lock(&object->parent->lock);
893 list_del_init(&object->dep_link);
894 spin_unlock(&object->parent->lock);
895 }
896
897 _leave("");
898}
899
900/**
901 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
902 * @object: The object to ask about
903 * @data: The auxiliary data for the object
904 * @datalen: The size of the auxiliary data
905 *
David Howells13627292013-05-10 19:50:26 +0100906 * This function consults the netfs about the coherency state of an object.
907 * The caller must be holding a ref on cookie->n_active (held by
908 * fscache_look_up_object() on behalf of the cache backend during object lookup
909 * and creation).
David Howells36c95592009-04-03 16:42:38 +0100910 */
911enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
912 const void *data, uint16_t datalen)
913{
914 enum fscache_checkaux result;
915
916 if (!object->cookie->def->check_aux) {
917 fscache_stat(&fscache_n_checkaux_none);
918 return FSCACHE_CHECKAUX_OKAY;
919 }
920
921 result = object->cookie->def->check_aux(object->cookie->netfs_data,
922 data, datalen);
923 switch (result) {
924 /* entry okay as is */
925 case FSCACHE_CHECKAUX_OKAY:
926 fscache_stat(&fscache_n_checkaux_okay);
927 break;
928
929 /* entry requires update */
930 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
931 fscache_stat(&fscache_n_checkaux_update);
932 break;
933
934 /* entry requires deletion */
935 case FSCACHE_CHECKAUX_OBSOLETE:
936 fscache_stat(&fscache_n_checkaux_obsolete);
937 break;
938
939 default:
940 BUG();
941 }
942
943 return result;
944}
945EXPORT_SYMBOL(fscache_check_aux);
David Howellsef778e72012-12-20 21:52:36 +0000946
/*
 * Asynchronously invalidate an object.
 *
 * New read/write ops are rejected and pending ones cancelled, then an
 * exclusive invalidation operation is submitted to the cache backend so that
 * it runs once all in-progress reads and writes have drained.  On success we
 * move on to UPDATE_OBJECT; on any failure the object is marked dead and
 * transitions to KILL_OBJECT.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(object->cookie->stores.rnode == NULL);
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	/* EXCLUSIVE makes the op wait for earlier ops to drain before it
	 * runs; UNUSE_COOKIE balances the fscache_use_cookie() taken above
	 * when the op is disposed of.
	 */
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	/* Allocation failure: give up the cookie hold and kill the object */
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	/* Submission failed (object dying); unwind the op and cookie hold */
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
1018
1019static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
1020 int event)
1021{
1022 const struct fscache_state *s;
1023
1024 fscache_stat(&fscache_n_invalidates_run);
1025 fscache_stat(&fscache_n_cop_invalidate_object);
1026 s = _fscache_invalidate_object(object, event);
1027 fscache_stat_d(&fscache_n_cop_invalidate_object);
1028 return s;
1029}
1030
1031/*
1032 * Asynchronously update an object.
1033 */
1034static const struct fscache_state *fscache_update_object(struct fscache_object *object,
1035 int event)
1036{
1037 _enter("{OBJ%x},%d", object->debug_id, event);
1038
1039 fscache_stat(&fscache_n_updates_run);
1040 fscache_stat(&fscache_n_cop_update_object);
1041 object->cache->ops->update_object(object);
1042 fscache_stat_d(&fscache_n_cop_update_object);
1043
1044 _leave("");
1045 return transit_to(WAIT_FOR_CMD);
David Howellsef778e72012-12-20 21:52:36 +00001046}
David Howells182d9192015-02-19 23:47:31 +00001047
1048/**
1049 * fscache_object_retrying_stale - Note retrying stale object
1050 * @object: The object that will be retried
1051 *
1052 * Note that an object lookup found an on-disk object that was adjudged to be
1053 * stale and has been deleted. The lookup will be retried.
1054 */
1055void fscache_object_retrying_stale(struct fscache_object *object)
1056{
1057 fscache_stat(&fscache_n_cache_no_space_reject);
1058}
1059EXPORT_SYMBOL(fscache_object_retrying_stale);
1060
/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed and attribute the reason to the appropriate
 * statistics counter.  If the cache has already marked this object killed, an
 * error is logged and the new reason is not recorded.
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	/* Only the first kill is recorded; a second kill by the cache
	 * indicates a bug in the cache backend.
	 */
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	/* Account the kill under the appropriate reason */
	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
David Howells11696dc2017-05-23 21:54:04 -04001094
1095/*
1096 * The object is dead. We can get here if an object gets queued by an event
1097 * that would lead to its death (such as EV_KILL) when the dispatcher is
1098 * already running (and so can be requeued) but hasn't yet cleared the event
1099 * mask.
1100 */
1101static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
1102 int event)
1103{
1104 if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
1105 &object->flags))
1106 return NO_TRANSIT;
1107
1108 WARN(true, "FS-Cache object redispatched after death");
1109 return NO_TRANSIT;
1110}