/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */
10
11/*
12 * In-kernel waitqueue operations.
13 */
14
15#include "protocol.h"
Mike Marshall575e9462015-12-04 12:56:14 -050016#include "orangefs-kernel.h"
17#include "orangefs-bufmap.h"
Mike Marshall1182fca2015-07-17 10:38:15 -040018
19/*
20 * What we do in this function is to walk the list of operations that are
21 * present in the request queue and mark them as purged.
22 * NOTE: This is called from the device close after client-core has
23 * guaranteed that no new operations could appear on the list since the
24 * client-core is anyway going to exit.
25 */
26void purge_waiting_ops(void)
27{
Yi Liu8bb8aef2015-11-24 15:12:14 -050028 struct orangefs_kernel_op_s *op;
Mike Marshall1182fca2015-07-17 10:38:15 -040029
Yi Liu8bb8aef2015-11-24 15:12:14 -050030 spin_lock(&orangefs_request_list_lock);
31 list_for_each_entry(op, &orangefs_request_list, list) {
Mike Marshall1182fca2015-07-17 10:38:15 -040032 gossip_debug(GOSSIP_WAIT_DEBUG,
33 "pvfs2-client-core: purging op tag %llu %s\n",
34 llu(op->tag),
35 get_opname_string(op));
36 spin_lock(&op->lock);
37 set_op_state_purged(op);
38 spin_unlock(&op->lock);
39 wake_up_interruptible(&op->waitq);
40 }
Yi Liu8bb8aef2015-11-24 15:12:14 -050041 spin_unlock(&orangefs_request_list_lock);
Mike Marshall1182fca2015-07-17 10:38:15 -040042}
43
44/*
Yi Liu8bb8aef2015-11-24 15:12:14 -050045 * submits a ORANGEFS operation and waits for it to complete
Mike Marshall1182fca2015-07-17 10:38:15 -040046 *
47 * Note op->downcall.status will contain the status of the operation (in
48 * errno format), whether provided by pvfs2-client or a result of failure to
49 * service the operation. If the caller wishes to distinguish, then
50 * op->state can be checked to see if it was serviced or not.
51 *
52 * Returns contents of op->downcall.status for convenience
53 */
Yi Liu8bb8aef2015-11-24 15:12:14 -050054int service_operation(struct orangefs_kernel_op_s *op,
Mike Marshall1182fca2015-07-17 10:38:15 -040055 const char *op_name,
56 int flags)
57{
58 /* flags to modify behavior */
59 sigset_t orig_sigset;
60 int ret = 0;
61
62 /* irqflags and wait_entry are only used IF the client-core aborts */
63 unsigned long irqflags;
64
Mike Marshallce6c4142015-12-14 14:54:46 -050065 DEFINE_WAIT(wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -040066
67 op->upcall.tgid = current->tgid;
68 op->upcall.pid = current->pid;
69
70retry_servicing:
71 op->downcall.status = 0;
72 gossip_debug(GOSSIP_WAIT_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -050073 "orangefs: service_operation: %s %p\n",
Mike Marshall1182fca2015-07-17 10:38:15 -040074 op_name,
75 op);
76 gossip_debug(GOSSIP_WAIT_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -050077 "orangefs: operation posted by process: %s, pid: %i\n",
Mike Marshall1182fca2015-07-17 10:38:15 -040078 current->comm,
79 current->pid);
80
81 /* mask out signals if this operation is not to be interrupted */
Yi Liu8bb8aef2015-11-24 15:12:14 -050082 if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
Mike Marshall8c3905a2015-09-29 12:07:46 -040083 block_signals(&orig_sigset);
Mike Marshall1182fca2015-07-17 10:38:15 -040084
Yi Liu8bb8aef2015-11-24 15:12:14 -050085 if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
Mike Marshall1182fca2015-07-17 10:38:15 -040086 ret = mutex_lock_interruptible(&request_mutex);
87 /*
88 * check to see if we were interrupted while waiting for
89 * semaphore
90 */
91 if (ret < 0) {
Yi Liu8bb8aef2015-11-24 15:12:14 -050092 if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
Mike Marshall8c3905a2015-09-29 12:07:46 -040093 set_signals(&orig_sigset);
Mike Marshall1182fca2015-07-17 10:38:15 -040094 op->downcall.status = ret;
95 gossip_debug(GOSSIP_WAIT_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -050096 "orangefs: service_operation interrupted.\n");
Mike Marshall1182fca2015-07-17 10:38:15 -040097 return ret;
98 }
99 }
100
101 gossip_debug(GOSSIP_WAIT_DEBUG,
102 "%s:About to call is_daemon_in_service().\n",
103 __func__);
104
105 if (is_daemon_in_service() < 0) {
106 /*
107 * By incrementing the per-operation attempt counter, we
108 * directly go into the timeout logic while waiting for
109 * the matching downcall to be read
110 */
111 gossip_debug(GOSSIP_WAIT_DEBUG,
112 "%s:client core is NOT in service(%d).\n",
113 __func__,
114 is_daemon_in_service());
115 op->attempts++;
116 }
117
118 /* queue up the operation */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500119 if (flags & ORANGEFS_OP_PRIORITY) {
Mike Marshall1182fca2015-07-17 10:38:15 -0400120 add_priority_op_to_request_list(op);
121 } else {
122 gossip_debug(GOSSIP_WAIT_DEBUG,
123 "%s:About to call add_op_to_request_list().\n",
124 __func__);
125 add_op_to_request_list(op);
126 }
127
Yi Liu8bb8aef2015-11-24 15:12:14 -0500128 if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
Mike Marshall1182fca2015-07-17 10:38:15 -0400129 mutex_unlock(&request_mutex);
130
131 /*
132 * If we are asked to service an asynchronous operation from
133 * VFS perspective, we are done.
134 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500135 if (flags & ORANGEFS_OP_ASYNC)
Mike Marshall1182fca2015-07-17 10:38:15 -0400136 return 0;
137
Yi Liu8bb8aef2015-11-24 15:12:14 -0500138 if (flags & ORANGEFS_OP_CANCELLATION) {
Mike Marshall1182fca2015-07-17 10:38:15 -0400139 gossip_debug(GOSSIP_WAIT_DEBUG,
140 "%s:"
141 "About to call wait_for_cancellation_downcall.\n",
142 __func__);
143 ret = wait_for_cancellation_downcall(op);
144 } else {
145 ret = wait_for_matching_downcall(op);
146 }
147
148 if (ret < 0) {
149 /* failed to get matching downcall */
150 if (ret == -ETIMEDOUT) {
Yi Liu8bb8aef2015-11-24 15:12:14 -0500151 gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
Mike Marshall1182fca2015-07-17 10:38:15 -0400152 op_name);
153 }
154 op->downcall.status = ret;
155 } else {
156 /* got matching downcall; make sure status is in errno format */
157 op->downcall.status =
Yi Liu8bb8aef2015-11-24 15:12:14 -0500158 orangefs_normalize_to_errno(op->downcall.status);
Mike Marshall1182fca2015-07-17 10:38:15 -0400159 ret = op->downcall.status;
160 }
161
Yi Liu8bb8aef2015-11-24 15:12:14 -0500162 if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
Mike Marshall8c3905a2015-09-29 12:07:46 -0400163 set_signals(&orig_sigset);
Mike Marshall1182fca2015-07-17 10:38:15 -0400164
165 BUG_ON(ret != op->downcall.status);
166 /* retry if operation has not been serviced and if requested */
167 if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
168 gossip_debug(GOSSIP_WAIT_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500169 "orangefs: tag %llu (%s)"
Mike Marshall1182fca2015-07-17 10:38:15 -0400170 " -- operation to be retried (%d attempt)\n",
171 llu(op->tag),
172 op_name,
173 op->attempts + 1);
174
175 if (!op->uses_shared_memory)
176 /*
177 * this operation doesn't use the shared memory
178 * system
179 */
180 goto retry_servicing;
181
182 /* op uses shared memory */
183 if (get_bufmap_init() == 0) {
184 /*
185 * This operation uses the shared memory system AND
186 * the system is not yet ready. This situation occurs
187 * when the client-core is restarted AND there were
188 * operations waiting to be processed or were already
189 * in process.
190 */
191 gossip_debug(GOSSIP_WAIT_DEBUG,
192 "uses_shared_memory is true.\n");
193 gossip_debug(GOSSIP_WAIT_DEBUG,
194 "Client core in-service status(%d).\n",
195 is_daemon_in_service());
196 gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
197 get_bufmap_init());
198 gossip_debug(GOSSIP_WAIT_DEBUG,
199 "operation's status is 0x%0x.\n",
200 op->op_state);
201
202 /*
203 * let process sleep for a few seconds so shared
204 * memory system can be initialized.
205 */
206 spin_lock_irqsave(&op->lock, irqflags);
Mike Marshallce6c4142015-12-14 14:54:46 -0500207 prepare_to_wait(&orangefs_bufmap_init_waitq,
208 &wait_entry,
209 TASK_INTERRUPTIBLE);
Mike Marshall1182fca2015-07-17 10:38:15 -0400210 spin_unlock_irqrestore(&op->lock, irqflags);
211
Mike Marshall1182fca2015-07-17 10:38:15 -0400212 /*
Yi Liu8bb8aef2015-11-24 15:12:14 -0500213 * Wait for orangefs_bufmap_initialize() to wake me up
Mike Marshall1182fca2015-07-17 10:38:15 -0400214 * within the allotted time.
215 */
216 ret = schedule_timeout(MSECS_TO_JIFFIES
Yi Liu8bb8aef2015-11-24 15:12:14 -0500217 (1000 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS));
Mike Marshall1182fca2015-07-17 10:38:15 -0400218
219 gossip_debug(GOSSIP_WAIT_DEBUG,
220 "Value returned from schedule_timeout:"
221 "%d.\n",
222 ret);
223 gossip_debug(GOSSIP_WAIT_DEBUG,
224 "Is shared memory available? (%d).\n",
225 get_bufmap_init());
226
227 spin_lock_irqsave(&op->lock, irqflags);
Mike Marshallce6c4142015-12-14 14:54:46 -0500228 finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -0400229 spin_unlock_irqrestore(&op->lock, irqflags);
230
231 if (get_bufmap_init() == 0) {
232 gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
233 __func__,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500234 ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
Mike Marshall1182fca2015-07-17 10:38:15 -0400235 get_opname_string(op));
236 return -EIO;
237 }
238
239 /*
240 * Return to the calling function and re-populate a
241 * shared memory buffer.
242 */
243 return -EAGAIN;
244 }
245 }
246
247 gossip_debug(GOSSIP_WAIT_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500248 "orangefs: service_operation %s returning: %d for %p.\n",
Mike Marshall1182fca2015-07-17 10:38:15 -0400249 op_name,
250 ret,
251 op);
252 return ret;
253}
254
Yi Liu8bb8aef2015-11-24 15:12:14 -0500255void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
Mike Marshall1182fca2015-07-17 10:38:15 -0400256{
257 /*
258 * handle interrupted cases depending on what state we were in when
259 * the interruption is detected. there is a coarse grained lock
260 * across the operation.
261 *
262 * NOTE: be sure not to reverse lock ordering by locking an op lock
263 * while holding the request_list lock. Here, we first lock the op
264 * and then lock the appropriate list.
265 */
266 if (!op) {
267 gossip_debug(GOSSIP_WAIT_DEBUG,
268 "%s: op is null, ignoring\n",
269 __func__);
270 return;
271 }
272
273 /*
274 * one more sanity check, make sure it's in one of the possible states
275 * or don't try to cancel it
276 */
277 if (!(op_state_waiting(op) ||
278 op_state_in_progress(op) ||
279 op_state_serviced(op) ||
280 op_state_purged(op))) {
281 gossip_debug(GOSSIP_WAIT_DEBUG,
282 "%s: op %p not in a valid state (%0x), "
283 "ignoring\n",
284 __func__,
285 op,
286 op->op_state);
287 return;
288 }
289
290 spin_lock(&op->lock);
291
292 if (op_state_waiting(op)) {
293 /*
294 * upcall hasn't been read; remove op from upcall request
295 * list.
296 */
297 spin_unlock(&op->lock);
298 remove_op_from_request_list(op);
299 gossip_debug(GOSSIP_WAIT_DEBUG,
300 "Interrupted: Removed op %p from request_list\n",
301 op);
302 } else if (op_state_in_progress(op)) {
303 /* op must be removed from the in progress htable */
304 spin_unlock(&op->lock);
305 spin_lock(&htable_ops_in_progress_lock);
306 list_del(&op->list);
307 spin_unlock(&htable_ops_in_progress_lock);
308 gossip_debug(GOSSIP_WAIT_DEBUG,
309 "Interrupted: Removed op %p"
310 " from htable_ops_in_progress\n",
311 op);
312 } else if (!op_state_serviced(op)) {
313 spin_unlock(&op->lock);
314 gossip_err("interrupted operation is in a weird state 0x%x\n",
315 op->op_state);
Mike Marshall84d02152015-07-28 13:27:51 -0400316 } else {
317 /*
318 * It is not intended for execution to flow here,
319 * but having this unlock here makes sparse happy.
320 */
321 gossip_err("%s: can't get here.\n", __func__);
322 spin_unlock(&op->lock);
Mike Marshall1182fca2015-07-17 10:38:15 -0400323 }
324}
325
326/*
327 * sleeps on waitqueue waiting for matching downcall.
328 * if client-core finishes servicing, then we are good to go.
329 * else if client-core exits, we get woken up here, and retry with a timeout
330 *
331 * Post when this call returns to the caller, the specified op will no
332 * longer be on any list or htable.
333 *
334 * Returns 0 on success and -errno on failure
335 * Errors are:
336 * EAGAIN in case we want the caller to requeue and try again..
337 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
338 * operation since client-core seems to be exiting too often
339 * or if we were interrupted.
340 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500341int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
Mike Marshall1182fca2015-07-17 10:38:15 -0400342{
343 int ret = -EINVAL;
Mike Marshallce6c4142015-12-14 14:54:46 -0500344 DEFINE_WAIT(wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -0400345
346 while (1) {
Mike Marshall1182fca2015-07-17 10:38:15 -0400347 spin_lock(&op->lock);
Mike Marshallce6c4142015-12-14 14:54:46 -0500348 prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
Mike Marshall1182fca2015-07-17 10:38:15 -0400349 if (op_state_serviced(op)) {
350 spin_unlock(&op->lock);
351 ret = 0;
352 break;
353 }
354 spin_unlock(&op->lock);
355
356 if (!signal_pending(current)) {
357 /*
358 * if this was our first attempt and client-core
359 * has not purged our operation, we are happy to
360 * simply wait
361 */
362 spin_lock(&op->lock);
363 if (op->attempts == 0 && !op_state_purged(op)) {
364 spin_unlock(&op->lock);
365 schedule();
366 } else {
367 spin_unlock(&op->lock);
368 /*
369 * subsequent attempts, we retry exactly once
370 * with timeouts
371 */
372 if (!schedule_timeout(MSECS_TO_JIFFIES
373 (1000 * op_timeout_secs))) {
374 gossip_debug(GOSSIP_WAIT_DEBUG,
375 "*** %s:"
376 " operation timed out (tag"
377 " %llu, %p, att %d)\n",
378 __func__,
379 llu(op->tag),
380 op,
381 op->attempts);
382 ret = -ETIMEDOUT;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500383 orangefs_clean_up_interrupted_operation
Mike Marshall1182fca2015-07-17 10:38:15 -0400384 (op);
385 break;
386 }
387 }
388 spin_lock(&op->lock);
389 op->attempts++;
390 /*
391 * if the operation was purged in the meantime, it
392 * is better to requeue it afresh but ensure that
393 * we have not been purged repeatedly. This could
394 * happen if client-core crashes when an op
395 * is being serviced, so we requeue the op, client
396 * core crashes again so we requeue the op, client
397 * core starts, and so on...
398 */
399 if (op_state_purged(op)) {
Yi Liu8bb8aef2015-11-24 15:12:14 -0500400 ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
Mike Marshall1182fca2015-07-17 10:38:15 -0400401 -EAGAIN :
402 -EIO;
403 spin_unlock(&op->lock);
404 gossip_debug(GOSSIP_WAIT_DEBUG,
405 "*** %s:"
406 " operation purged (tag "
407 "%llu, %p, att %d)\n",
408 __func__,
409 llu(op->tag),
410 op,
411 op->attempts);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500412 orangefs_clean_up_interrupted_operation(op);
Mike Marshall1182fca2015-07-17 10:38:15 -0400413 break;
414 }
415 spin_unlock(&op->lock);
416 continue;
417 }
418
419 gossip_debug(GOSSIP_WAIT_DEBUG,
420 "*** %s:"
421 " operation interrupted by a signal (tag "
422 "%llu, op %p)\n",
423 __func__,
424 llu(op->tag),
425 op);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500426 orangefs_clean_up_interrupted_operation(op);
Mike Marshall1182fca2015-07-17 10:38:15 -0400427 ret = -EINTR;
428 break;
429 }
430
Mike Marshall1182fca2015-07-17 10:38:15 -0400431 spin_lock(&op->lock);
Mike Marshallce6c4142015-12-14 14:54:46 -0500432 finish_wait(&op->waitq, &wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -0400433 spin_unlock(&op->lock);
434
435 return ret;
436}
437
438/*
439 * similar to wait_for_matching_downcall(), but used in the special case
440 * of I/O cancellations.
441 *
442 * Note we need a special wait function because if this is called we already
443 * know that a signal is pending in current and need to service the
444 * cancellation upcall anyway. the only way to exit this is to either
445 * timeout or have the cancellation be serviced properly.
446 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500447int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
Mike Marshall1182fca2015-07-17 10:38:15 -0400448{
449 int ret = -EINVAL;
Mike Marshallce6c4142015-12-14 14:54:46 -0500450 DEFINE_WAIT(wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -0400451
452 while (1) {
Mike Marshall1182fca2015-07-17 10:38:15 -0400453 spin_lock(&op->lock);
Mike Marshallce6c4142015-12-14 14:54:46 -0500454 prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
Mike Marshall1182fca2015-07-17 10:38:15 -0400455 if (op_state_serviced(op)) {
456 gossip_debug(GOSSIP_WAIT_DEBUG,
457 "%s:op-state is SERVICED.\n",
458 __func__);
459 spin_unlock(&op->lock);
460 ret = 0;
461 break;
462 }
463 spin_unlock(&op->lock);
464
465 if (signal_pending(current)) {
466 gossip_debug(GOSSIP_WAIT_DEBUG,
467 "%s:operation interrupted by a signal (tag"
468 " %llu, op %p)\n",
469 __func__,
470 llu(op->tag),
471 op);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500472 orangefs_clean_up_interrupted_operation(op);
Mike Marshall1182fca2015-07-17 10:38:15 -0400473 ret = -EINTR;
474 break;
475 }
476
477 gossip_debug(GOSSIP_WAIT_DEBUG,
478 "%s:About to call schedule_timeout.\n",
479 __func__);
480 ret =
481 schedule_timeout(MSECS_TO_JIFFIES(1000 * op_timeout_secs));
482
483 gossip_debug(GOSSIP_WAIT_DEBUG,
484 "%s:Value returned from schedule_timeout(%d).\n",
485 __func__,
486 ret);
487 if (!ret) {
488 gossip_debug(GOSSIP_WAIT_DEBUG,
489 "%s:*** operation timed out: %p\n",
490 __func__,
491 op);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500492 orangefs_clean_up_interrupted_operation(op);
Mike Marshall1182fca2015-07-17 10:38:15 -0400493 ret = -ETIMEDOUT;
494 break;
495 }
496
497 gossip_debug(GOSSIP_WAIT_DEBUG,
498 "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
499 __func__);
500 ret = -ETIMEDOUT;
501 break;
502 }
503
Mike Marshall1182fca2015-07-17 10:38:15 -0400504 spin_lock(&op->lock);
Mike Marshallce6c4142015-12-14 14:54:46 -0500505 finish_wait(&op->waitq, &wait_entry);
Mike Marshall1182fca2015-07-17 10:38:15 -0400506 spin_unlock(&op->lock);
507
508 gossip_debug(GOSSIP_WAIT_DEBUG,
509 "%s:returning ret(%d)\n",
510 __func__,
511 ret);
512
513 return ret;
514}