/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

/*
 * Walk the list of operations on the request queue and mark each one
 * as purged.
 * NOTE: This is called from the device close path after the client-core
 * has guaranteed that no new operations can appear on the list, since
 * the client-core is about to exit.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		spin_lock(&op->lock);
		set_op_state_purged(op);
		spin_unlock(&op->lock);
		wake_up_interruptible(&op->waitq);
	}
	spin_unlock(&orangefs_request_list_lock);
}

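/*
 * Mark the op as waiting and append it to the tail of the global
 * request list, then wake any reader sleeping on
 * orangefs_request_list_waitq.
 */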
static inline void
add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

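/*
 * Same as add_op_to_request_list(), except the op is added to the
 * head of the request list so it is picked up ahead of ordinary ops.
 */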
static inline
void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);

	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note that op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of failure
 * to service the operation.  If the caller wishes to distinguish, then
 * op->state can be checked to see whether it was serviced or not.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	/* flags to modify behavior */
	sigset_t orig_sigset;
	int ret = 0;

	/* irqflags and wait_entry are only used IF the client-core aborts */
	unsigned long irqflags;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation: %s %p\n",
		     op_name,
		     op);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: operation posted by process: %s, pid: %i\n",
		     current->comm,
		     current->pid);

	/* mask out signals if this operation is not to be interrupted */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_block_signals(&orig_sigset);

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
		ret = mutex_lock_interruptible(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the mutex
		 */
		if (ret < 0) {
			if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
				orangefs_set_signals(&orig_sigset);
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "orangefs: service_operation interrupted.\n");
			return ret;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:About to call is_daemon_in_service().\n",
		     __func__);

	if (is_daemon_in_service() < 0) {
		/*
		 * By incrementing the per-operation attempt counter, we
		 * directly go into the timeout logic while waiting for
		 * the matching downcall to be read
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service(%d).\n",
			     __func__,
			     is_daemon_in_service());
		op->attempts++;
	}

	/* queue up the operation */
	if (flags & ORANGEFS_OP_PRIORITY) {
		add_priority_op_to_request_list(op);
	} else {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call add_op_to_request_list().\n",
			     __func__);
		add_op_to_request_list(op);
	}

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
		mutex_unlock(&request_mutex);

	/*
	 * If we are asked to service an asynchronous operation from
	 * the VFS perspective, we are done.
	 */
	if (flags & ORANGEFS_OP_ASYNC)
		return 0;

	if (flags & ORANGEFS_OP_CANCELLATION) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:"
			     "About to call wait_for_cancellation_downcall.\n",
			     __func__);
		ret = wait_for_cancellation_downcall(op);
	} else {
		ret = wait_for_matching_downcall(op);
	}

	if (ret < 0) {
		/* failed to get matching downcall */
		if (ret == -ETIMEDOUT) {
			gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
				   op_name);
		}
		op->downcall.status = ret;
	} else {
		/* got matching downcall; make sure status is in errno format */
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
	}

	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_set_signals(&orig_sigset);

	BUG_ON(ret != op->downcall.status);
	/* retry if operation has not been serviced and if requested */
	if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s)"
			     " -- operation to be retried (%d attempt)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts + 1);

		if (!op->uses_shared_memory)
			/*
			 * this operation doesn't use the shared memory
			 * system
			 */
			goto retry_servicing;

		/* op uses shared memory */
		if (orangefs_get_bufmap_init() == 0) {
			/*
			 * This operation uses the shared memory system AND
			 * the system is not yet ready. This situation occurs
			 * when the client-core is restarted AND there were
			 * operations waiting to be processed or were already
			 * in process.
			 */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "uses_shared_memory is true.\n");
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Client core in-service status(%d).\n",
				     is_daemon_in_service());
			gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
				     orangefs_get_bufmap_init());
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "operation's status is 0x%0x.\n",
				     op->op_state);

			/*
			 * let process sleep for a few seconds so shared
			 * memory system can be initialized.
			 */
			spin_lock_irqsave(&op->lock, irqflags);
			prepare_to_wait(&orangefs_bufmap_init_waitq,
					&wait_entry,
					TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&op->lock, irqflags);

			/*
			 * Wait for orangefs_bufmap_initialize() to wake me up
			 * within the allotted time.
			 */
			ret = schedule_timeout(MSECS_TO_JIFFIES
				(1000 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS));

			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Value returned from schedule_timeout:"
				     "%d.\n",
				     ret);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Is shared memory available? (%d).\n",
				     orangefs_get_bufmap_init());

			spin_lock_irqsave(&op->lock, irqflags);
			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
			spin_unlock_irqrestore(&op->lock, irqflags);

			if (orangefs_get_bufmap_init() == 0) {
				gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
					   __func__,
					   ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
					   get_opname_string(op));
				return -EIO;
			}

			/*
			 * Return to the calling function and re-populate a
			 * shared memory buffer.
			 */
			return -EAGAIN;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}

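/*
 * Walk the global request list under orangefs_request_list_lock and
 * unlink the given op if it is still queued there.
 */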
static inline void remove_op_from_request_list(struct orangefs_kernel_op_s *op)
{
	struct list_head *tmp = NULL;
	struct list_head *tmp_safe = NULL;
	struct orangefs_kernel_op_s *tmp_op = NULL;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_safe(tmp, tmp_safe, &orangefs_request_list) {
		tmp_op = list_entry(tmp,
				    struct orangefs_kernel_op_s,
				    list);
		if (tmp_op && (tmp_op == op)) {
			list_del(&tmp_op->list);
			break;
		}
	}
	spin_unlock(&orangefs_request_list_lock);
}

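/*
 * Called when an op will no longer be serviced (signal, timeout or
 * purge): remove it from the request list or the in-progress htable,
 * depending on the state it was in when the interruption was detected.
 */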
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.  there is a coarse grained lock
	 * across the operation.
	 *
	 * NOTE: be sure not to reverse lock ordering by locking an op lock
	 * while holding the request_list lock.  Here, we first lock the op
	 * and then lock the appropriate list.
	 */
	if (!op) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op is null, ignoring\n",
			     __func__);
		return;
	}

	/*
	 * one more sanity check, make sure it's in one of the possible states
	 * or don't try to cancel it
	 */
	if (!(op_state_waiting(op) ||
	      op_state_in_progress(op) ||
	      op_state_serviced(op) ||
	      op_state_purged(op))) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op %p not in a valid state (%0x), "
			     "ignoring\n",
			     __func__,
			     op,
			     op->op_state);
		return;
	}

	spin_lock(&op->lock);

	if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		remove_op_from_request_list(op);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
}

/*
 * Sleeps on the op's waitqueue waiting for the matching downcall.
 * If the client-core finishes servicing the op, we are good to go;
 * if the client-core exits instead, we get woken up here and retry
 * with a timeout.
 *
 * Postcondition: when this call returns to the caller, the specified
 * op will no longer be on any list or htable.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 *       EAGAIN in case we want the caller to requeue and try again.
 *       EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 *       operation, either because the client-core seems to be exiting
 *       too often or because we were interrupted.
 */
int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (!signal_pending(current)) {
			/*
			 * if this was our first attempt and client-core
			 * has not purged our operation, we are happy to
			 * simply wait
			 */
			spin_lock(&op->lock);
			if (op->attempts == 0 && !op_state_purged(op)) {
				spin_unlock(&op->lock);
				schedule();
			} else {
				spin_unlock(&op->lock);
				/*
				 * subsequent attempts, we retry exactly once
				 * with timeouts
				 */
				if (!schedule_timeout(MSECS_TO_JIFFIES
				      (1000 * op_timeout_secs))) {
					gossip_debug(GOSSIP_WAIT_DEBUG,
						     "*** %s:"
						     " operation timed out (tag"
						     " %llu, %p, att %d)\n",
						     __func__,
						     llu(op->tag),
						     op,
						     op->attempts);
					ret = -ETIMEDOUT;
					orangefs_clean_up_interrupted_operation
					    (op);
					break;
				}
			}
			spin_lock(&op->lock);
			op->attempts++;
			/*
			 * if the operation was purged in the meantime, it
			 * is better to requeue it afresh but ensure that
			 * we have not been purged repeatedly. This could
			 * happen if client-core crashes when an op
			 * is being serviced, so we requeue the op, client
			 * core crashes again so we requeue the op, client
			 * core starts, and so on...
			 */
			if (op_state_purged(op)) {
				ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
					 -EAGAIN :
					 -EIO;
				spin_unlock(&op->lock);
				gossip_debug(GOSSIP_WAIT_DEBUG,
					     "*** %s:"
					     " operation purged (tag "
					     "%llu, %p, att %d)\n",
					     __func__,
					     llu(op->tag),
					     op,
					     op->attempts);
				orangefs_clean_up_interrupted_operation(op);
				break;
			}
			spin_unlock(&op->lock);
			continue;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "*** %s:"
			     " operation interrupted by a signal (tag "
			     "%llu, op %p)\n",
			     __func__,
			     llu(op->tag),
			     op);
		orangefs_clean_up_interrupted_operation(op);
		ret = -EINTR;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	return ret;
}

/*
 * Similar to wait_for_matching_downcall(), but used in the special case
 * of I/O cancellations.
 *
 * Note that we need a special wait function because, if this is called,
 * we already know that a signal is pending in current and that we need
 * to service the cancellation upcall anyway.  The only ways to exit this
 * are to time out or to have the cancellation serviced properly.
 */
int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:op-state is SERVICED.\n",
				     __func__);
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (signal_pending(current)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:operation interrupted by a signal (tag"
				     " %llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call schedule_timeout.\n",
			     __func__);
		ret =
		    schedule_timeout(MSECS_TO_JIFFIES(1000 * op_timeout_secs));

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Value returned from schedule_timeout(%d).\n",
			     __func__,
			     ret);
		if (!ret) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:*** operation timed out: %p\n",
				     __func__,
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -ETIMEDOUT;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
			     __func__);
		ret = -ETIMEDOUT;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:returning ret(%d)\n",
		     __func__,
		     ret);

	return ret;
}