orangefs: clean up op_alloc()
fs/orangefs/waitqueue.c
/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *);
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);

/*
 * What we do in this function is to walk the list of operations that are
 * present in the request queue and mark them as purged.
 * NOTE: This is called from the device close after client-core has
 * guaranteed that no new operations could appear on the list since the
 * client-core is anyway going to exit.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		spin_lock(&op->lock);
		set_op_state_purged(op);
		spin_unlock(&op->lock);
	}
	spin_unlock(&orangefs_request_list_lock);
}

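/*
 * Queue an op at the tail of the shared request list, mark it as waiting,
 * and wake any reader blocked on the request-list waitqueue (the client-core
 * daemon's device read).
 */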
static inline void
add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

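/*
 * Like add_op_to_request_list(), but queue the op at the head of the
 * request list so the client-core sees it ahead of normal ops.
 */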
static inline
void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);

	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

/*
 * submits an ORANGEFS operation and waits for it to complete
 *
 * Note op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or a result of failure to
 * service the operation. If the caller wishes to distinguish, then
 * op->state can be checked to see if it was serviced or not.
 *
 * Returns contents of op->downcall.status for convenience
 */
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	/* flags to modify behavior */
	sigset_t orig_sigset;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation: %s %p\n",
		     op_name,
		     op);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: operation posted by process: %s, pid: %i\n",
		     current->comm,
		     current->pid);

	/* mask out signals if this operation is not to be interrupted */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_block_signals(&orig_sigset);

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
		ret = mutex_lock_interruptible(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the request mutex
		 */
		if (ret < 0) {
			if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
				orangefs_set_signals(&orig_sigset);
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "orangefs: service_operation interrupted.\n");
			return ret;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:About to call is_daemon_in_service().\n",
		     __func__);

	if (is_daemon_in_service() < 0) {
		/*
		 * By incrementing the per-operation attempt counter, we
		 * directly go into the timeout logic while waiting for
		 * the matching downcall to be read
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service(%d).\n",
			     __func__,
			     is_daemon_in_service());
		op->attempts++;
	}

	/* queue up the operation */
	if (flags & ORANGEFS_OP_PRIORITY) {
		add_priority_op_to_request_list(op);
	} else {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call add_op_to_request_list().\n",
			     __func__);
		add_op_to_request_list(op);
	}

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
		mutex_unlock(&request_mutex);

	/*
	 * If we are asked to service an asynchronous operation from
	 * VFS perspective, we are done.
	 */
	if (flags & ORANGEFS_OP_ASYNC)
		return 0;

	if (flags & ORANGEFS_OP_CANCELLATION) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:"
			     "About to call wait_for_cancellation_downcall.\n",
			     __func__);
		ret = wait_for_cancellation_downcall(op);
	} else {
		ret = wait_for_matching_downcall(op);
	}

	if (ret < 0) {
		/* failed to get matching downcall */
		if (ret == -ETIMEDOUT) {
			gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
				   op_name);
		}
		op->downcall.status = ret;
	} else {
		/* got matching downcall; make sure status is in errno format */
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
	}

	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_set_signals(&orig_sigset);

	BUG_ON(ret != op->downcall.status);
	/* retry if operation has not been serviced and if requested */
	if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s)"
			     " -- operation to be retried (%d attempt)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts + 1);

		if (!op->uses_shared_memory)
			/*
			 * this operation doesn't use the shared memory
			 * system
			 */
			goto retry_servicing;

		/* op uses shared memory */
		if (orangefs_get_bufmap_init() == 0) {
			/*
			 * This operation uses the shared memory system AND
			 * the system is not yet ready. This situation occurs
			 * when the client-core is restarted AND there were
			 * operations waiting to be processed or were already
			 * in process.
			 */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "uses_shared_memory is true.\n");
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Client core in-service status(%d).\n",
				     is_daemon_in_service());
			gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
				     orangefs_get_bufmap_init());
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "operation's status is 0x%0x.\n",
				     op->op_state);

			/*
			 * let process sleep for a few seconds so shared
			 * memory system can be initialized.
			 */
			prepare_to_wait(&orangefs_bufmap_init_waitq,
					&wait_entry,
					TASK_INTERRUPTIBLE);

			/*
			 * Wait for orangefs_bufmap_initialize() to wake me up
			 * within the allotted time.
			 */
			ret = schedule_timeout(
				ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ);

			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Value returned from schedule_timeout:"
				     "%d.\n",
				     ret);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Is shared memory available? (%d).\n",
				     orangefs_get_bufmap_init());

			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);

			if (orangefs_get_bufmap_init() == 0) {
				gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
					   __func__,
					   ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
					   get_opname_string(op));
				return -EIO;
			}

			/*
			 * Return to the calling function and re-populate a
			 * shared memory buffer.
			 */
			return -EAGAIN;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}

static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected. there is a coarse grained lock
	 * across the operation.
	 *
	 * Called with op->lock held.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
}

/*
 * sleeps on waitqueue waiting for matching downcall.
 * if client-core finishes servicing, then we are good to go.
 * else if client-core exits, we get woken up here, and retry with a timeout
 *
 * Postcondition: when this call returns to the caller, the specified op will
 * no longer be on any list or htable.
 *
 * Returns 0 on success and -errno on failure
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation since client-core seems to be exiting too often
 * or if we were interrupted.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}

		if (unlikely(signal_pending(current))) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation interrupted by a signal (tag "
				     "%llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		/*
		 * if this was our first attempt and client-core
		 * has not purged our operation, we are happy to
		 * simply wait
		 */
		if (op->attempts == 0 && !op_state_purged(op)) {
			spin_unlock(&op->lock);
			schedule();
		} else {
			spin_unlock(&op->lock);
			/*
			 * subsequent attempts, we retry exactly once
			 * with timeouts
			 */
			if (!schedule_timeout(op_timeout_secs * HZ)) {
				gossip_debug(GOSSIP_WAIT_DEBUG,
					     "*** %s:"
					     " operation timed out (tag"
					     " %llu, %p, att %d)\n",
					     __func__,
					     llu(op->tag),
					     op,
					     op->attempts);
				ret = -ETIMEDOUT;
				spin_lock(&op->lock);
				orangefs_clean_up_interrupted_operation(op);
				break;
			}
		}
		spin_lock(&op->lock);
		op->attempts++;
		/*
		 * if the operation was purged in the meantime, it
		 * is better to requeue it afresh but ensure that
		 * we have not been purged repeatedly. This could
		 * happen if client-core crashes when an op
		 * is being serviced, so we requeue the op, client
		 * core crashes again so we requeue the op, client
		 * core starts, and so on...
		 */
		if (op_state_purged(op)) {
			ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
				 -EAGAIN :
				 -EIO;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation purged (tag "
				     "%llu, %p, att %d)\n",
				     __func__,
				     llu(op->tag),
				     op,
				     op->attempts);
			orangefs_clean_up_interrupted_operation(op);
			break;
		}
		spin_unlock(&op->lock);
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	return ret;
}

/*
 * similar to wait_for_matching_downcall(), but used in the special case
 * of I/O cancellations.
 *
 * Note we need a special wait function because if this is called we already
 * know that a signal is pending in current and need to service the
 * cancellation upcall anyway. the only way to exit this is to either
 * timeout or have the cancellation be serviced properly.
 */
static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:op-state is SERVICED.\n",
				     __func__);
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}

		if (signal_pending(current)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:operation interrupted by a signal (tag"
				     " %llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call schedule_timeout.\n",
			     __func__);
		spin_unlock(&op->lock);
		ret = schedule_timeout(op_timeout_secs * HZ);

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Value returned from schedule_timeout(%d).\n",
			     __func__,
			     ret);
		if (!ret) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:*** operation timed out: %p\n",
				     __func__,
				     op);
			spin_lock(&op->lock);
			orangefs_clean_up_interrupted_operation(op);
			ret = -ETIMEDOUT;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
			     __func__);
		ret = -ETIMEDOUT;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:returning ret(%d)\n",
		     __func__,
		     ret);

	return ret;
}