/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */
15 | #include "protocol.h" | |
575e9461 MM |
16 | #include "orangefs-kernel.h" |
17 | #include "orangefs-bufmap.h" | |
1182fca3 MM |
18 | |
19 | /* | |
20 | * What we do in this function is to walk the list of operations that are | |
21 | * present in the request queue and mark them as purged. | |
22 | * NOTE: This is called from the device close after client-core has | |
23 | * guaranteed that no new operations could appear on the list since the | |
24 | * client-core is anyway going to exit. | |
25 | */ | |
26 | void purge_waiting_ops(void) | |
27 | { | |
8bb8aefd | 28 | struct orangefs_kernel_op_s *op; |
1182fca3 | 29 | |
8bb8aefd YL |
30 | spin_lock(&orangefs_request_list_lock); |
31 | list_for_each_entry(op, &orangefs_request_list, list) { | |
1182fca3 MM |
32 | gossip_debug(GOSSIP_WAIT_DEBUG, |
33 | "pvfs2-client-core: purging op tag %llu %s\n", | |
34 | llu(op->tag), | |
35 | get_opname_string(op)); | |
36 | spin_lock(&op->lock); | |
37 | set_op_state_purged(op); | |
38 | spin_unlock(&op->lock); | |
39 | wake_up_interruptible(&op->waitq); | |
40 | } | |
8bb8aefd | 41 | spin_unlock(&orangefs_request_list_lock); |
1182fca3 MM |
42 | } |
43 | ||
44 | /* | |
8bb8aefd | 45 | * submits a ORANGEFS operation and waits for it to complete |
1182fca3 MM |
46 | * |
47 | * Note op->downcall.status will contain the status of the operation (in | |
48 | * errno format), whether provided by pvfs2-client or a result of failure to | |
49 | * service the operation. If the caller wishes to distinguish, then | |
50 | * op->state can be checked to see if it was serviced or not. | |
51 | * | |
52 | * Returns contents of op->downcall.status for convenience | |
53 | */ | |
8bb8aefd | 54 | int service_operation(struct orangefs_kernel_op_s *op, |
1182fca3 MM |
55 | const char *op_name, |
56 | int flags) | |
57 | { | |
58 | /* flags to modify behavior */ | |
59 | sigset_t orig_sigset; | |
60 | int ret = 0; | |
61 | ||
62 | /* irqflags and wait_entry are only used IF the client-core aborts */ | |
63 | unsigned long irqflags; | |
64 | ||
ce6c414e | 65 | DEFINE_WAIT(wait_entry); |
1182fca3 MM |
66 | |
67 | op->upcall.tgid = current->tgid; | |
68 | op->upcall.pid = current->pid; | |
69 | ||
70 | retry_servicing: | |
71 | op->downcall.status = 0; | |
72 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
8bb8aefd | 73 | "orangefs: service_operation: %s %p\n", |
1182fca3 MM |
74 | op_name, |
75 | op); | |
76 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
8bb8aefd | 77 | "orangefs: operation posted by process: %s, pid: %i\n", |
1182fca3 MM |
78 | current->comm, |
79 | current->pid); | |
80 | ||
81 | /* mask out signals if this operation is not to be interrupted */ | |
8bb8aefd | 82 | if (!(flags & ORANGEFS_OP_INTERRUPTIBLE)) |
c146c0b8 | 83 | orangefs_block_signals(&orig_sigset); |
1182fca3 | 84 | |
8bb8aefd | 85 | if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) { |
1182fca3 MM |
86 | ret = mutex_lock_interruptible(&request_mutex); |
87 | /* | |
88 | * check to see if we were interrupted while waiting for | |
89 | * semaphore | |
90 | */ | |
91 | if (ret < 0) { | |
8bb8aefd | 92 | if (!(flags & ORANGEFS_OP_INTERRUPTIBLE)) |
c146c0b8 | 93 | orangefs_set_signals(&orig_sigset); |
1182fca3 MM |
94 | op->downcall.status = ret; |
95 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
8bb8aefd | 96 | "orangefs: service_operation interrupted.\n"); |
1182fca3 MM |
97 | return ret; |
98 | } | |
99 | } | |
100 | ||
101 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
102 | "%s:About to call is_daemon_in_service().\n", | |
103 | __func__); | |
104 | ||
105 | if (is_daemon_in_service() < 0) { | |
106 | /* | |
107 | * By incrementing the per-operation attempt counter, we | |
108 | * directly go into the timeout logic while waiting for | |
109 | * the matching downcall to be read | |
110 | */ | |
111 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
112 | "%s:client core is NOT in service(%d).\n", | |
113 | __func__, | |
114 | is_daemon_in_service()); | |
115 | op->attempts++; | |
116 | } | |
117 | ||
118 | /* queue up the operation */ | |
8bb8aefd | 119 | if (flags & ORANGEFS_OP_PRIORITY) { |
1182fca3 MM |
120 | add_priority_op_to_request_list(op); |
121 | } else { | |
122 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
123 | "%s:About to call add_op_to_request_list().\n", | |
124 | __func__); | |
125 | add_op_to_request_list(op); | |
126 | } | |
127 | ||
8bb8aefd | 128 | if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) |
1182fca3 MM |
129 | mutex_unlock(&request_mutex); |
130 | ||
131 | /* | |
132 | * If we are asked to service an asynchronous operation from | |
133 | * VFS perspective, we are done. | |
134 | */ | |
8bb8aefd | 135 | if (flags & ORANGEFS_OP_ASYNC) |
1182fca3 MM |
136 | return 0; |
137 | ||
8bb8aefd | 138 | if (flags & ORANGEFS_OP_CANCELLATION) { |
1182fca3 MM |
139 | gossip_debug(GOSSIP_WAIT_DEBUG, |
140 | "%s:" | |
141 | "About to call wait_for_cancellation_downcall.\n", | |
142 | __func__); | |
143 | ret = wait_for_cancellation_downcall(op); | |
144 | } else { | |
145 | ret = wait_for_matching_downcall(op); | |
146 | } | |
147 | ||
148 | if (ret < 0) { | |
149 | /* failed to get matching downcall */ | |
150 | if (ret == -ETIMEDOUT) { | |
8bb8aefd | 151 | gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n", |
1182fca3 MM |
152 | op_name); |
153 | } | |
154 | op->downcall.status = ret; | |
155 | } else { | |
156 | /* got matching downcall; make sure status is in errno format */ | |
157 | op->downcall.status = | |
8bb8aefd | 158 | orangefs_normalize_to_errno(op->downcall.status); |
1182fca3 MM |
159 | ret = op->downcall.status; |
160 | } | |
161 | ||
8bb8aefd | 162 | if (!(flags & ORANGEFS_OP_INTERRUPTIBLE)) |
c146c0b8 | 163 | orangefs_set_signals(&orig_sigset); |
1182fca3 MM |
164 | |
165 | BUG_ON(ret != op->downcall.status); | |
166 | /* retry if operation has not been serviced and if requested */ | |
167 | if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) { | |
168 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
8bb8aefd | 169 | "orangefs: tag %llu (%s)" |
1182fca3 MM |
170 | " -- operation to be retried (%d attempt)\n", |
171 | llu(op->tag), | |
172 | op_name, | |
173 | op->attempts + 1); | |
174 | ||
175 | if (!op->uses_shared_memory) | |
176 | /* | |
177 | * this operation doesn't use the shared memory | |
178 | * system | |
179 | */ | |
180 | goto retry_servicing; | |
181 | ||
182 | /* op uses shared memory */ | |
7d221485 | 183 | if (orangefs_get_bufmap_init() == 0) { |
1182fca3 MM |
184 | /* |
185 | * This operation uses the shared memory system AND | |
186 | * the system is not yet ready. This situation occurs | |
187 | * when the client-core is restarted AND there were | |
188 | * operations waiting to be processed or were already | |
189 | * in process. | |
190 | */ | |
191 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
192 | "uses_shared_memory is true.\n"); | |
193 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
194 | "Client core in-service status(%d).\n", | |
195 | is_daemon_in_service()); | |
196 | gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n", | |
7d221485 | 197 | orangefs_get_bufmap_init()); |
1182fca3 MM |
198 | gossip_debug(GOSSIP_WAIT_DEBUG, |
199 | "operation's status is 0x%0x.\n", | |
200 | op->op_state); | |
201 | ||
202 | /* | |
203 | * let process sleep for a few seconds so shared | |
204 | * memory system can be initialized. | |
205 | */ | |
206 | spin_lock_irqsave(&op->lock, irqflags); | |
ce6c414e MM |
207 | prepare_to_wait(&orangefs_bufmap_init_waitq, |
208 | &wait_entry, | |
209 | TASK_INTERRUPTIBLE); | |
1182fca3 MM |
210 | spin_unlock_irqrestore(&op->lock, irqflags); |
211 | ||
1182fca3 | 212 | /* |
8bb8aefd | 213 | * Wait for orangefs_bufmap_initialize() to wake me up |
1182fca3 MM |
214 | * within the allotted time. |
215 | */ | |
216 | ret = schedule_timeout(MSECS_TO_JIFFIES | |
8bb8aefd | 217 | (1000 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS)); |
1182fca3 MM |
218 | |
219 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
220 | "Value returned from schedule_timeout:" | |
221 | "%d.\n", | |
222 | ret); | |
223 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
224 | "Is shared memory available? (%d).\n", | |
7d221485 | 225 | orangefs_get_bufmap_init()); |
1182fca3 MM |
226 | |
227 | spin_lock_irqsave(&op->lock, irqflags); | |
ce6c414e | 228 | finish_wait(&orangefs_bufmap_init_waitq, &wait_entry); |
1182fca3 MM |
229 | spin_unlock_irqrestore(&op->lock, irqflags); |
230 | ||
7d221485 | 231 | if (orangefs_get_bufmap_init() == 0) { |
1182fca3 MM |
232 | gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n", |
233 | __func__, | |
8bb8aefd | 234 | ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS, |
1182fca3 MM |
235 | get_opname_string(op)); |
236 | return -EIO; | |
237 | } | |
238 | ||
239 | /* | |
240 | * Return to the calling function and re-populate a | |
241 | * shared memory buffer. | |
242 | */ | |
243 | return -EAGAIN; | |
244 | } | |
245 | } | |
246 | ||
247 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
8bb8aefd | 248 | "orangefs: service_operation %s returning: %d for %p.\n", |
1182fca3 MM |
249 | op_name, |
250 | ret, | |
251 | op); | |
252 | return ret; | |
253 | } | |
254 | ||
8bb8aefd | 255 | void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op) |
1182fca3 MM |
256 | { |
257 | /* | |
258 | * handle interrupted cases depending on what state we were in when | |
259 | * the interruption is detected. there is a coarse grained lock | |
260 | * across the operation. | |
261 | * | |
262 | * NOTE: be sure not to reverse lock ordering by locking an op lock | |
263 | * while holding the request_list lock. Here, we first lock the op | |
264 | * and then lock the appropriate list. | |
265 | */ | |
266 | if (!op) { | |
267 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
268 | "%s: op is null, ignoring\n", | |
269 | __func__); | |
270 | return; | |
271 | } | |
272 | ||
273 | /* | |
274 | * one more sanity check, make sure it's in one of the possible states | |
275 | * or don't try to cancel it | |
276 | */ | |
277 | if (!(op_state_waiting(op) || | |
278 | op_state_in_progress(op) || | |
279 | op_state_serviced(op) || | |
280 | op_state_purged(op))) { | |
281 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
282 | "%s: op %p not in a valid state (%0x), " | |
283 | "ignoring\n", | |
284 | __func__, | |
285 | op, | |
286 | op->op_state); | |
287 | return; | |
288 | } | |
289 | ||
290 | spin_lock(&op->lock); | |
291 | ||
292 | if (op_state_waiting(op)) { | |
293 | /* | |
294 | * upcall hasn't been read; remove op from upcall request | |
295 | * list. | |
296 | */ | |
297 | spin_unlock(&op->lock); | |
298 | remove_op_from_request_list(op); | |
299 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
300 | "Interrupted: Removed op %p from request_list\n", | |
301 | op); | |
302 | } else if (op_state_in_progress(op)) { | |
303 | /* op must be removed from the in progress htable */ | |
304 | spin_unlock(&op->lock); | |
305 | spin_lock(&htable_ops_in_progress_lock); | |
306 | list_del(&op->list); | |
307 | spin_unlock(&htable_ops_in_progress_lock); | |
308 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
309 | "Interrupted: Removed op %p" | |
310 | " from htable_ops_in_progress\n", | |
311 | op); | |
312 | } else if (!op_state_serviced(op)) { | |
313 | spin_unlock(&op->lock); | |
314 | gossip_err("interrupted operation is in a weird state 0x%x\n", | |
315 | op->op_state); | |
84d02150 MM |
316 | } else { |
317 | /* | |
318 | * It is not intended for execution to flow here, | |
319 | * but having this unlock here makes sparse happy. | |
320 | */ | |
321 | gossip_err("%s: can't get here.\n", __func__); | |
322 | spin_unlock(&op->lock); | |
1182fca3 MM |
323 | } |
324 | } | |
325 | ||
326 | /* | |
327 | * sleeps on waitqueue waiting for matching downcall. | |
328 | * if client-core finishes servicing, then we are good to go. | |
329 | * else if client-core exits, we get woken up here, and retry with a timeout | |
330 | * | |
331 | * Post when this call returns to the caller, the specified op will no | |
332 | * longer be on any list or htable. | |
333 | * | |
334 | * Returns 0 on success and -errno on failure | |
335 | * Errors are: | |
336 | * EAGAIN in case we want the caller to requeue and try again.. | |
337 | * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this | |
338 | * operation since client-core seems to be exiting too often | |
339 | * or if we were interrupted. | |
340 | */ | |
8bb8aefd | 341 | int wait_for_matching_downcall(struct orangefs_kernel_op_s *op) |
1182fca3 MM |
342 | { |
343 | int ret = -EINVAL; | |
ce6c414e | 344 | DEFINE_WAIT(wait_entry); |
1182fca3 MM |
345 | |
346 | while (1) { | |
1182fca3 | 347 | spin_lock(&op->lock); |
ce6c414e | 348 | prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE); |
1182fca3 MM |
349 | if (op_state_serviced(op)) { |
350 | spin_unlock(&op->lock); | |
351 | ret = 0; | |
352 | break; | |
353 | } | |
354 | spin_unlock(&op->lock); | |
355 | ||
356 | if (!signal_pending(current)) { | |
357 | /* | |
358 | * if this was our first attempt and client-core | |
359 | * has not purged our operation, we are happy to | |
360 | * simply wait | |
361 | */ | |
362 | spin_lock(&op->lock); | |
363 | if (op->attempts == 0 && !op_state_purged(op)) { | |
364 | spin_unlock(&op->lock); | |
365 | schedule(); | |
366 | } else { | |
367 | spin_unlock(&op->lock); | |
368 | /* | |
369 | * subsequent attempts, we retry exactly once | |
370 | * with timeouts | |
371 | */ | |
372 | if (!schedule_timeout(MSECS_TO_JIFFIES | |
373 | (1000 * op_timeout_secs))) { | |
374 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
375 | "*** %s:" | |
376 | " operation timed out (tag" | |
377 | " %llu, %p, att %d)\n", | |
378 | __func__, | |
379 | llu(op->tag), | |
380 | op, | |
381 | op->attempts); | |
382 | ret = -ETIMEDOUT; | |
8bb8aefd | 383 | orangefs_clean_up_interrupted_operation |
1182fca3 MM |
384 | (op); |
385 | break; | |
386 | } | |
387 | } | |
388 | spin_lock(&op->lock); | |
389 | op->attempts++; | |
390 | /* | |
391 | * if the operation was purged in the meantime, it | |
392 | * is better to requeue it afresh but ensure that | |
393 | * we have not been purged repeatedly. This could | |
394 | * happen if client-core crashes when an op | |
395 | * is being serviced, so we requeue the op, client | |
396 | * core crashes again so we requeue the op, client | |
397 | * core starts, and so on... | |
398 | */ | |
399 | if (op_state_purged(op)) { | |
8bb8aefd | 400 | ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ? |
1182fca3 MM |
401 | -EAGAIN : |
402 | -EIO; | |
403 | spin_unlock(&op->lock); | |
404 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
405 | "*** %s:" | |
406 | " operation purged (tag " | |
407 | "%llu, %p, att %d)\n", | |
408 | __func__, | |
409 | llu(op->tag), | |
410 | op, | |
411 | op->attempts); | |
8bb8aefd | 412 | orangefs_clean_up_interrupted_operation(op); |
1182fca3 MM |
413 | break; |
414 | } | |
415 | spin_unlock(&op->lock); | |
416 | continue; | |
417 | } | |
418 | ||
419 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
420 | "*** %s:" | |
421 | " operation interrupted by a signal (tag " | |
422 | "%llu, op %p)\n", | |
423 | __func__, | |
424 | llu(op->tag), | |
425 | op); | |
8bb8aefd | 426 | orangefs_clean_up_interrupted_operation(op); |
1182fca3 MM |
427 | ret = -EINTR; |
428 | break; | |
429 | } | |
430 | ||
1182fca3 | 431 | spin_lock(&op->lock); |
ce6c414e | 432 | finish_wait(&op->waitq, &wait_entry); |
1182fca3 MM |
433 | spin_unlock(&op->lock); |
434 | ||
435 | return ret; | |
436 | } | |
437 | ||
438 | /* | |
439 | * similar to wait_for_matching_downcall(), but used in the special case | |
440 | * of I/O cancellations. | |
441 | * | |
442 | * Note we need a special wait function because if this is called we already | |
443 | * know that a signal is pending in current and need to service the | |
444 | * cancellation upcall anyway. the only way to exit this is to either | |
445 | * timeout or have the cancellation be serviced properly. | |
446 | */ | |
8bb8aefd | 447 | int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op) |
1182fca3 MM |
448 | { |
449 | int ret = -EINVAL; | |
ce6c414e | 450 | DEFINE_WAIT(wait_entry); |
1182fca3 MM |
451 | |
452 | while (1) { | |
1182fca3 | 453 | spin_lock(&op->lock); |
ce6c414e | 454 | prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE); |
1182fca3 MM |
455 | if (op_state_serviced(op)) { |
456 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
457 | "%s:op-state is SERVICED.\n", | |
458 | __func__); | |
459 | spin_unlock(&op->lock); | |
460 | ret = 0; | |
461 | break; | |
462 | } | |
463 | spin_unlock(&op->lock); | |
464 | ||
465 | if (signal_pending(current)) { | |
466 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
467 | "%s:operation interrupted by a signal (tag" | |
468 | " %llu, op %p)\n", | |
469 | __func__, | |
470 | llu(op->tag), | |
471 | op); | |
8bb8aefd | 472 | orangefs_clean_up_interrupted_operation(op); |
1182fca3 MM |
473 | ret = -EINTR; |
474 | break; | |
475 | } | |
476 | ||
477 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
478 | "%s:About to call schedule_timeout.\n", | |
479 | __func__); | |
480 | ret = | |
481 | schedule_timeout(MSECS_TO_JIFFIES(1000 * op_timeout_secs)); | |
482 | ||
483 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
484 | "%s:Value returned from schedule_timeout(%d).\n", | |
485 | __func__, | |
486 | ret); | |
487 | if (!ret) { | |
488 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
489 | "%s:*** operation timed out: %p\n", | |
490 | __func__, | |
491 | op); | |
8bb8aefd | 492 | orangefs_clean_up_interrupted_operation(op); |
1182fca3 MM |
493 | ret = -ETIMEDOUT; |
494 | break; | |
495 | } | |
496 | ||
497 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
498 | "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n", | |
499 | __func__); | |
500 | ret = -ETIMEDOUT; | |
501 | break; | |
502 | } | |
503 | ||
1182fca3 | 504 | spin_lock(&op->lock); |
ce6c414e | 505 | finish_wait(&op->waitq, &wait_entry); |
1182fca3 MM |
506 | spin_unlock(&op->lock); |
507 | ||
508 | gossip_debug(GOSSIP_WAIT_DEBUG, | |
509 | "%s:returning ret(%d)\n", | |
510 | __func__, | |
511 | ret); | |
512 | ||
513 | return ret; | |
514 | } |