/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *	Based upon conversations with large numbers
 *	of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
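
/*
 * A sketch of how this is typically reached (illustrative, not code from
 * this file): when a low-level driver's queuecommand handler returns
 * SCSI_MLQUEUE_DEVICE_BUSY, the dispatch path calls
 *
 *	scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
 *
 * which sets device->device_blocked = device->max_device_blocked and puts
 * the request back at the head of the queue, as implemented above.
 */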
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags or'd into the request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
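
/*
 * Minimal usage sketch (illustrative only, not code from this file):
 * issuing a TEST UNIT READY with no data transfer.  The timeout and
 * retry count are example values.
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0,
 *				  NULL, 30 * HZ, 3, 0, NULL);
 *
 * A nonzero result packs the host, driver and status bytes; callers that
 * want decoded sense data normally use scsi_execute_req() below instead.
 */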

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Notes:	This function has the job of initializing a number of
 *		fields related to error handling.  Typically this will
 *		be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
			  !test_bit(QUEUE_FLAG_REENTER,
				    &sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can happen for a number of reasons - the main one
 *		being I/O errors in the middle of the request, in which
 *		case we need to request the blocks that come after the
 *		bad sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:	cmd	 - command that is complete.
 *		error	 - 0 if I/O indicates success, < 0 for I/O error.
 *		bytes	 - number of bytes of completed I/O
 *		requeue	 - indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	cmd if requeue required, NULL otherwise.
 *
 * Notes:	This is called for block device requests in order to
 *		mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
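
/*
 * Worked example: a command with 24 scatter/gather entries gives
 * get_count_order(24) == 5, so the index is 5 - 3 == 2, which selects
 * the "sgpool-32" mempool declared above.
 */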

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:	scsi_release_buffers()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that we are bailing.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Function:	scsi_io_completion()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that is finished.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	This function is matched in terms of capabilities to
 *		the function that created the scatter-gather list.
 *		In other words, if there are no bounce buffers
 *		(the normal case for most drivers), we don't need
 *		the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			blk_end_request_all(req, 0);

			scsi_release_buffers(cmd);
			scsi_next_command(cmd);
			return;
		}
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
		if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		blk_end_request_all(req, -EIO);
		scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:	scsi_init_io()
 *
 * Purpose:	SCSI I/O initialize function.
 *
 * Arguments:	cmd   - Command descriptor we wish to initialize
 *
 * Returns:	0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);
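
/*
 * Typical call path (a sketch of how upper layers use this, not code
 * from this file): a ULD prep_fn such as sd's calls scsi_setup_fs_cmnd()
 * below, which invokes scsi_init_io() to map the request into cmd->sdb
 * before the CDB itself is built.
 */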

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;

	blk_start_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __func__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
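
/*
 * For example, a host whose parent device advertises a 32-bit dma_mask
 * ends up with a bounce limit of 0xffffffff, so the block layer bounces
 * pages above 4GB before they are mapped for DMA.
 */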
1605
b58d9154
FT
1606struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1607 request_fn_proc *request_fn)
1da177e4 1608{
1da177e4 1609 struct request_queue *q;
860ac568 1610 struct device *dev = shost->shost_gendev.parent;
1da177e4 1611
b58d9154 1612 q = blk_init_queue(request_fn, NULL);
1da177e4
LT
1613 if (!q)
1614 return NULL;
1615
a8474ce2
JA
1616 /*
1617 * this limit is imposed by hardware restrictions
1618 */
1da177e4 1619 blk_queue_max_hw_segments(q, shost->sg_tablesize);
d3f46f39 1620 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
a8474ce2 1621
1da177e4
LT
1622 blk_queue_max_sectors(q, shost->max_sectors);
1623 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1624 blk_queue_segment_boundary(q, shost->dma_boundary);
99c84dbd 1625 dma_set_seg_boundary(dev, shost->dma_boundary);
1da177e4 1626
860ac568
FT
1627 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1628
75ad23bc 1629 /* New queue, no concurrency on queue_flags */
1da177e4 1630 if (!shost->use_clustering)
75ad23bc 1631 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
465ff318
JB
1632
1633 /*
1634 * set a reasonable default alignment on word boundaries: the
1635 * host and device may alter it using
1636 * blk_queue_update_dma_alignment() later.
1637 */
1638 blk_queue_dma_alignment(q, 0x03);
1639
1da177e4
LT
1640 return q;
1641}
b58d9154
FT
1642EXPORT_SYMBOL(__scsi_alloc_queue);
1643
1644struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1645{
1646 struct request_queue *q;
1647
1648 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1649 if (!q)
1650 return NULL;
1651
1652 blk_queue_prep_rq(q, scsi_prep_fn);
b58d9154 1653 blk_queue_softirq_done(q, scsi_softirq_done);
242f9dcb 1654 blk_queue_rq_timed_out(q, scsi_times_out);
6c5121b7 1655 blk_queue_lld_busy(q, scsi_lld_busy);
b58d9154
FT
1656 return q;
1657}
1da177e4
LT
1658
1659void scsi_free_queue(struct request_queue *q)
1660{
1661 blk_cleanup_queue(q);
1662}
1663
1664/*
1665 * Function: scsi_block_requests()
1666 *
1667 * Purpose: Utility function used by low-level drivers to prevent further
1668 * commands from being queued to the device.
1669 *
1670 * Arguments: shost - Host in question
1671 *
1672 * Returns: Nothing
1673 *
1674 * Lock status: No locks are assumed held.
1675 *
1676 * Notes: There is no timer nor any other means by which the requests
1677 * get unblocked other than the low-level driver calling
1678 * scsi_unblock_requests().
1679 */
1680void scsi_block_requests(struct Scsi_Host *shost)
1681{
1682 shost->host_self_blocked = 1;
1683}
1684EXPORT_SYMBOL(scsi_block_requests);
1685
1686/*
1687 * Function: scsi_unblock_requests()
1688 *
1689 * Purpose: Utility function used by low-level drivers to allow further
1690 * commands from being queued to the device.
1691 *
1692 * Arguments: shost - Host in question
1693 *
1694 * Returns: Nothing
1695 *
1696 * Lock status: No locks are assumed held.
1697 *
1698 * Notes: There is no timer nor any other means by which the requests
1699 * get unblocked other than the low-level driver calling
1700 * scsi_unblock_requests().
1701 *
1702 * This is done as an API function so that changes to the
1703 * internals of the scsi mid-layer won't require wholesale
1704 * changes to drivers that use this feature.
1705 */
1706void scsi_unblock_requests(struct Scsi_Host *shost)
1707{
1708 shost->host_self_blocked = 0;
1709 scsi_run_host_queues(shost);
1710}
1711EXPORT_SYMBOL(scsi_unblock_requests);
1712
1713int __init scsi_init_queue(void)
1714{
1715 int i;
1716
6362abd3
MP
1717 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1718 sizeof(struct scsi_data_buffer),
1719 0, 0, NULL);
1720 if (!scsi_sdb_cache) {
1721 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
f078727b 1722 return -ENOMEM;
6f9a35e2
BH
1723 }
1724
1da177e4
LT
1725 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1726 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1727 int size = sgp->size * sizeof(struct scatterlist);
1728
1729 sgp->slab = kmem_cache_create(sgp->name, size, 0,
20c2df83 1730 SLAB_HWCACHE_ALIGN, NULL);
1da177e4
LT
1731 if (!sgp->slab) {
1732 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1733 sgp->name);
6362abd3 1734 goto cleanup_sdb;
1da177e4
LT
1735 }
1736
93d2341c
MD
1737 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1738 sgp->slab);
1da177e4
LT
1739 if (!sgp->pool) {
1740 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1741 sgp->name);
6362abd3 1742 goto cleanup_sdb;
1da177e4
LT
1743 }
1744 }
1745
1746 return 0;
3d9dd6ee 1747
6362abd3 1748cleanup_sdb:
3d9dd6ee
FT
1749 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1750 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1751 if (sgp->pool)
1752 mempool_destroy(sgp->pool);
1753 if (sgp->slab)
1754 kmem_cache_destroy(sgp->slab);
1755 }
6362abd3 1756 kmem_cache_destroy(scsi_sdb_cache);
3d9dd6ee
FT
1757
1758 return -ENOMEM;
1da177e4
LT
1759}
1760
1761void scsi_exit_queue(void)
1762{
1763 int i;
1764
6362abd3 1765 kmem_cache_destroy(scsi_sdb_cache);
aa7b5cd7 1766
1da177e4
LT
1767 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1768 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1769 mempool_destroy(sgp->pool);
1770 kmem_cache_destroy(sgp->slab);
1771 }
1772}

/**
 * scsi_mode_select - issue a mode select
 * @sdev:	SCSI device to be queried
 * @pf:	Page format bit (1 == standard, 0 == vendor specific)
 * @sp:	Save page bit (0 == don't save, 1 == save)
 * @modepage:	mode page being requested
 * @buffer:	request buffer (may not be smaller than eight bytes)
 * @len:	length of request buffer.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @data:	returns a structure abstracting the mode header data
 * @sshdr:	place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
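
/*
 * Usage sketch (illustrative only): writing a mode page back to the
 * device.  "buffer" is assumed to hold the page bytes with the mode
 * header stripped, and "data" the header fields from an earlier
 * scsi_mode_sense() call; page 0x08 (caching) is just an example.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int ret;
 *
 *	ret = scsi_mode_select(sdev, 1, 0, 0x08, buffer, len,
 *			       5 * HZ, 3, &data, &sshdr);
 *	if (ret)
 *		sdev_printk(KERN_WARNING, sdev, "MODE SELECT failed\n");
 */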

/**
 * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 * @sdev:	SCSI device to be queried
 * @dbd:	set if mode sense will allow block descriptors to be returned
 * @modepage:	mode page being requested
 * @buffer:	request buffer (may not be smaller than eight bytes)
 * @len:	length of request buffer.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @data:	returns a structure abstracting the mode header data
 * @sshdr:	place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful, or a non-zero result code if not.  On
 * success the header length (either 4 or 8, depending on whether a
 * six- or ten-byte command was issued) is returned in
 * @data->header_length.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
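
/*
 * Usage sketch (illustrative only): reading the caching mode page
 * (0x08) and locating the page data past the header and any block
 * descriptors using the returned scsi_mode_data, much as a
 * high-level driver would.
 *
 *	unsigned char buffer[64];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      5 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		...
 *	}
 */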

/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev:	scsi device to test readiness of.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense.  Make sure that this is cleared before
 *		passing in.
 *
 * Returns zero if successful or an error if TUR failed.  For
 * removable media, a return of NOT_READY or UNIT_ATTENTION is
 * translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
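
/*
 * Usage sketch (illustrative only): polling a removable-media device
 * for readiness.  The external sense header must be cleared before it
 * is passed in; on a media change the ->changed flag is set and zero
 * is returned.
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	memset(&sshdr, 0, sizeof(sshdr));
 *	if (scsi_test_unit_ready(sdev, 5 * HZ, 3, &sshdr) == 0 &&
 *	    sdev->changed)
 *		...	(medium was swapped; revalidate the device)
 */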

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev:	scsi device to change the state of.
 * @state:	state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
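
/*
 * Usage sketch (illustrative only): taking a misbehaving device
 * offline.  State changes are assumed to be serialized by the caller,
 * typically under the host lock.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(sdev->host->host_lock, flags);
 *	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
 *		sdev_printk(KERN_WARNING, sdev,
 *			    "could not take device offline\n");
 *	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 */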

/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev:	associated SCSI device
 * @evt:	event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work:	work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev:	scsi_device event occurred on
 * @evt:	event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type:	type of event to allocate
 * @gfpflags:	GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev:	scsi_device event occurred on
 * @evt_type:	type of event to send
 * @gfpflags:	GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
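
/*
 * Usage sketch (illustrative only): a driver that has detected new
 * media asserts a media-change event; user space then sees a
 * KOBJ_CHANGE uevent carrying SDEV_MEDIA_CHANGE=1.
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 */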

/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev:	scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev:	scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
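
/*
 * Usage sketch (illustrative only): quiescing a device around an
 * operation that must not race with user I/O, e.g. a firmware
 * download issued via special requests.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		...	(only special requests reach the device here)
 *		scsi_device_resume(sdev);
 *	}
 */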

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = SDEV_RUNNING;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
		sdev->sdev_state = SDEV_CREATED;
	else
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
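
/*
 * Usage sketch (illustrative only): a transport class blocking all
 * devices under a remote target while its link bounces, then
 * unblocking them once the port returns (the FC transport's
 * dev_loss handling follows this pattern).
 *
 *	scsi_target_block(&starget->dev);
 *	...	(wait for the port to return, or for a timeout)
 *	scsi_target_unblock(&starget->dev);
 */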

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
			__func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
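
/*
 * Usage sketch (illustrative only): a PIO-style driver copying bytes
 * out of a command's scatter-gather list.  Interrupts are assumed to
 * be disabled here, and each mapping must be dropped with
 * scsi_kunmap_atomic_sg() before the next chunk is mapped.  "dest"
 * and "bytes_to_copy" are assumed locals of the caller.
 *
 *	size_t offset = 0, len = bytes_to_copy;
 *	char *vaddr;
 *
 *	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				    &offset, &len);
 *	if (vaddr) {
 *		memcpy(dest, vaddr + offset, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */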