/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t			size;
	char			*name;
	struct kmem_cache	*slab;
	mempool_t		*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
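
/*
 * Example (illustrative sketch, not part of the original file):
 * scsi_queue_insert() is what the midlayer calls when a LLD refuses a
 * command, e.g. when a queuecommand implementation runs out of internal
 * resources.  The driver name and helper below are made up:
 *
 *	static int my_queuecommand(struct scsi_cmnd *cmd,
 *				   void (*done)(struct scsi_cmnd *))
 *	{
 *		if (my_hba_out_of_slots(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *	}
 *
 * A SCSI_MLQUEUE_HOST_BUSY return makes the midlayer set
 * host->host_blocked and requeue the command at the head of the queue,
 * exactly as the switch statement above shows.
 */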

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
39216033 | 255 | |
ea73a9f2 JB |
256 | |
257 | int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, | |
258 | int data_direction, void *buffer, unsigned bufflen, | |
f4f4e47e FT |
259 | struct scsi_sense_hdr *sshdr, int timeout, int retries, |
260 | int *resid) | |
ea73a9f2 JB |
261 | { |
262 | char *sense = NULL; | |
1ccb48bb | 263 | int result; |
264 | ||
ea73a9f2 | 265 | if (sshdr) { |
24669f75 | 266 | sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); |
ea73a9f2 JB |
267 | if (!sense) |
268 | return DRIVER_ERROR << 24; | |
ea73a9f2 | 269 | } |
1ccb48bb | 270 | result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, |
f4f4e47e | 271 | sense, timeout, retries, 0, resid); |
ea73a9f2 | 272 | if (sshdr) |
e514385b | 273 | scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); |
ea73a9f2 JB |
274 | |
275 | kfree(sense); | |
276 | return result; | |
277 | } | |
278 | EXPORT_SYMBOL(scsi_execute_req); | |
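
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that wants decoded sense data typically issues a simple CDB through
 * scsi_execute_req().  The timeout and retry values below are assumptions
 * picked for the sketch, not mandated by this API:
 *
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cdb, DMA_NONE, NULL, 0, &sshdr,
 *				  30 * HZ, 3, NULL);
 *	if (result && scsi_sense_valid(&sshdr))
 *		... inspect sshdr.sense_key / sshdr.asc / sshdr.ascq ...
 */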

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		int leftover = blk_rq_sectors(req) << 9;

		if (blk_pc_request(req))
			leftover = req->resid_len;

		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request(req, error, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
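
/*
 * Worked example (added for clarity, not in the original source): for a
 * request needing 24 scatterlist entries, get_count_order(24) == 5 (24
 * rounds up to 2^5), so scsi_sgtable_index() returns 5 - 3 = 2, selecting
 * the "sgpool-32" mempool from the scsi_sg_pools[] array declared at the
 * top of this file.  Anything at or below 8 entries maps to index 0,
 * i.e. "sgpool-8".
 */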

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.  If
 * part of the bytes were written and the LLD returned scsi_in()->resid
 * and/or scsi_out()->resid, this information will be left in
 * req->resid_len and req->next_rq->resid_len.  The upper-layer driver
 * can decide what to do with this information.
 */
static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	req->resid_len = scsi_out(cmd)->resid;
	req->next_rq->resid_len = scsi_in(cmd)->resid;

	/* The req and req->next_rq have not been completed */
	BUG_ON(blk_end_bidi_request(req, 0, blk_rq_bytes(req),
				    blk_rq_bytes(req->next_rq)));

	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->resid_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
		if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;
	this_count = blk_rq_bytes(req);

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		blk_end_request_all(req, -EIO);
		scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
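
/*
 * Worked example (added for clarity, derived from the switch above): a
 * drive that answers a READ with sense key NOT_READY, asc 0x04, ascq 0x01
 * ("logical unit is in the process of becoming ready") lands in
 * ACTION_DELAYED_RETRY, so the command is requeued via
 * __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0) and retried after
 * the device_blocked stall.  The same sense with an ascq not listed above
 * falls through to ACTION_FAIL, and the remainder of the request is
 * failed with -EIO.
 */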

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	if (blk_pc_request(req))
		sdb->length = blk_rq_bytes(req);
	else
		sdb->length = blk_rq_sectors(req) << 9;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
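
/*
 * Example (illustrative sketch, not part of the original file): an
 * upper-level driver's prep_fn is expected to call scsi_setup_fs_cmnd()
 * first and then build the CDB itself.  The driver name below is made up;
 * only the call pattern is the point:
 *
 *	static int my_uld_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret = scsi_setup_fs_cmnd(sdev, rq);
 *
 *		if (ret != BLKPREP_OK)
 *			return scsi_prep_return(q, rq, ret);
 *		... fill in cmd->cmnd with READ(10)/WRITE(10) etc ...
 *		return scsi_prep_return(q, rq, BLKPREP_OK);
 *	}
 */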

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
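
/*
 * Note (added for clarity; based on how the midlayer wires up queues
 * elsewhere in this file, outside this excerpt): scsi_prep_fn() is the
 * default prep callback, installed during queue setup roughly as:
 *
 *	blk_queue_prep_rq(q, scsi_prep_fn);
 *
 * so the block layer invokes it for each request before scsi_request_fn()
 * ever sees the request.
 */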
1da177e4 LT |
1237 | |
1238 | /* | |
1239 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | |
1240 | * return 0. | |
1241 | * | |
1242 | * Called with the queue_lock held. | |
1243 | */ | |
1244 | static inline int scsi_dev_queue_ready(struct request_queue *q, | |
1245 | struct scsi_device *sdev) | |
1246 | { | |
1da177e4 LT |
1247 | if (sdev->device_busy == 0 && sdev->device_blocked) { |
1248 | /* | |
1249 | * unblock after device_blocked iterates to zero | |
1250 | */ | |
1251 | if (--sdev->device_blocked == 0) { | |
1252 | SCSI_LOG_MLQUEUE(3, | |
9ccfc756 JB |
1253 | sdev_printk(KERN_INFO, sdev, |
1254 | "unblocking device at zero depth\n")); | |
1da177e4 LT |
1255 | } else { |
1256 | blk_plug_device(q); | |
1257 | return 0; | |
1258 | } | |
1259 | } | |
9d112517 | 1260 | if (scsi_device_is_busy(sdev)) |
1da177e4 LT |
1261 | return 0; |
1262 | ||
1263 | return 1; | |
1264 | } | |
1265 | ||
f0c0a376 MC |
1266 | |
1267 | /* | |
1268 | * scsi_target_queue_ready: checks if there we can send commands to target | |
1269 | * @sdev: scsi device on starget to check. | |
1270 | * | |
1271 | * Called with the host lock held. | |
1272 | */ | |
1273 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, | |
1274 | struct scsi_device *sdev) | |
1275 | { | |
1276 | struct scsi_target *starget = scsi_target(sdev); | |
1277 | ||
1278 | if (starget->single_lun) { | |
1279 | if (starget->starget_sdev_user && | |
1280 | starget->starget_sdev_user != sdev) | |
1281 | return 0; | |
1282 | starget->starget_sdev_user = sdev; | |
1283 | } | |
1284 | ||
1285 | if (starget->target_busy == 0 && starget->target_blocked) { | |
1286 | /* | |
1287 | * unblock after target_blocked iterates to zero | |
1288 | */ | |
1289 | if (--starget->target_blocked == 0) { | |
1290 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, | |
1291 | "unblocking target at zero depth\n")); | |
1292 | } else { | |
1293 | blk_plug_device(sdev->request_queue); | |
1294 | return 0; | |
1295 | } | |
1296 | } | |
1297 | ||
1298 | if (scsi_target_is_busy(starget)) { | |
1299 | if (list_empty(&sdev->starved_entry)) { | |
1300 | list_add_tail(&sdev->starved_entry, | |
1301 | &shost->starved_list); | |
1302 | return 0; | |
1303 | } | |
1304 | } | |
1305 | ||
1306 | /* We're OK to process the command, so we can't be starved */ | |
1307 | if (!list_empty(&sdev->starved_entry)) | |
1308 | list_del_init(&sdev->starved_entry); | |
1309 | return 1; | |
1310 | } | |
1311 | ||
1da177e4 LT |
1312 | /* |
1313 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else | |
1314 | * return 0. We must end up running the queue again whenever 0 is | |
1315 | * returned, else IO can hang. | |
1316 | * | |
1317 | * Called with host_lock held. | |
1318 | */ | |
1319 | static inline int scsi_host_queue_ready(struct request_queue *q, | |
1320 | struct Scsi_Host *shost, | |
1321 | struct scsi_device *sdev) | |
1322 | { | |
939647ee | 1323 | if (scsi_host_in_recovery(shost)) |
1da177e4 LT |
1324 | return 0; |
1325 | if (shost->host_busy == 0 && shost->host_blocked) { | |
1326 | /* | |
1327 | * unblock after host_blocked iterates to zero | |
1328 | */ | |
1329 | if (--shost->host_blocked == 0) { | |
1330 | SCSI_LOG_MLQUEUE(3, | |
1331 | printk("scsi%d unblocking host at zero depth\n", | |
1332 | shost->host_no)); | |
1333 | } else { | |
1da177e4 LT |
1334 | return 0; |
1335 | } | |
1336 | } | |
9d112517 | 1337 | if (scsi_host_is_busy(shost)) { |
1da177e4 LT |
1338 | if (list_empty(&sdev->starved_entry)) |
1339 | list_add_tail(&sdev->starved_entry, &shost->starved_list); | |
1340 | return 0; | |
1341 | } | |
1342 | ||
1343 | /* We're OK to process the command, so we can't be starved */ | |
1344 | if (!list_empty(&sdev->starved_entry)) | |
1345 | list_del_init(&sdev->starved_entry); | |
1346 | ||
1347 | return 1; | |
1348 | } | |
1349 | ||
6c5121b7 KU |
1350 | /* |
1351 | * Busy state exporting function for request stacking drivers. | |
1352 | * | |
1353 | * For efficiency, no lock is taken to check the busy state of | |
1354 | * shost/starget/sdev, since the returned value is not guaranteed and | |
1355 | * may be changed after request stacking drivers call the function, | |
1356 | * regardless of taking lock or not. | |
1357 | * | |
1358 | * When scsi can't dispatch I/Os anymore and needs to kill I/Os | |
1359 | * (e.g. !sdev), scsi needs to return 'not busy'. | |
1360 | * Otherwise, request stacking drivers may hold requests forever. | |
1361 | */ | |
1362 | static int scsi_lld_busy(struct request_queue *q) | |
1363 | { | |
1364 | struct scsi_device *sdev = q->queuedata; | |
1365 | struct Scsi_Host *shost; | |
1366 | struct scsi_target *starget; | |
1367 | ||
1368 | if (!sdev) | |
1369 | return 0; | |
1370 | ||
1371 | shost = sdev->host; | |
1372 | starget = scsi_target(sdev); | |
1373 | ||
1374 | if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || | |
1375 | scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) | |
1376 | return 1; | |
1377 | ||
1378 | return 0; | |
1379 | } | |
1380 | ||
1da177e4 | 1381 | /* |
e91442b6 | 1382 | * Kill a request for a dead device |
1da177e4 | 1383 | */ |
165125e1 | 1384 | static void scsi_kill_request(struct request *req, struct request_queue *q) |
1da177e4 | 1385 | { |
e91442b6 | 1386 | struct scsi_cmnd *cmd = req->special; |
e36e0c80 | 1387 | struct scsi_device *sdev = cmd->device; |
f0c0a376 | 1388 | struct scsi_target *starget = scsi_target(sdev); |
e36e0c80 | 1389 | struct Scsi_Host *shost = sdev->host; |
1da177e4 | 1390 | |
788ce43a JB |
1391 | blkdev_dequeue_request(req); |
1392 | ||
e91442b6 JB |
1393 | if (unlikely(cmd == NULL)) { |
1394 | printk(KERN_CRIT "impossible request in %s.\n", | |
cadbd4a5 | 1395 | __func__); |
e91442b6 | 1396 | BUG(); |
1da177e4 | 1397 | } |
e91442b6 JB |
1398 | |
1399 | scsi_init_cmd_errh(cmd); | |
1400 | cmd->result = DID_NO_CONNECT << 16; | |
1401 | atomic_inc(&cmd->device->iorequest_cnt); | |
e36e0c80 TH |
1402 | |
1403 | /* | |
1404 | * SCSI request completion path will do scsi_device_unbusy(), | |
1405 | * bump busy counts. To bump the counters, we need to dance | |
1406 | * with the locks as normal issue path does. | |
1407 | */ | |
1408 | sdev->device_busy++; | |
1409 | spin_unlock(sdev->request_queue->queue_lock); | |
1410 | spin_lock(shost->host_lock); | |
1411 | shost->host_busy++; | |
f0c0a376 | 1412 | starget->target_busy++; |
e36e0c80 TH |
1413 | spin_unlock(shost->host_lock); |
1414 | spin_lock(sdev->request_queue->queue_lock); | |
1415 | ||
242f9dcb | 1416 | blk_complete_request(req); |
1da177e4 LT |
1417 | } |
1418 | ||
1aea6434 JA |
1419 | static void scsi_softirq_done(struct request *rq) |
1420 | { | |
242f9dcb JA |
1421 | struct scsi_cmnd *cmd = rq->special; |
1422 | unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; | |
1aea6434 JA |
1423 | int disposition; |
1424 | ||
1425 | INIT_LIST_HEAD(&cmd->eh_entry); | |
1426 | ||
242f9dcb JA |
1427 | /* |
1428 | * Set the serial numbers back to zero | |
1429 | */ | |
1430 | cmd->serial_number = 0; | |
1431 | ||
1432 | atomic_inc(&cmd->device->iodone_cnt); | |
1433 | if (cmd->result) | |
1434 | atomic_inc(&cmd->device->ioerr_cnt); | |
1435 | ||
1aea6434 JA |
1436 | disposition = scsi_decide_disposition(cmd); |
1437 | if (disposition != SUCCESS && | |
1438 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { | |
1439 | sdev_printk(KERN_ERR, cmd->device, | |
1440 | "timing out command, waited %lus\n", | |
1441 | wait_for/HZ); | |
1442 | disposition = SUCCESS; | |
1443 | } | |
1444 | ||
1445 | scsi_log_completion(cmd, disposition); | |
1446 | ||
1447 | switch (disposition) { | |
1448 | case SUCCESS: | |
1449 | scsi_finish_command(cmd); | |
1450 | break; | |
1451 | case NEEDS_RETRY: | |
596f482a | 1452 | scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); |
1aea6434 JA |
1453 | break; |
1454 | case ADD_TO_MLQUEUE: | |
1455 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); | |
1456 | break; | |
1457 | default: | |
1458 | if (!scsi_eh_scmd_add(cmd, 0)) | |
1459 | scsi_finish_command(cmd); | |
1460 | } | |
1461 | } | |
1462 | ||
1da177e4 LT |
1463 | /* |
1464 | * Function: scsi_request_fn() | |
1465 | * | |
1466 | * Purpose: Main strategy routine for SCSI. | |
1467 | * | |
1468 | * Arguments: q - Pointer to actual queue. | |
1469 | * | |
1470 | * Returns: Nothing | |
1471 | * | |
1472 | * Lock status: IO request lock assumed to be held when called. | |
1473 | */ | |
1474 | static void scsi_request_fn(struct request_queue *q) | |
1475 | { | |
1476 | struct scsi_device *sdev = q->queuedata; | |
1477 | struct Scsi_Host *shost; | |
1478 | struct scsi_cmnd *cmd; | |
1479 | struct request *req; | |
1480 | ||
1481 | if (!sdev) { | |
1482 | printk("scsi: killing requests for dead queue\n"); | |
e91442b6 JB |
1483 | while ((req = elv_next_request(q)) != NULL) |
1484 | scsi_kill_request(req, q); | |
1da177e4 LT |
1485 | return; |
1486 | } | |
1487 | ||
1488 | if(!get_device(&sdev->sdev_gendev)) | |
1489 | /* We must be tearing the block queue down already */ | |
1490 | return; | |
1491 | ||
1492 | /* | |
1493 | * To start with, we keep looping until the queue is empty, or until | |
1494 | * the host is no longer able to accept any more requests. | |
1495 | */ | |
1496 | shost = sdev->host; | |
1497 | while (!blk_queue_plugged(q)) { | |
1498 | int rtn; | |
1499 | /* | |
1500 | * get next queueable request. We do this early to make sure | |
1501 | * that the request is fully prepared even if we cannot | |
1502 | * accept it. | |
1503 | */ | |
1504 | req = elv_next_request(q); | |
1505 | if (!req || !scsi_dev_queue_ready(q, sdev)) | |
1506 | break; | |
1507 | ||
1508 | if (unlikely(!scsi_device_online(sdev))) { | |
9ccfc756 JB |
1509 | sdev_printk(KERN_ERR, sdev, |
1510 | "rejecting I/O to offline device\n"); | |
e91442b6 | 1511 | scsi_kill_request(req, q); |
1da177e4 LT |
1512 | continue; |
1513 | } | |
1514 | ||
1515 | ||
1516 | /* | |
1517 | * Remove the request from the request list. | |
1518 | */ | |
1519 | if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) | |
1520 | blkdev_dequeue_request(req); | |
1521 | sdev->device_busy++; | |
1522 | ||
1523 | spin_unlock(q->queue_lock); | |
e91442b6 JB |
1524 | cmd = req->special; |
1525 | if (unlikely(cmd == NULL)) { | |
1526 | printk(KERN_CRIT "impossible request in %s.\n" | |
1527 | "please mail a stack trace to " | |
4aff5e23 | 1528 | "linux-scsi@vger.kernel.org\n", |
cadbd4a5 | 1529 | __func__); |
4aff5e23 | 1530 | blk_dump_rq_flags(req, "foo"); |
e91442b6 JB |
1531 | BUG(); |
1532 | } | |
1da177e4 LT |
1533 | spin_lock(shost->host_lock); |
1534 | ||
ecefe8a9 MC |
1535 | /* |
1536 | * We hit this when the driver is using a host wide | |
1537 | * tag map. For device level tag maps the queue_depth check | |
1538 | * in the device ready fn would prevent us from trying | |
1539 | * to allocate a tag. Since the map is a shared host resource | |
1540 | * we add the dev to the starved list so it eventually gets | |
1541 | * a run when a tag is freed. | |
1542 | */ | |
6bd522f6 | 1543 | if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { |
ecefe8a9 MC |
1544 | if (list_empty(&sdev->starved_entry)) |
1545 | list_add_tail(&sdev->starved_entry, | |
1546 | &shost->starved_list); | |
1547 | goto not_ready; | |
1548 | } | |
1549 | ||
f0c0a376 MC |
1550 | if (!scsi_target_queue_ready(shost, sdev)) |
1551 | goto not_ready; | |
1552 | ||
1da177e4 LT |
1553 | if (!scsi_host_queue_ready(q, shost, sdev)) |
1554 | goto not_ready; | |
f0c0a376 MC |
1555 | |
1556 | scsi_target(sdev)->target_busy++; | |
1da177e4 LT |
1557 | shost->host_busy++; |
1558 | ||
1559 | /* | |
1560 | * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will | |
1561 | * take the lock again. | |
1562 | */ | |
1563 | spin_unlock_irq(shost->host_lock); | |
1564 | ||
1da177e4 LT |
1565 | /* |
1566 | * Finally, initialize any error handling parameters, and set up | |
1567 | * the timers for timeouts. | |
1568 | */ | |
1569 | scsi_init_cmd_errh(cmd); | |
1570 | ||
1571 | /* | |
1572 | * Dispatch the command to the low-level driver. | |
1573 | */ | |
1574 | rtn = scsi_dispatch_cmd(cmd); | |
1575 | spin_lock_irq(q->queue_lock); | |
1576 | if (rtn) { | 
1577 | /* we're refusing the command; because of | |
1578 | * the way locks get dropped, we need to | |
1579 | * check here if plugging is required */ | |
1580 | if (sdev->device_busy == 0) | 
1581 | blk_plug_device(q); | |
1582 | ||
1583 | break; | |
1584 | } | |
1585 | } | |
1586 | ||
1587 | goto out; | |
1588 | ||
1589 | not_ready: | |
1590 | spin_unlock_irq(shost->host_lock); | |
1591 | ||
1592 | /* | |
1593 | * lock q, handle tag, requeue req, and decrement device_busy. We | |
1594 | * must return with queue_lock held. | |
1595 | * | |
1596 | * Decrementing device_busy without checking it is OK, as all such | |
1597 | * cases (host limits or settings) should run the queue at some | |
1598 | * later time. | |
1599 | */ | |
1600 | spin_lock_irq(q->queue_lock); | |
1601 | blk_requeue_request(q, req); | |
1602 | sdev->device_busy--; | |
1603 | if (sdev->device_busy == 0) | 
1604 | blk_plug_device(q); | |
1605 | out: | |
1606 | /* must be careful here...if we trigger the ->remove() function | |
1607 | * we cannot be holding the q lock */ | |
1608 | spin_unlock_irq(q->queue_lock); | |
1609 | put_device(&sdev->sdev_gendev); | |
1610 | spin_lock_irq(q->queue_lock); | |
1611 | } | |
1612 | ||
1613 | u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | |
1614 | { | |
1615 | struct device *host_dev; | |
1616 | u64 bounce_limit = 0xffffffff; | |
1617 | ||
1618 | if (shost->unchecked_isa_dma) | |
1619 | return BLK_BOUNCE_ISA; | |
1620 | /* | |
1621 | * Platforms with virtual-DMA translation | |
1622 | * hardware have no practical limit. | |
1623 | */ | |
1624 | if (!PCI_DMA_BUS_IS_PHYS) | |
1625 | return BLK_BOUNCE_ANY; | |
1626 | ||
1627 | host_dev = scsi_get_device(shost); | |
1628 | if (host_dev && host_dev->dma_mask) | |
1629 | bounce_limit = *host_dev->dma_mask; | |
1630 | ||
1631 | return bounce_limit; | |
1632 | } | |
1633 | EXPORT_SYMBOL(scsi_calculate_bounce_limit); | |
1634 | ||
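/*
 * Editorial sketch, not from this file: scsi_calculate_bounce_limit() is
 * exported for code that rolls its own request queue, exactly as
 * __scsi_alloc_queue() does just below.  A hypothetical consumer:
 */
static void example_apply_dma_limits(struct Scsi_Host *shost,
				     struct request_queue *q)
{
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
}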
b58d9154 FT |
1635 | struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, |
1636 | request_fn_proc *request_fn) | |
1da177e4 | 1637 | { |
1da177e4 | 1638 | struct request_queue *q; |
860ac568 | 1639 | struct device *dev = shost->shost_gendev.parent; |
1da177e4 | 1640 | |
b58d9154 | 1641 | q = blk_init_queue(request_fn, NULL); |
1da177e4 LT |
1642 | if (!q) |
1643 | return NULL; | |
1644 | ||
a8474ce2 JA |
1645 | /* |
1646 | * this limit is imposed by hardware restrictions | |
1647 | */ | |
1da177e4 | 1648 | blk_queue_max_hw_segments(q, shost->sg_tablesize); |
d3f46f39 | 1649 | blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); |
a8474ce2 | 1650 | |
1da177e4 LT |
1651 | blk_queue_max_sectors(q, shost->max_sectors); |
1652 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | |
1653 | blk_queue_segment_boundary(q, shost->dma_boundary); | |
99c84dbd | 1654 | dma_set_seg_boundary(dev, shost->dma_boundary); |
1da177e4 | 1655 | |
860ac568 FT |
1656 | blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); |
1657 | ||
75ad23bc | 1658 | /* New queue, no concurrency on queue_flags */ |
1da177e4 | 1659 | if (!shost->use_clustering) |
75ad23bc | 1660 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); |
465ff318 JB |
1661 | |
1662 | /* | |
1663 | * set a reasonable default alignment on word boundaries: the | |
1664 | * host and device may alter it using | |
1665 | * blk_queue_update_dma_alignment() later. | |
1666 | */ | |
1667 | blk_queue_dma_alignment(q, 0x03); | |
1668 | ||
1da177e4 LT |
1669 | return q; |
1670 | } | |
b58d9154 FT |
1671 | EXPORT_SYMBOL(__scsi_alloc_queue); |
1672 | ||
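/*
 * Editorial sketch: a transport class can obtain a queue that honours the
 * host's hardware limits but feeds a request_fn of its own (the SAS
 * transport uses this for SMP requests).  "example_smp_request" and
 * "example_transport_queue" are hypothetical names.
 */
static void example_smp_request(struct request_queue *q)
{
	/* dequeue and service transport-private requests here */
}

static struct request_queue *example_transport_queue(struct Scsi_Host *shost)
{
	return __scsi_alloc_queue(shost, example_smp_request);
}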
1673 | struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |
1674 | { | |
1675 | struct request_queue *q; | |
1676 | ||
1677 | q = __scsi_alloc_queue(sdev->host, scsi_request_fn); | |
1678 | if (!q) | |
1679 | return NULL; | |
1680 | ||
1681 | blk_queue_prep_rq(q, scsi_prep_fn); | |
b58d9154 | 1682 | blk_queue_softirq_done(q, scsi_softirq_done); |
242f9dcb | 1683 | blk_queue_rq_timed_out(q, scsi_times_out); |
6c5121b7 | 1684 | blk_queue_lld_busy(q, scsi_lld_busy); |
b58d9154 FT |
1685 | return q; |
1686 | } | |
1da177e4 LT |
1687 | |
1688 | void scsi_free_queue(struct request_queue *q) | |
1689 | { | |
1690 | blk_cleanup_queue(q); | |
1691 | } | |
1692 | ||
1693 | /* | |
1694 | * Function: scsi_block_requests() | |
1695 | * | |
1696 | * Purpose: Utility function used by low-level drivers to prevent further | |
1697 | * commands from being queued to the device. | |
1698 | * | |
1699 | * Arguments: shost - Host in question | |
1700 | * | |
1701 | * Returns: Nothing | |
1702 | * | |
1703 | * Lock status: No locks are assumed held. | |
1704 | * | |
1705 | * Notes: There is no timer nor any other means by which the requests | |
1706 | * get unblocked other than the low-level driver calling | |
1707 | * scsi_unblock_requests(). | |
1708 | */ | |
1709 | void scsi_block_requests(struct Scsi_Host *shost) | |
1710 | { | |
1711 | shost->host_self_blocked = 1; | |
1712 | } | |
1713 | EXPORT_SYMBOL(scsi_block_requests); | |
1714 | ||
1715 | /* | |
1716 | * Function: scsi_unblock_requests() | |
1717 | * | |
1718 | * Purpose: Utility function used by low-level drivers to allow further | |
1719 | * commands to be queued to the device. | 
1720 | * | |
1721 | * Arguments: shost - Host in question | |
1722 | * | |
1723 | * Returns: Nothing | |
1724 | * | |
1725 | * Lock status: No locks are assumed held. | |
1726 | * | |
1727 | * Notes: There is no timer nor any other means by which the requests | |
1728 | * get unblocked other than the low-level driver calling | |
1729 | * scsi_unblock_requests(). | |
1730 | * | |
1731 | * This is done as an API function so that changes to the | |
1732 | * internals of the scsi mid-layer won't require wholesale | |
1733 | * changes to drivers that use this feature. | |
1734 | */ | |
1735 | void scsi_unblock_requests(struct Scsi_Host *shost) | |
1736 | { | |
1737 | shost->host_self_blocked = 0; | |
1738 | scsi_run_host_queues(shost); | |
1739 | } | |
1740 | EXPORT_SYMBOL(scsi_unblock_requests); | |
1741 | ||
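/*
 * Editorial sketch: the canonical pairing of the two helpers above in an
 * LLD's recovery path.  The hardware-recovery step is a placeholder.
 */
static void example_adapter_recovery(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* midlayer stops issuing commands */
	/* ... LLD-specific hardware reinitialisation goes here ... */
	scsi_unblock_requests(shost);	/* restart the per-device queues */
}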
1742 | int __init scsi_init_queue(void) | |
1743 | { | |
1744 | int i; | |
1745 | ||
6362abd3 MP |
1746 | scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", |
1747 | sizeof(struct scsi_data_buffer), | |
1748 | 0, 0, NULL); | |
1749 | if (!scsi_sdb_cache) { | |
1750 | printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); | |
f078727b | 1751 | return -ENOMEM; |
6f9a35e2 BH |
1752 | } |
1753 | ||
1da177e4 LT |
1754 | for (i = 0; i < SG_MEMPOOL_NR; i++) { |
1755 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1756 | int size = sgp->size * sizeof(struct scatterlist); | |
1757 | ||
1758 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | |
20c2df83 | 1759 | SLAB_HWCACHE_ALIGN, NULL); |
1da177e4 LT |
1760 | if (!sgp->slab) { |
1761 | printk(KERN_ERR "SCSI: can't init sg slab %s\n", | |
1762 | sgp->name); | |
6362abd3 | 1763 | goto cleanup_sdb; |
1da177e4 LT |
1764 | } |
1765 | ||
93d2341c MD |
1766 | sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, |
1767 | sgp->slab); | |
1da177e4 LT |
1768 | if (!sgp->pool) { |
1769 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", | |
1770 | sgp->name); | |
6362abd3 | 1771 | goto cleanup_sdb; |
1da177e4 LT |
1772 | } |
1773 | } | |
1774 | ||
1775 | return 0; | |
3d9dd6ee | 1776 | |
6362abd3 | 1777 | cleanup_sdb: |
3d9dd6ee FT |
1778 | for (i = 0; i < SG_MEMPOOL_NR; i++) { |
1779 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1780 | if (sgp->pool) | |
1781 | mempool_destroy(sgp->pool); | |
1782 | if (sgp->slab) | |
1783 | kmem_cache_destroy(sgp->slab); | |
1784 | } | |
6362abd3 | 1785 | kmem_cache_destroy(scsi_sdb_cache); |
3d9dd6ee FT |
1786 | |
1787 | return -ENOMEM; | |
1da177e4 LT |
1788 | } |
1789 | ||
1790 | void scsi_exit_queue(void) | |
1791 | { | |
1792 | int i; | |
1793 | ||
6362abd3 | 1794 | kmem_cache_destroy(scsi_sdb_cache); |
aa7b5cd7 | 1795 | |
1da177e4 LT |
1796 | for (i = 0; i < SG_MEMPOOL_NR; i++) { |
1797 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1798 | mempool_destroy(sgp->pool); | |
1799 | kmem_cache_destroy(sgp->slab); | |
1800 | } | |
1801 | } | |
5baba830 JB |
1802 | |
1803 | /** | |
1804 | * scsi_mode_select - issue a mode select | |
1805 | * @sdev: SCSI device to be queried | |
1806 | * @pf: Page format bit (1 == standard, 0 == vendor specific) | |
1807 | * @sp: Save page bit (0 == don't save, 1 == save) | |
1808 | * @modepage: mode page being requested | |
1809 | * @buffer: request buffer (may not be smaller than eight bytes) | |
1810 | * @len: length of request buffer. | |
1811 | * @timeout: command timeout | |
1812 | * @retries: number of retries before failing | |
1813 | * @data: returns a structure abstracting the mode header data | |
eb44820c | 1814 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
5baba830 JB |
1815 | * must be SCSI_SENSE_BUFFERSIZE big. |
1816 | * | |
1817 | * Returns zero if successful; negative error number or scsi | |
1818 | * status on error | |
1819 | * | |
1820 | */ | |
1821 | int | |
1822 | scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, | |
1823 | unsigned char *buffer, int len, int timeout, int retries, | |
1824 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) | |
1825 | { | |
1826 | unsigned char cmd[10]; | |
1827 | unsigned char *real_buffer; | |
1828 | int ret; | |
1829 | ||
1830 | memset(cmd, 0, sizeof(cmd)); | |
1831 | cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); | |
1832 | ||
1833 | if (sdev->use_10_for_ms) { | |
1834 | if (len > 65535) | |
1835 | return -EINVAL; | |
1836 | real_buffer = kmalloc(8 + len, GFP_KERNEL); | |
1837 | if (!real_buffer) | |
1838 | return -ENOMEM; | |
1839 | memcpy(real_buffer + 8, buffer, len); | |
1840 | len += 8; | |
1841 | real_buffer[0] = 0; | |
1842 | real_buffer[1] = 0; | |
1843 | real_buffer[2] = data->medium_type; | |
1844 | real_buffer[3] = data->device_specific; | |
1845 | real_buffer[4] = data->longlba ? 0x01 : 0; | |
1846 | real_buffer[5] = 0; | |
1847 | real_buffer[6] = data->block_descriptor_length >> 8; | |
1848 | real_buffer[7] = data->block_descriptor_length; | |
1849 | ||
1850 | cmd[0] = MODE_SELECT_10; | |
1851 | cmd[7] = len >> 8; | |
1852 | cmd[8] = len; | |
1853 | } else { | |
1854 | if (len > 255 || data->block_descriptor_length > 255 || | |
1855 | data->longlba) | |
1856 | return -EINVAL; | |
1857 | ||
1858 | real_buffer = kmalloc(4 + len, GFP_KERNEL); | |
1859 | if (!real_buffer) | |
1860 | return -ENOMEM; | |
1861 | memcpy(real_buffer + 4, buffer, len); | |
1862 | len += 4; | |
1863 | real_buffer[0] = 0; | |
1864 | real_buffer[1] = data->medium_type; | |
1865 | real_buffer[2] = data->device_specific; | |
1866 | real_buffer[3] = data->block_descriptor_length; | |
1867 | ||
1868 | ||
1869 | cmd[0] = MODE_SELECT; | |
1870 | cmd[4] = len; | |
1871 | } | |
1872 | ||
1873 | ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, | |
f4f4e47e | 1874 | sshdr, timeout, retries, NULL); |
5baba830 JB |
1875 | kfree(real_buffer); |
1876 | return ret; | |
1877 | } | |
1878 | EXPORT_SYMBOL_GPL(scsi_mode_select); | |
1879 | ||
1da177e4 | 1880 | /** |
eb44820c | 1881 | * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary. | 
1cf72699 | 1882 | * @sdev: SCSI device to be queried |
1da177e4 LT |
1883 | * @dbd: set if mode sense will allow block descriptors to be returned |
1884 | * @modepage: mode page being requested | |
1885 | * @buffer: request buffer (may not be smaller than eight bytes) | |
1886 | * @len: length of request buffer. | |
1887 | * @timeout: command timeout | |
1888 | * @retries: number of retries before failing | |
1889 | * @data: returns a structure abstracting the mode header data | |
eb44820c | 1890 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
1cf72699 | 1891 | * must be SCSI_SENSE_BUFFERSIZE big. |
1da177e4 LT |
1892 | * |
1893 | * Returns zero if successful, or a non-zero result code on failure. | 
1894 | * On success, @data->header_length holds the header offset (either 4 | 
1895 | * or 8, depending on whether a six or ten byte command was issued). | 
eb44820c | 1896 | */ |
1da177e4 | 1897 | int |
1cf72699 | 1898 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, |
1da177e4 | 1899 | unsigned char *buffer, int len, int timeout, int retries, |
5baba830 JB |
1900 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
1901 | { | |
1da177e4 LT |
1902 | unsigned char cmd[12]; |
1903 | int use_10_for_ms; | |
1904 | int header_length; | |
1cf72699 | 1905 | int result; |
ea73a9f2 | 1906 | struct scsi_sense_hdr my_sshdr; |
1da177e4 LT |
1907 | |
1908 | memset(data, 0, sizeof(*data)); | |
1909 | memset(&cmd[0], 0, 12); | |
1910 | cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ | |
1911 | cmd[2] = modepage; | |
1912 | ||
ea73a9f2 JB |
1913 | /* caller might not be interested in sense, but we need it */ |
1914 | if (!sshdr) | |
1915 | sshdr = &my_sshdr; | |
1916 | ||
1da177e4 | 1917 | retry: |
1cf72699 | 1918 | use_10_for_ms = sdev->use_10_for_ms; |
1da177e4 LT |
1919 | |
1920 | if (use_10_for_ms) { | |
1921 | if (len < 8) | |
1922 | len = 8; | |
1923 | ||
1924 | cmd[0] = MODE_SENSE_10; | |
1925 | cmd[8] = len; | |
1926 | header_length = 8; | |
1927 | } else { | |
1928 | if (len < 4) | |
1929 | len = 4; | |
1930 | ||
1931 | cmd[0] = MODE_SENSE; | |
1932 | cmd[4] = len; | |
1933 | header_length = 4; | |
1934 | } | |
1935 | ||
1da177e4 LT |
1936 | memset(buffer, 0, len); |
1937 | ||
1cf72699 | 1938 | result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, |
f4f4e47e | 1939 | sshdr, timeout, retries, NULL); |
1da177e4 LT |
1940 | |
1941 | /* This code looks awful: what it's doing is making sure an | |
1942 | * ILLEGAL REQUEST sense return identifies the actual command | |
1943 | * byte as the problem. MODE_SENSE commands can return | |
1944 | * ILLEGAL REQUEST if the mode page isn't supported */ | 
1945 | ||
1cf72699 JB |
1946 | if (use_10_for_ms && !scsi_status_is_good(result) && |
1947 | (driver_byte(result) & DRIVER_SENSE)) { | |
ea73a9f2 JB |
1948 | if (scsi_sense_valid(sshdr)) { |
1949 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && | |
1950 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { | |
1da177e4 LT |
1951 | /* |
1952 | * Invalid command operation code | |
1953 | */ | |
1cf72699 | 1954 | sdev->use_10_for_ms = 0; |
1da177e4 LT |
1955 | goto retry; |
1956 | } | |
1957 | } | |
1958 | } | |
1959 | ||
1cf72699 | 1960 | if(scsi_status_is_good(result)) { |
6d73c851 AV |
1961 | if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && |
1962 | (modepage == 6 || modepage == 8))) { | |
1963 | /* Initio breakage? */ | |
1964 | header_length = 0; | |
1965 | data->length = 13; | |
1966 | data->medium_type = 0; | |
1967 | data->device_specific = 0; | |
1968 | data->longlba = 0; | |
1969 | data->block_descriptor_length = 0; | |
1970 | } else if (use_10_for_ms) { | 
1da177e4 LT |
1971 | data->length = buffer[0]*256 + buffer[1] + 2; |
1972 | data->medium_type = buffer[2]; | |
1973 | data->device_specific = buffer[3]; | |
1974 | data->longlba = buffer[4] & 0x01; | |
1975 | data->block_descriptor_length = buffer[6]*256 | |
1976 | + buffer[7]; | |
1977 | } else { | |
1978 | data->length = buffer[0] + 1; | |
1979 | data->medium_type = buffer[1]; | |
1980 | data->device_specific = buffer[2]; | |
1981 | data->block_descriptor_length = buffer[3]; | |
1982 | } | |
6d73c851 | 1983 | data->header_length = header_length; |
1da177e4 LT |
1984 | } |
1985 | ||
1cf72699 | 1986 | return result; |
1da177e4 LT |
1987 | } |
1988 | EXPORT_SYMBOL(scsi_mode_sense); | |
1989 | ||
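/*
 * Editorial sketch modelled on sd's cache-type handling: read the caching
 * mode page with scsi_mode_sense(), flip WCE, and write it back with
 * scsi_mode_select().  Timeout and retry counts are illustrative only.
 */
static int example_set_wce(struct scsi_device *sdev, int enable)
{
	unsigned char buffer[64];
	unsigned char *page;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int len, sp;

	if (scsi_mode_sense(sdev, 0x08 /* DBD */, 8 /* caching page */,
			    buffer, sizeof(buffer), 30 * HZ, 3, &data, NULL))
		return -EINVAL;

	/* Point past the mode header and any block descriptors. */
	page = buffer + data.header_length + data.block_descriptor_length;
	len = min_t(int, sizeof(buffer) - data.header_length -
		    data.block_descriptor_length,
		    data.length - data.header_length -
		    data.block_descriptor_length);

	if (enable)
		page[2] |= 0x04;	/* set WCE */
	else
		page[2] &= ~0x04;	/* clear WCE */

	/* Honour the PS bit as "save pages"; PS is reserved on SELECT. */
	sp = (page[0] & 0x80) ? 1 : 0;
	page[0] &= ~0x80;

	return scsi_mode_select(sdev, 1 /* PF */, sp, 8, page, len,
				30 * HZ, 3, &data, &sshdr);
}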
001aac25 JB |
1990 | /** |
1991 | * scsi_test_unit_ready - test if unit is ready | |
1992 | * @sdev: scsi device to change the state of. | |
1993 | * @timeout: command timeout | |
1994 | * @retries: number of retries before failing | |
1995 | * @sshdr_external: Optional pointer to struct scsi_sense_hdr for | |
1996 | * returning sense. Make sure that this is cleared before passing | |
1997 | * in. | |
1998 | * | |
1999 | * Returns zero if successful, or an error if the TUR failed. For | 
2000 | * removable media, a return of NOT_READY or UNIT_ATTENTION is | |
2001 | * translated to success, with the ->changed flag updated. | |
2002 | **/ | |
1da177e4 | 2003 | int |
001aac25 JB |
2004 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, |
2005 | struct scsi_sense_hdr *sshdr_external) | |
1da177e4 | 2006 | { |
1da177e4 LT |
2007 | char cmd[] = { |
2008 | TEST_UNIT_READY, 0, 0, 0, 0, 0, | |
2009 | }; | |
001aac25 | 2010 | struct scsi_sense_hdr *sshdr; |
1da177e4 | 2011 | int result; |
001aac25 JB |
2012 | |
2013 | if (!sshdr_external) | |
2014 | sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); | |
2015 | else | |
2016 | sshdr = sshdr_external; | |
2017 | ||
2018 | /* try to eat the UNIT_ATTENTION if there are enough retries */ | |
2019 | do { | |
2020 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, | |
f4f4e47e | 2021 | timeout, retries, NULL); |
32c356d7 JB |
2022 | if (sdev->removable && scsi_sense_valid(sshdr) && |
2023 | sshdr->sense_key == UNIT_ATTENTION) | |
2024 | sdev->changed = 1; | |
2025 | } while (scsi_sense_valid(sshdr) && | |
2026 | sshdr->sense_key == UNIT_ATTENTION && --retries); | |
001aac25 JB |
2027 | |
2028 | if (!sshdr) | |
2029 | /* could not allocate sense buffer, so can't process it */ | |
2030 | return result; | |
1da177e4 | 2031 | |
32c356d7 JB |
2032 | if (sdev->removable && scsi_sense_valid(sshdr) && |
2033 | (sshdr->sense_key == UNIT_ATTENTION || | |
2034 | sshdr->sense_key == NOT_READY)) { | |
2035 | sdev->changed = 1; | |
2036 | result = 0; | |
1da177e4 | 2037 | } |
001aac25 JB |
2038 | if (!sshdr_external) |
2039 | kfree(sshdr); | |
1da177e4 LT |
2040 | return result; |
2041 | } | |
2042 | EXPORT_SYMBOL(scsi_test_unit_ready); | |
2043 | ||
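/*
 * Editorial sketch: polling a removable device the way a media-polling
 * driver might.  The sense header must start out zeroed, as the kerneldoc
 * above requires; timeout and retry values are illustrative.
 */
static int example_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int ret;

	memset(&sshdr, 0, sizeof(sshdr));
	ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (ret)
		return 0;	/* not ready */
	if (sdev->changed)
		sdev_printk(KERN_INFO, sdev, "media changed\n");
	return 1;	/* ready, possibly after a media change */
}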
2044 | /** | |
eb44820c | 2045 | * scsi_device_set_state - Take the given device through the device state model. |
1da177e4 LT |
2046 | * @sdev: scsi device to change the state of. |
2047 | * @state: state to change to. | |
2048 | * | |
2049 | * Returns zero if successful, or -EINVAL if the requested | 
2050 | * transition is illegal. | |
eb44820c | 2051 | */ |
1da177e4 LT |
2052 | int |
2053 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |
2054 | { | |
2055 | enum scsi_device_state oldstate = sdev->sdev_state; | |
2056 | ||
2057 | if (state == oldstate) | |
2058 | return 0; | |
2059 | ||
2060 | switch (state) { | |
2061 | case SDEV_CREATED: | |
6f4267e3 JB |
2062 | switch (oldstate) { |
2063 | case SDEV_CREATED_BLOCK: | |
2064 | break; | |
2065 | default: | |
2066 | goto illegal; | |
2067 | } | |
2068 | break; | |
1da177e4 LT |
2069 | |
2070 | case SDEV_RUNNING: | |
2071 | switch (oldstate) { | |
2072 | case SDEV_CREATED: | |
2073 | case SDEV_OFFLINE: | |
2074 | case SDEV_QUIESCE: | |
2075 | case SDEV_BLOCK: | |
2076 | break; | |
2077 | default: | |
2078 | goto illegal; | |
2079 | } | |
2080 | break; | |
2081 | ||
2082 | case SDEV_QUIESCE: | |
2083 | switch (oldstate) { | |
2084 | case SDEV_RUNNING: | |
2085 | case SDEV_OFFLINE: | |
2086 | break; | |
2087 | default: | |
2088 | goto illegal; | |
2089 | } | |
2090 | break; | |
2091 | ||
2092 | case SDEV_OFFLINE: | |
2093 | switch (oldstate) { | |
2094 | case SDEV_CREATED: | |
2095 | case SDEV_RUNNING: | |
2096 | case SDEV_QUIESCE: | |
2097 | case SDEV_BLOCK: | |
2098 | break; | |
2099 | default: | |
2100 | goto illegal; | |
2101 | } | |
2102 | break; | |
2103 | ||
2104 | case SDEV_BLOCK: | |
2105 | switch (oldstate) { | |
1da177e4 | 2106 | case SDEV_RUNNING: |
6f4267e3 JB |
2107 | case SDEV_CREATED_BLOCK: |
2108 | break; | |
2109 | default: | |
2110 | goto illegal; | |
2111 | } | |
2112 | break; | |
2113 | ||
2114 | case SDEV_CREATED_BLOCK: | |
2115 | switch (oldstate) { | |
2116 | case SDEV_CREATED: | |
1da177e4 LT |
2117 | break; |
2118 | default: | |
2119 | goto illegal; | |
2120 | } | |
2121 | break; | |
2122 | ||
2123 | case SDEV_CANCEL: | |
2124 | switch (oldstate) { | |
2125 | case SDEV_CREATED: | |
2126 | case SDEV_RUNNING: | |
9ea72909 | 2127 | case SDEV_QUIESCE: |
1da177e4 LT |
2128 | case SDEV_OFFLINE: |
2129 | case SDEV_BLOCK: | |
2130 | break; | |
2131 | default: | |
2132 | goto illegal; | |
2133 | } | |
2134 | break; | |
2135 | ||
2136 | case SDEV_DEL: | |
2137 | switch (oldstate) { | |
309bd271 BK |
2138 | case SDEV_CREATED: |
2139 | case SDEV_RUNNING: | |
2140 | case SDEV_OFFLINE: | |
1da177e4 LT |
2141 | case SDEV_CANCEL: |
2142 | break; | |
2143 | default: | |
2144 | goto illegal; | |
2145 | } | |
2146 | break; | |
2147 | ||
2148 | } | |
2149 | sdev->sdev_state = state; | |
2150 | return 0; | |
2151 | ||
2152 | illegal: | |
2153 | SCSI_LOG_ERROR_RECOVERY(1, | |
9ccfc756 JB |
2154 | sdev_printk(KERN_ERR, sdev, |
2155 | "Illegal state transition %s->%s\n", | |
2156 | scsi_device_state_name(oldstate), | |
2157 | scsi_device_state_name(state)) | |
1da177e4 LT |
2158 | ); |
2159 | return -EINVAL; | |
2160 | } | |
2161 | EXPORT_SYMBOL(scsi_device_set_state); | |
2162 | ||
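/*
 * Editorial sketch: transports typically use this to take a device
 * offline when its port vanishes; an illegal transition is reported
 * rather than forced.  "example_mark_device_lost" is a hypothetical name.
 */
static void example_mark_device_lost(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "cannot take device offline from state %s\n",
			    scsi_device_state_name(sdev->sdev_state));
}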
a341cd0f JG |
2163 | /** |
2164 | * scsi_evt_emit - emit a single SCSI device uevent | 
2165 | * @sdev: associated SCSI device | |
2166 | * @evt: event to emit | |
2167 | * | |
2168 | * Send a single uevent (scsi_event) to the associated scsi_device. | |
2169 | */ | |
2170 | static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) | |
2171 | { | |
2172 | int idx = 0; | |
2173 | char *envp[3]; | |
2174 | ||
2175 | switch (evt->evt_type) { | |
2176 | case SDEV_EVT_MEDIA_CHANGE: | |
2177 | envp[idx++] = "SDEV_MEDIA_CHANGE=1"; | |
2178 | break; | |
2179 | ||
2180 | default: | |
2181 | /* do nothing */ | |
2182 | break; | |
2183 | } | |
2184 | ||
2185 | envp[idx++] = NULL; | |
2186 | ||
2187 | kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); | |
2188 | } | |
2189 | ||
2190 | /** | |
2191 | * scsi_evt_thread - send a uevent for each scsi event | 
2192 | * @work: work struct for scsi_device | |
2193 | * | |
2194 | * Dispatch queued events to their associated scsi_device kobjects | |
2195 | * as uevents. | |
2196 | */ | |
2197 | void scsi_evt_thread(struct work_struct *work) | |
2198 | { | |
2199 | struct scsi_device *sdev; | |
2200 | LIST_HEAD(event_list); | |
2201 | ||
2202 | sdev = container_of(work, struct scsi_device, event_work); | |
2203 | ||
2204 | while (1) { | |
2205 | struct scsi_event *evt; | |
2206 | struct list_head *this, *tmp; | |
2207 | unsigned long flags; | |
2208 | ||
2209 | spin_lock_irqsave(&sdev->list_lock, flags); | |
2210 | list_splice_init(&sdev->event_list, &event_list); | |
2211 | spin_unlock_irqrestore(&sdev->list_lock, flags); | |
2212 | ||
2213 | if (list_empty(&event_list)) | |
2214 | break; | |
2215 | ||
2216 | list_for_each_safe(this, tmp, &event_list) { | |
2217 | evt = list_entry(this, struct scsi_event, node); | |
2218 | list_del(&evt->node); | |
2219 | scsi_evt_emit(sdev, evt); | |
2220 | kfree(evt); | |
2221 | } | |
2222 | } | |
2223 | } | |
2224 | ||
2225 | /** | |
2226 | * sdev_evt_send - send asserted event to uevent thread | |
2227 | * @sdev: scsi_device event occurred on | |
2228 | * @evt: event to send | |
2229 | * | |
2230 | * Assert scsi device event asynchronously. | |
2231 | */ | |
2232 | void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) | |
2233 | { | |
2234 | unsigned long flags; | |
2235 | ||
4d1566ed KS |
2236 | #if 0 |
2237 | /* FIXME: currently this check eliminates all media change events | |
2238 | * for polled devices. Need to update to discriminate between AN | |
2239 | * and polled events */ | |
a341cd0f JG |
2240 | if (!test_bit(evt->evt_type, sdev->supported_events)) { |
2241 | kfree(evt); | |
2242 | return; | |
2243 | } | |
4d1566ed | 2244 | #endif |
a341cd0f JG |
2245 | |
2246 | spin_lock_irqsave(&sdev->list_lock, flags); | |
2247 | list_add_tail(&evt->node, &sdev->event_list); | |
2248 | schedule_work(&sdev->event_work); | |
2249 | spin_unlock_irqrestore(&sdev->list_lock, flags); | |
2250 | } | |
2251 | EXPORT_SYMBOL_GPL(sdev_evt_send); | |
2252 | ||
2253 | /** | |
2254 | * sdev_evt_alloc - allocate a new scsi event | |
2255 | * @evt_type: type of event to allocate | |
2256 | * @gfpflags: GFP flags for allocation | |
2257 | * | |
2258 | * Allocates and returns a new scsi_event. | |
2259 | */ | |
2260 | struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, | |
2261 | gfp_t gfpflags) | |
2262 | { | |
2263 | struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); | |
2264 | if (!evt) | |
2265 | return NULL; | |
2266 | ||
2267 | evt->evt_type = evt_type; | |
2268 | INIT_LIST_HEAD(&evt->node); | |
2269 | ||
2270 | /* evt_type-specific initialization, if any */ | |
2271 | switch (evt_type) { | |
2272 | case SDEV_EVT_MEDIA_CHANGE: | |
2273 | default: | |
2274 | /* do nothing */ | |
2275 | break; | |
2276 | } | |
2277 | ||
2278 | return evt; | |
2279 | } | |
2280 | EXPORT_SYMBOL_GPL(sdev_evt_alloc); | |
2281 | ||
2282 | /** | |
2283 | * sdev_evt_send_simple - send asserted event to uevent thread | |
2284 | * @sdev: scsi_device event occurred on | |
2285 | * @evt_type: type of event to send | |
2286 | * @gfpflags: GFP flags for allocation | |
2287 | * | |
2288 | * Assert scsi device event asynchronously, given an event type. | |
2289 | */ | |
2290 | void sdev_evt_send_simple(struct scsi_device *sdev, | |
2291 | enum scsi_device_event evt_type, gfp_t gfpflags) | |
2292 | { | |
2293 | struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); | |
2294 | if (!evt) { | |
2295 | sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", | |
2296 | evt_type); | |
2297 | return; | |
2298 | } | |
2299 | ||
2300 | sdev_evt_send(sdev, evt); | |
2301 | } | |
2302 | EXPORT_SYMBOL_GPL(sdev_evt_send_simple); | |
2303 | ||
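/*
 * Editorial sketch: an LLD that learns of a media change (e.g. from an
 * async notification) can report it with the one-call helper; callers
 * that must not fail silently can instead pre-allocate with
 * sdev_evt_alloc() and queue via sdev_evt_send().
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}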
1da177e4 LT |
2304 | /** |
2305 | * scsi_device_quiesce - Block user issued commands. | |
2306 | * @sdev: scsi device to quiesce. | |
2307 | * | |
2308 | * This works by trying to transition to the SDEV_QUIESCE state | |
2309 | * (which must be a legal transition). When the device is in this | |
2310 | * state, only special requests will be accepted, all others will | |
2311 | * be deferred. Since special requests may also be requeued requests, | |
2312 | * a successful return doesn't guarantee the device will be | |
2313 | * totally quiescent. | |
2314 | * | |
2315 | * Must be called with user context, may sleep. | |
2316 | * | |
2317 | * Returns zero if successful, or an error if the transition failed. | 
eb44820c | 2318 | */ |
1da177e4 LT |
2319 | int |
2320 | scsi_device_quiesce(struct scsi_device *sdev) | |
2321 | { | |
2322 | int err = scsi_device_set_state(sdev, SDEV_QUIESCE); | |
2323 | if (err) | |
2324 | return err; | |
2325 | ||
2326 | scsi_run_queue(sdev->request_queue); | |
2327 | while (sdev->device_busy) { | |
2328 | msleep_interruptible(200); | |
2329 | scsi_run_queue(sdev->request_queue); | |
2330 | } | |
2331 | return 0; | |
2332 | } | |
2333 | EXPORT_SYMBOL(scsi_device_quiesce); | |
2334 | ||
2335 | /** | |
2336 | * scsi_device_resume - Restart user issued commands to a quiesced device. | |
2337 | * @sdev: scsi device to resume. | |
2338 | * | |
2339 | * Moves the device from quiesced back to running and restarts the | |
2340 | * queues. | |
2341 | * | |
2342 | * Must be called with user context, may sleep. | |
eb44820c | 2343 | */ |
1da177e4 LT |
2344 | void |
2345 | scsi_device_resume(struct scsi_device *sdev) | |
2346 | { | |
2347 | if (scsi_device_set_state(sdev, SDEV_RUNNING)) | 
2348 | return; | |
2349 | scsi_run_queue(sdev->request_queue); | |
2350 | } | |
2351 | EXPORT_SYMBOL(scsi_device_resume); | |
2352 | ||
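/*
 * Editorial sketch: holding off user I/O while performing maintenance on
 * the device.  The maintenance step itself is a placeholder.
 */
static int example_device_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	/* ... only specially issued requests reach the device here ... */
	scsi_device_resume(sdev);
	return 0;
}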
2353 | static void | |
2354 | device_quiesce_fn(struct scsi_device *sdev, void *data) | |
2355 | { | |
2356 | scsi_device_quiesce(sdev); | |
2357 | } | |
2358 | ||
2359 | void | |
2360 | scsi_target_quiesce(struct scsi_target *starget) | |
2361 | { | |
2362 | starget_for_each_device(starget, NULL, device_quiesce_fn); | |
2363 | } | |
2364 | EXPORT_SYMBOL(scsi_target_quiesce); | |
2365 | ||
2366 | static void | |
2367 | device_resume_fn(struct scsi_device *sdev, void *data) | |
2368 | { | |
2369 | scsi_device_resume(sdev); | |
2370 | } | |
2371 | ||
2372 | void | |
2373 | scsi_target_resume(struct scsi_target *starget) | |
2374 | { | |
2375 | starget_for_each_device(starget, NULL, device_resume_fn); | |
2376 | } | |
2377 | EXPORT_SYMBOL(scsi_target_resume); | |
2378 | ||
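/*
 * Editorial sketch: SPI-style domain validation quiesces the whole target
 * so that only internally generated commands flow while transfer settings
 * are renegotiated.
 */
static void example_revalidate_target(struct scsi_target *starget)
{
	scsi_target_quiesce(starget);
	/* ... issue validation commands, renegotiate the bus ... */
	scsi_target_resume(starget);
}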
2379 | /** | |
eb44820c | 2380 | * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state |
1da177e4 LT |
2381 | * @sdev: device to block |
2382 | * | |
2383 | * Block request made by scsi lld's to temporarily stop all | |
2384 | * scsi commands on the specified device. Called from interrupt | |
2385 | * or normal process context. | |
2386 | * | |
2387 | * Returns zero if successful or error if not | |
2388 | * | |
2389 | * Notes: | |
2390 | * This routine transitions the device to the SDEV_BLOCK state | |
2391 | * (which must be a legal transition). When the device is in this | |
2392 | * state, all commands are deferred until the scsi lld reenables | |
2393 | * the device with scsi_internal_device_unblock() or device_block_tmo fires. | 
2394 | * This routine assumes the host_lock is held on entry. | |
eb44820c | 2395 | */ |
1da177e4 LT |
2396 | int |
2397 | scsi_internal_device_block(struct scsi_device *sdev) | |
2398 | { | |
165125e1 | 2399 | struct request_queue *q = sdev->request_queue; |
1da177e4 LT |
2400 | unsigned long flags; |
2401 | int err = 0; | |
2402 | ||
2403 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | |
6f4267e3 JB |
2404 | if (err) { |
2405 | err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); | |
2406 | ||
2407 | if (err) | |
2408 | return err; | |
2409 | } | |
1da177e4 LT |
2410 | |
2411 | /* | |
2412 | * The device has transitioned to SDEV_BLOCK. Stop the | |
2413 | * block layer from calling the midlayer with this device's | |
2414 | * request queue. | |
2415 | */ | |
2416 | spin_lock_irqsave(q->queue_lock, flags); | |
2417 | blk_stop_queue(q); | |
2418 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2419 | ||
2420 | return 0; | |
2421 | } | |
2422 | EXPORT_SYMBOL_GPL(scsi_internal_device_block); | |
2423 | ||
2424 | /** | |
2425 | * scsi_internal_device_unblock - resume a device after a block request | |
2426 | * @sdev: device to resume | |
2427 | * | |
2428 | * Called by scsi lld's or the midlayer to restart the device queue | |
2429 | * for the previously suspended scsi device. Called from interrupt or | |
2430 | * normal process context. | |
2431 | * | |
2432 | * Returns zero if successful or error if not. | |
2433 | * | |
2434 | * Notes: | |
2435 | * This routine transitions the device to the SDEV_RUNNING state | |
2436 | * (which must be a legal transition) allowing the midlayer to | |
2437 | * goose the queue for this device. This routine assumes the | |
2438 | * host_lock is held upon entry. | |
eb44820c | 2439 | */ |
1da177e4 LT |
2440 | int |
2441 | scsi_internal_device_unblock(struct scsi_device *sdev) | |
2442 | { | |
165125e1 | 2443 | struct request_queue *q = sdev->request_queue; |
1da177e4 LT |
2444 | int err; |
2445 | unsigned long flags; | |
2446 | ||
2447 | /* | |
2448 | * Try to transition the scsi device to SDEV_RUNNING | |
2449 | * and goose the device queue if successful. | |
2450 | */ | |
2451 | err = scsi_device_set_state(sdev, SDEV_RUNNING); | |
6f4267e3 JB |
2452 | if (err) { |
2453 | err = scsi_device_set_state(sdev, SDEV_CREATED); | |
2454 | ||
2455 | if (err) | |
2456 | return err; | |
2457 | } | |
1da177e4 LT |
2458 | |
2459 | spin_lock_irqsave(q->queue_lock, flags); | |
2460 | blk_start_queue(q); | |
2461 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2462 | ||
2463 | return 0; | |
2464 | } | |
2465 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); | |
2466 | ||
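/*
 * Editorial sketch: a transport's dev-loss handling might block a device
 * when its link drops and unblock it on recovery (compare the
 * device_block/device_unblock wrappers below).
 */
static void example_link_event(struct scsi_device *sdev, int link_up)
{
	if (link_up)
		scsi_internal_device_unblock(sdev);
	else
		scsi_internal_device_block(sdev);
}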
2467 | static void | |
2468 | device_block(struct scsi_device *sdev, void *data) | |
2469 | { | |
2470 | scsi_internal_device_block(sdev); | |
2471 | } | |
2472 | ||
2473 | static int | |
2474 | target_block(struct device *dev, void *data) | |
2475 | { | |
2476 | if (scsi_is_target_device(dev)) | |
2477 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2478 | device_block); | |
2479 | return 0; | |
2480 | } | |
2481 | ||
2482 | void | |
2483 | scsi_target_block(struct device *dev) | |
2484 | { | |
2485 | if (scsi_is_target_device(dev)) | |
2486 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2487 | device_block); | |
2488 | else | |
2489 | device_for_each_child(dev, NULL, target_block); | |
2490 | } | |
2491 | EXPORT_SYMBOL_GPL(scsi_target_block); | |
2492 | ||
2493 | static void | |
2494 | device_unblock(struct scsi_device *sdev, void *data) | |
2495 | { | |
2496 | scsi_internal_device_unblock(sdev); | |
2497 | } | |
2498 | ||
2499 | static int | |
2500 | target_unblock(struct device *dev, void *data) | |
2501 | { | |
2502 | if (scsi_is_target_device(dev)) | |
2503 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2504 | device_unblock); | |
2505 | return 0; | |
2506 | } | |
2507 | ||
2508 | void | |
2509 | scsi_target_unblock(struct device *dev) | |
2510 | { | |
2511 | if (scsi_is_target_device(dev)) | |
2512 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2513 | device_unblock); | |
2514 | else | |
2515 | device_for_each_child(dev, NULL, target_unblock); | |
2516 | } | |
2517 | EXPORT_SYMBOL_GPL(scsi_target_unblock); | |
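/*
 * Editorial sketch: transports drive these on link events; the FC
 * transport, for instance, blocks a remote port's targets while the port
 * is gone and unblocks them when it returns.  "rport_dev" stands in for
 * the rport's embedded struct device.
 */
static void example_rport_link_event(struct device *rport_dev, int online)
{
	if (online)
		scsi_target_unblock(rport_dev);
	else
		scsi_target_block(rport_dev);
}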
cdb8c2a6 GL |
2518 | |
2519 | /** | |
2520 | * scsi_kmap_atomic_sg - find and atomically map an sg element | 
eb44820c | 2521 | * @sgl: scatter-gather list |
cdb8c2a6 GL |
2522 | * @sg_count: number of segments in sg |
2523 | * @offset: offset in bytes into sg, on return offset into the mapped area | |
2524 | * @len: bytes to map, on return number of bytes mapped | |
2525 | * | |
2526 | * Returns virtual address of the start of the mapped page | |
2527 | */ | |
c6132da1 | 2528 | void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, |
cdb8c2a6 GL |
2529 | size_t *offset, size_t *len) |
2530 | { | |
2531 | int i; | |
2532 | size_t sg_len = 0, len_complete = 0; | |
c6132da1 | 2533 | struct scatterlist *sg; |
cdb8c2a6 GL |
2534 | struct page *page; |
2535 | ||
22cfefb5 AM |
2536 | WARN_ON(!irqs_disabled()); |
2537 | ||
c6132da1 | 2538 | for_each_sg(sgl, sg, sg_count, i) { |
cdb8c2a6 | 2539 | len_complete = sg_len; /* Complete sg-entries */ |
c6132da1 | 2540 | sg_len += sg->length; |
cdb8c2a6 GL |
2541 | if (sg_len > *offset) |
2542 | break; | |
2543 | } | |
2544 | ||
2545 | if (unlikely(i == sg_count)) { | |
169e1a2a AM |
2546 | printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " |
2547 | "elements %d\n", | |
cadbd4a5 | 2548 | __func__, sg_len, *offset, sg_count); |
cdb8c2a6 GL |
2549 | WARN_ON(1); |
2550 | return NULL; | |
2551 | } | |
2552 | ||
2553 | /* Offset starting from the beginning of first page in this sg-entry */ | |
c6132da1 | 2554 | *offset = *offset - len_complete + sg->offset; |
cdb8c2a6 GL |
2555 | |
2556 | /* Assumption: contiguous pages can be accessed as "page + i" */ | |
45711f1a | 2557 | page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); |
cdb8c2a6 GL |
2558 | *offset &= ~PAGE_MASK; |
2559 | ||
2560 | /* Bytes in this sg-entry from *offset to the end of the page */ | |
2561 | sg_len = PAGE_SIZE - *offset; | |
2562 | if (*len > sg_len) | |
2563 | *len = sg_len; | |
2564 | ||
2565 | return kmap_atomic(page, KM_BIO_SRC_IRQ); | |
2566 | } | |
2567 | EXPORT_SYMBOL(scsi_kmap_atomic_sg); | |
2568 | ||
2569 | /** | |
eb44820c | 2570 | * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg |
cdb8c2a6 GL |
2571 | * @virt: virtual address to be unmapped |
2572 | */ | |
2573 | void scsi_kunmap_atomic_sg(void *virt) | |
2574 | { | |
2575 | kunmap_atomic(virt, KM_BIO_SRC_IRQ); | |
2576 | } | |
2577 | EXPORT_SYMBOL(scsi_kunmap_atomic_sg); |
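/*
 * Editorial sketch: copying bytes out of a scatterlist with the atomic
 * kmap helpers above.  The WARN_ON in scsi_kmap_atomic_sg() requires
 * interrupts off, so the sketch disables them around the loop.
 */
static void example_sg_copy_out(struct scatterlist *sgl, int sg_count,
				size_t skip, void *dest, size_t count)
{
	char *dst = dest;
	unsigned long flags;

	local_irq_save(flags);
	while (count) {
		size_t offset = skip;	/* in: offset into the sg list */
		size_t len = count;	/* in: bytes wanted */
		char *vaddr;

		vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
		if (!vaddr)
			break;		/* ran past the end of the list */
		/* out: offset is now relative to the mapped page */
		memcpy(dst, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
		dst += len;
		skip += len;
		count -= len;
	}
	local_irq_restore(flags);
}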