// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (!scsi_sense_cache) {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}
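
/*
 * Illustrative sketch, not part of scsi_lib.c: how a low-level driver
 * typically triggers the blocking logic above.  Returning
 * SCSI_MLQUEUE_HOST_BUSY (or the DEVICE/TARGET variants) from .queuecommand
 * makes the midlayer requeue the command via scsi_queue_insert(), which ends
 * up in scsi_set_blocked().  example_hw_can_accept() is a made-up helper.
 */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	if (!example_hw_can_accept(shost))	/* hypothetical "adapter full" check */
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry later */

	/* ... normally: build the CDB/descriptor and post it to the adapter ... */
	return 0;
}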

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device, cmd);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(cmd->request, true);
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases.  Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}


/**
 * __scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, GFP_NOIO))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(__scsi_execute);
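
/*
 * Illustrative sketch, not part of scsi_lib.c: issuing a TEST UNIT READY
 * through __scsi_execute().  In-tree callers normally go through the
 * scsi_execute()/scsi_execute_req() wrappers declared in
 * <scsi/scsi_device.h>, which fill in most of these arguments; the timeout
 * (in jiffies, like other in-tree callers pass) and retry count below are
 * arbitrary values chosen for the sketch.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	int result;

	/* No data phase: DMA_NONE with a NULL buffer of length 0. */
	result = __scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
				30 * HZ, 3, 0, 0, NULL);
	if (result < 0)
		return result;	/* request allocation or mapping failed */

	return scsi_status_is_good(result) ? 0 : -EIO;
}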

/*
 * Wake up the error handler if necessary.  Avoid as follows that the error
 * handler is not woken up if host in-flight requests number ==
 * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
 * with an RCU read lock in this function to ensure that this function in
 * its entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	rcu_read_lock();
	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost, cmd);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	sbitmap_put(&sdev->budget_map, cmd->budget_token);
	cmd->budget_token = -1;
}

static void scsi_kick_queue(struct request_queue *q)
{
	blk_mq_run_hw_queues(q, false);
}

/*
 * Called for single_lun devices on IO completion.  Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev.  We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first.  Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (scsi_device_busy(sdev) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue.  scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	blk_mq_run_hw_queues(q, false);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
}

static void scsi_run_queue_async(struct scsi_device *sdev)
{
	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list)) {
		kblockd_schedule_work(&sdev->requeue_work);
	} else {
		/*
		 * smp_mb() present in sbitmap_queue_clear() or implied in
		 * .end_io is for ordering writing .device_busy in
		 * scsi_device_unbusy() and reading sdev->restarts.
		 */
		int old = atomic_read(&sdev->restarts);

		/*
		 * ->restarts has to be kept as non-zero if new budget
		 *  contention occurs.
		 *
		 * No need to run queue when either another re-run
		 * queue wins in updating ->restarts or a new budget
		 * contention occurs.
		 */
		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
			blk_mq_run_hw_queues(sdev->request_queue, true);
	}
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it earlier.
	 *
	 * We also can't kick the queues from irq context, so we
	 * will have to defer it to a workqueue.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	scsi_run_queue_async(sdev);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:	SCSI command
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.  May reset the host
 * byte of @cmd->result.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_OK:
		/*
		 * Also check the other bytes than the status byte in result
		 * to handle the case when a SCSI LLD sets result to
		 * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
		 */
		if (scsi_status_is_good(result) && (result & ~0xff) == 0)
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
	case DID_TRANSPORT_MARGINAL:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}
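
/*
 * Illustrative sketch, not part of scsi_lib.c: the host bytes decoded above
 * are what an LLD (or the error-handling path) stores in cmd->result before
 * completing a command; a lost transport session, for example, surfaces to
 * the block layer as BLK_STS_TRANSPORT.
 */
static void example_fail_lost_session(struct scsi_cmnd *cmd)
{
	set_host_byte(cmd, DID_TRANSPORT_FAILFAST);	/* -> BLK_STS_TRANSPORT */
	cmd->scsi_done(cmd);				/* complete to the midlayer */
}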

/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
				      struct request_queue *q)
{
	/* A new command will be prepared and issued. */
	scsi_mq_requeue_cmd(cmd);
}

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long wait_for;

	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return false;

	wait_for = (cmd->allowed + 1) * req->timeout;
	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
			    wait_for/HZ);
		return true;
	}
	return false;
}

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;      /* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
				case 0x24: /* depopulation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				case 0x0a: /* ALUA state transition */
					blk_stat = BLK_STS_AGAIN;
					fallthrough;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		case DATA_PROTECT:
			action = ACTION_FAIL;
			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
			    (sshdr.asc == 0x55 &&
			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
				/* Insufficient zone resources */
				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
			}
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) == DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
			return;
		fallthrough;
	case ACTION_REPREP:
		scsi_io_completion_reprep(cmd, q);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero.  Returns a
 * new result that may suppress further error checking.  Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = cmd->request;
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			scsi_req(req)->sense_len =
				min(8 + cmd->sense_buffer[7],
				    SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(cmd, result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets *blk_statp explicitly for the problem case.
		 */
		*blk_statp = scsi_result_to_blk_status(cmd, result);
	}
	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;
		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers.  Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit).  Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if (status_byte(result) && scsi_status_is_good(result)) {
		result = 0;
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * We will finish off the specified number of sectors.  If we are done, the
 * command block will be released and the queue function will be goosed.  If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_io_completion_reprep().  The request will be
 *      unprepared and put back on the queue.  Then a new command will
 *      be created for it.  This should be used if we made forward
 *      progress, or if we want to switch from READ(10) to READ(6) for
 *      example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *      put back on the queue and retried using the same command as
 *      before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *      BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	if (unlikely(blk_rq_is_passthrough(req))) {
		/*
		 * scsi_result_to_blk_status may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Failed, zero length commands always need to drop down
	 * to retry code.  Fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_io_completion_reprep(cmd, q);
	else
		scsi_io_completion_action(cmd, result);
}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
		struct request *rq)
{
	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
	       !op_is_write(req_op(rq)) &&
	       sdev->host->hostt->dma_need_drain(rq);
}

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
	struct scatterlist *last_sg = NULL;
	blk_status_t ret;
	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
	int count;

	if (WARN_ON_ONCE(!nr_segs))
		return BLK_STS_IOERR;

	/*
	 * Make sure there is space for the drain.  The driver must adjust
	 * max_hw_segments to be prepared for this.
	 */
	if (need_drain)
		nr_segs++;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
		unsigned int pad_len =
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		last_sg->length += pad_len;
		cmd->extra_len += pad_len;
	}

	if (need_drain) {
		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
		sg_mark_end(last_sg);

		cmd->extra_len += sdev->dma_drain_len;
		count++;
	}

	BUG_ON(count > cmd->sdb.table.nents);
	cmd->sdb.table.nents = count;
	cmd->sdb.length = blk_rq_payload_bytes(rq);

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
				prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_alloc_sgtables);
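
/*
 * Illustrative sketch, not part of scsi_lib.c: how an upper-level driver's
 * command setup typically uses scsi_alloc_sgtables(), similar to what sd.c
 * does when it builds a READ/WRITE command.  example_build_cdb() is a
 * made-up helper standing in for the driver's CDB construction, and the
 * retry budget is an arbitrary value for the sketch.
 */
static blk_status_t example_setup_rw_cmnd(struct scsi_cmnd *cmd)
{
	blk_status_t ret;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;		/* BLK_STS_RESOURCE means "retry later" */

	example_build_cdb(cmd);				/* hypothetical */
	cmd->transfersize = blk_rq_bytes(cmd->request);
	cmd->allowed = 3;				/* arbitrary retry budget */
	return BLK_STS_OK;
}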
1da177e4 | 1072 | |
ca18d6f7 | 1073 | /** |
832889f5 | 1074 | * scsi_initialize_rq - initialize struct scsi_cmnd partially |
35c0506f | 1075 | * @rq: Request associated with the SCSI command to be initialized. |
ca18d6f7 | 1076 | * |
832889f5 BVA |
1077 | * This function initializes the members of struct scsi_cmnd that must be |
1078 | * initialized before request processing starts and that won't be | |
1079 | * reinitialized if a SCSI command is requeued. | |
1080 | * | |
64104f70 BVA |
1081 | * Called from inside blk_get_request() for pass-through requests and from |
1082 | * inside scsi_init_command() for filesystem requests. | |
ca18d6f7 | 1083 | */ |
e4c9470b | 1084 | static void scsi_initialize_rq(struct request *rq) |
ca18d6f7 | 1085 | { |
c8d9cf22 BVA |
1086 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1087 | ||
1088 | scsi_req_init(&cmd->req); | |
3be8828f | 1089 | init_rcu_head(&cmd->rcu); |
832889f5 BVA |
1090 | cmd->jiffies_at_alloc = jiffies; |
1091 | cmd->retries = 0; | |
ca18d6f7 | 1092 | } |
ca18d6f7 | 1093 | |
b7e9e1fb ML |
1094 | /* |
1095 | * Only called when the request isn't completed by SCSI, and not freed by | |
1096 | * SCSI | |
1097 | */ | |
1098 | static void scsi_cleanup_rq(struct request *rq) | |
1099 | { | |
1100 | if (rq->rq_flags & RQF_DONTPREP) { | |
1101 | scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); | |
1102 | rq->rq_flags &= ~RQF_DONTPREP; | |
1103 | } | |
1104 | } | |
1105 | ||
65ca846a | 1106 | /* Called before a request is prepared. See also scsi_mq_prep_fn(). */ |
e9c787e6 | 1107 | void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) |
3b003157 | 1108 | { |
e9c787e6 CH |
1109 | void *buf = cmd->sense_buffer; |
1110 | void *prot = cmd->prot_sdb; | |
64104f70 BVA |
1111 | struct request *rq = blk_mq_rq_from_pdu(cmd); |
1112 | unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS; | |
832889f5 | 1113 | unsigned long jiffies_at_alloc; |
65ca846a | 1114 | int retries, to_clear; |
6eb045e0 | 1115 | bool in_flight; |
2a5a24aa | 1116 | int budget_token = cmd->budget_token; |
64104f70 BVA |
1117 | |
1118 | if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) { | |
1119 | flags |= SCMD_INITIALIZED; | |
1120 | scsi_initialize_rq(rq); | |
1121 | } | |
3b003157 | 1122 | |
832889f5 BVA |
1123 | jiffies_at_alloc = cmd->jiffies_at_alloc; |
1124 | retries = cmd->retries; | |
6eb045e0 | 1125 | in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
65ca846a BVA |
1126 | /* |
1127 | * Zero out the cmd, except for the embedded scsi_request. Only clear | |
1128 | * the driver-private command data if the LLD does not supply a | |
1129 | * function to initialize that data. | |
1130 | */ | |
1131 | to_clear = sizeof(*cmd) - sizeof(cmd->req); | |
1132 | if (!dev->host->hostt->init_cmd_priv) | |
1133 | to_clear += dev->host->hostt->cmd_size; | |
1134 | memset((char *)cmd + sizeof(cmd->req), 0, to_clear); | |
3b003157 | 1135 | |
e9c787e6 CH |
1136 | cmd->device = dev; |
1137 | cmd->sense_buffer = buf; | |
1138 | cmd->prot_sdb = prot; | |
64104f70 | 1139 | cmd->flags = flags; |
e9c787e6 | 1140 | INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); |
832889f5 BVA |
1141 | cmd->jiffies_at_alloc = jiffies_at_alloc; |
1142 | cmd->retries = retries; | |
6eb045e0 ML |
1143 | if (in_flight) |
1144 | __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); | |
2a5a24aa | 1145 | cmd->budget_token = budget_token; |
64a87b24 | 1146 | |
3b003157 CH |
1147 | } |
1148 | ||
785ba83b CH |
1149 | static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, |
1150 | struct request *req) | |
7b16318d | 1151 | { |
bed2213d | 1152 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); |
3b003157 CH |
1153 | |
1154 | /* | |
aebf526b | 1155 | * Passthrough requests may transfer data, in which case they must |
3b003157 CH |
1156 | * a bio attached to them. Or they might contain a SCSI command |
1157 | * that does not transfer data, in which case they may optionally | |
1158 | * submit a request without an attached bio. | |
1159 | */ | |
1160 | if (req->bio) { | |
7007e9dd | 1161 | blk_status_t ret = scsi_alloc_sgtables(cmd); |
159b2cbf CH |
1162 | if (unlikely(ret != BLK_STS_OK)) |
1163 | return ret; | |
3b003157 | 1164 | } else { |
b0790410 | 1165 | BUG_ON(blk_rq_bytes(req)); |
3b003157 | 1166 | |
30b0c37b | 1167 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
3b003157 | 1168 | } |
7b16318d | 1169 | |
82ed4db4 | 1170 | cmd->cmd_len = scsi_req(req)->cmd_len; |
2ceda20f CH |
1171 | if (cmd->cmd_len == 0) |
1172 | cmd->cmd_len = scsi_command_size(cmd->cmnd); | |
82ed4db4 | 1173 | cmd->cmnd = scsi_req(req)->cmd; |
b0790410 | 1174 | cmd->transfersize = blk_rq_bytes(req); |
64c7f1d1 | 1175 | cmd->allowed = scsi_req(req)->retries; |
785ba83b | 1176 | return BLK_STS_OK; |
7b16318d | 1177 | } |
7b16318d | 1178 | |
c092d4ec | 1179 | static blk_status_t |
822bd2db | 1180 | scsi_device_state_check(struct scsi_device *sdev, struct request *req) |
3b003157 | 1181 | { |
c092d4ec | 1182 | switch (sdev->sdev_state) { |
e6044f71 BVA |
1183 | case SDEV_CREATED: |
1184 | return BLK_STS_OK; | |
c092d4ec CH |
1185 | case SDEV_OFFLINE: |
1186 | case SDEV_TRANSPORT_OFFLINE: | |
1187 | /* | |
1188 | * If the device is offline we refuse to process any | |
1189 | * commands. The device must be brought online | |
1190 | * before trying any recovery commands. | |
1191 | */ | |
b0962c53 EM |
1192 | if (!sdev->offline_already) { |
1193 | sdev->offline_already = true; | |
1194 | sdev_printk(KERN_ERR, sdev, | |
1195 | "rejecting I/O to offline device\n"); | |
1196 | } | |
c092d4ec CH |
1197 | return BLK_STS_IOERR; |
1198 | case SDEV_DEL: | |
1199 | /* | |
1200 | * If the device is fully deleted, we refuse to | |
1201 | * process any commands as well. | |
1202 | */ | |
1203 | sdev_printk(KERN_ERR, sdev, | |
1204 | "rejecting I/O to dead device\n"); | |
1205 | return BLK_STS_IOERR; | |
1206 | case SDEV_BLOCK: | |
1207 | case SDEV_CREATED_BLOCK: | |
1208 | return BLK_STS_RESOURCE; | |
1209 | case SDEV_QUIESCE: | |
1210 | /* | |
e6044f71 BVA |
1211 | * If the device is blocked we only accept power management |
1212 | * commands. | |
c092d4ec | 1213 | */ |
e6044f71 | 1214 | if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) |
c092d4ec CH |
1215 | return BLK_STS_RESOURCE; |
1216 | return BLK_STS_OK; | |
1217 | default: | |
1218 | /* | |
1219 | * For any other not fully online state we only allow | |
e6044f71 | 1220 | * power management commands. |
c092d4ec | 1221 | */ |
e6044f71 | 1222 | if (req && !(req->rq_flags & RQF_PM)) |
c092d4ec CH |
1223 | return BLK_STS_IOERR; |
1224 | return BLK_STS_OK; | |
1da177e4 | 1225 | } |
7f9a6bc4 | 1226 | } |
1da177e4 | 1227 | |
1da177e4 | 1228 | /* |
020b0f0a ML |
1229 | * scsi_dev_queue_ready: if we can send requests to sdev, assign one token |
1230 | * and return the token else return -1. | |
1da177e4 LT |
1231 | */ |
1232 | static inline int scsi_dev_queue_ready(struct request_queue *q, | |
1233 | struct scsi_device *sdev) | |
1234 | { | |
020b0f0a | 1235 | int token; |
71e75c97 | 1236 | |
020b0f0a | 1237 | token = sbitmap_get(&sdev->budget_map); |
cd9070c9 | 1238 | if (atomic_read(&sdev->device_blocked)) { |
020b0f0a ML |
1239 | if (token < 0) |
1240 | goto out; | |
1241 | ||
1242 | if (scsi_device_busy(sdev) > 1) | |
71e75c97 CH |
1243 | goto out_dec; |
1244 | ||
1da177e4 LT |
1245 | /* |
1246 | * unblock after device_blocked iterates to zero | |
1247 | */ | |
f664a3cc | 1248 | if (atomic_dec_return(&sdev->device_blocked) > 0) |
71e75c97 | 1249 | goto out_dec; |
71e75c97 CH |
1250 | SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, |
1251 | "unblocking device at zero depth\n")); | |
1da177e4 | 1252 | } |
71e75c97 | 1253 | |
020b0f0a | 1254 | return token; |
71e75c97 | 1255 | out_dec: |
020b0f0a ML |
1256 | if (token >= 0) |
1257 | sbitmap_put(&sdev->budget_map, token); | |
1258 | out: | |
1259 | return -1; | |
1da177e4 LT |
1260 | } |
1261 | ||
f0c0a376 MC |
1262 | /* |
1263 | * scsi_target_queue_ready: checks if there we can send commands to target | |
1264 | * @sdev: scsi device on starget to check. | |
f0c0a376 MC |
1265 | */ |
1266 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, | |
1267 | struct scsi_device *sdev) | |
1268 | { | |
1269 | struct scsi_target *starget = scsi_target(sdev); | |
7ae65c0f | 1270 | unsigned int busy; |
f0c0a376 MC |
1271 | |
1272 | if (starget->single_lun) { | |
7ae65c0f | 1273 | spin_lock_irq(shost->host_lock); |
f0c0a376 | 1274 | if (starget->starget_sdev_user && |
7ae65c0f CH |
1275 | starget->starget_sdev_user != sdev) { |
1276 | spin_unlock_irq(shost->host_lock); | |
1277 | return 0; | |
1278 | } | |
f0c0a376 | 1279 | starget->starget_sdev_user = sdev; |
7ae65c0f | 1280 | spin_unlock_irq(shost->host_lock); |
f0c0a376 MC |
1281 | } |
1282 | ||
2ccbb008 CH |
1283 | if (starget->can_queue <= 0) |
1284 | return 1; | |
1285 | ||
7ae65c0f | 1286 | busy = atomic_inc_return(&starget->target_busy) - 1; |
cd9070c9 | 1287 | if (atomic_read(&starget->target_blocked) > 0) { |
7ae65c0f CH |
1288 | if (busy) |
1289 | goto starved; | |
1290 | ||
f0c0a376 MC |
1291 | /* |
1292 | * unblock after target_blocked iterates to zero | |
1293 | */ | |
cd9070c9 | 1294 | if (atomic_dec_return(&starget->target_blocked) > 0) |
7ae65c0f | 1295 | goto out_dec; |
cf68d334 CH |
1296 | |
1297 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, | |
1298 | "unblocking target at zero depth\n")); | |
f0c0a376 MC |
1299 | } |
1300 | ||
2ccbb008 | 1301 | if (busy >= starget->can_queue) |
7ae65c0f | 1302 | goto starved; |
f0c0a376 | 1303 | |
7ae65c0f CH |
1304 | return 1; |
1305 | ||
1306 | starved: | |
1307 | spin_lock_irq(shost->host_lock); | |
1308 | list_move_tail(&sdev->starved_entry, &shost->starved_list); | |
cf68d334 | 1309 | spin_unlock_irq(shost->host_lock); |
7ae65c0f | 1310 | out_dec: |
2ccbb008 CH |
1311 | if (starget->can_queue > 0) |
1312 | atomic_dec(&starget->target_busy); | |
7ae65c0f | 1313 | return 0; |
f0c0a376 MC |
1314 | } |
1315 | ||
1da177e4 LT |
1316 | /* |
1317 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else | |
1318 | * return 0. We must end up running the queue again whenever 0 is | |
1319 | * returned, else IO can hang. | |
1da177e4 LT |
1320 | */ |
1321 | static inline int scsi_host_queue_ready(struct request_queue *q, | |
1322 | struct Scsi_Host *shost, | |
6eb045e0 ML |
1323 | struct scsi_device *sdev, |
1324 | struct scsi_cmnd *cmd) | |
1da177e4 | 1325 | { |
939647ee | 1326 | if (scsi_host_in_recovery(shost)) |
74665016 CH |
1327 | return 0; |
1328 | ||
cd9070c9 | 1329 | if (atomic_read(&shost->host_blocked) > 0) { |
6eb045e0 | 1330 | if (scsi_host_busy(shost) > 0) |
74665016 CH |
1331 | goto starved; |
1332 | ||
1da177e4 LT |
1333 | /* |
1334 | * unblock after host_blocked iterates to zero | |
1335 | */ | |
cd9070c9 | 1336 | if (atomic_dec_return(&shost->host_blocked) > 0) |
74665016 | 1337 | goto out_dec; |
cf68d334 CH |
1338 | |
1339 | SCSI_LOG_MLQUEUE(3, | |
1340 | shost_printk(KERN_INFO, shost, | |
1341 | "unblocking host at zero depth\n")); | |
1da177e4 | 1342 | } |
74665016 | 1343 | |
74665016 CH |
1344 | if (shost->host_self_blocked) |
1345 | goto starved; | |
1da177e4 LT |
1346 | |
1347 | /* We're OK to process the command, so we can't be starved */ | |
74665016 CH |
1348 | if (!list_empty(&sdev->starved_entry)) { |
1349 | spin_lock_irq(shost->host_lock); | |
1350 | if (!list_empty(&sdev->starved_entry)) | |
1351 | list_del_init(&sdev->starved_entry); | |
1352 | spin_unlock_irq(shost->host_lock); | |
1353 | } | |
1da177e4 | 1354 | |
6eb045e0 ML |
1355 | __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1356 | ||
74665016 CH |
1357 | return 1; |
1358 | ||
1359 | starved: | |
1360 | spin_lock_irq(shost->host_lock); | |
1361 | if (list_empty(&sdev->starved_entry)) | |
1362 | list_add_tail(&sdev->starved_entry, &shost->starved_list); | |
cf68d334 | 1363 | spin_unlock_irq(shost->host_lock); |
74665016 | 1364 | out_dec: |
6eb045e0 | 1365 | scsi_dec_host_busy(shost, cmd); |
74665016 | 1366 | return 0; |
1da177e4 LT |
1367 | } |
1368 | ||
6c5121b7 KU |
1369 | /* |
1370 | * Busy state exporting function for request stacking drivers. | |
1371 | * | |
1372 | * For efficiency, no lock is taken to check the busy state of | |
1373 | * shost/starget/sdev, since the returned value is not guaranteed and | |
1374 | * may be changed after request stacking drivers call the function, | |
1375 | * regardless of taking lock or not. | |
1376 | * | |
67bd9413 BVA |
1377 | * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, SCSI | 
1378 | * needs to return 'not busy'. Otherwise, request stacking drivers | 
1379 | * may hold requests forever. | |
6c5121b7 | 1380 | */ |
f664a3cc | 1381 | static bool scsi_mq_lld_busy(struct request_queue *q) |
6c5121b7 KU |
1382 | { |
1383 | struct scsi_device *sdev = q->queuedata; | |
1384 | struct Scsi_Host *shost; | |
6c5121b7 | 1385 | |
3f3299d5 | 1386 | if (blk_queue_dying(q)) |
f664a3cc | 1387 | return false; |
6c5121b7 KU |
1388 | |
1389 | shost = sdev->host; | |
6c5121b7 | 1390 | |
b7e94a16 JN |
1391 | /* |
1392 | * Ignore host/starget busy state. | |
1393 | * Since block layer does not have a concept of fairness across | |
1394 | * multiple queues, congestion of host/starget needs to be handled | |
1395 | * in SCSI layer. | |
1396 | */ | |
1397 | if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) | |
f664a3cc | 1398 | return true; |
e36e0c80 | 1399 | |
f664a3cc | 1400 | return false; |
1da177e4 LT |
1401 | } |
1402 | ||
0d2810cd BVA |
1403 | /* |
1404 | * Block layer request completion callback. May be called from interrupt | |
1405 | * context. | |
1406 | */ | |
1407 | static void scsi_complete(struct request *rq) | |
1aea6434 | 1408 | { |
bed2213d | 1409 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
b8e162f9 | 1410 | enum scsi_disposition disposition; |
1aea6434 JA |
1411 | |
1412 | INIT_LIST_HEAD(&cmd->eh_entry); | |
1413 | ||
242f9dcb JA |
1414 | atomic_inc(&cmd->device->iodone_cnt); |
1415 | if (cmd->result) | |
1416 | atomic_inc(&cmd->device->ioerr_cnt); | |
1417 | ||
1aea6434 | 1418 | disposition = scsi_decide_disposition(cmd); |
2a242d59 | 1419 | if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd)) |
1aea6434 | 1420 | disposition = SUCCESS; |
91921e01 | 1421 | |
1aea6434 JA |
1422 | scsi_log_completion(cmd, disposition); |
1423 | ||
1424 | switch (disposition) { | |
4c7b4d63 BH |
1425 | case SUCCESS: |
1426 | scsi_finish_command(cmd); | |
1427 | break; | |
1428 | case NEEDS_RETRY: | |
1429 | scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); | |
1430 | break; | |
1431 | case ADD_TO_MLQUEUE: | |
1432 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); | |
1433 | break; | |
1434 | default: | |
1435 | scsi_eh_scmd_add(cmd); | |
1436 | break; | |
1aea6434 JA |
1437 | } |
1438 | } | |
1439 | ||
82042a2c | 1440 | /** |
ae6b4e69 | 1441 | * scsi_dispatch_cmd - Dispatch a command to the low-level driver. |
82042a2c CH |
1442 | * @cmd: command block we are dispatching. |
1443 | * | |
1444 | * Return: nonzero if the request was rejected and the device's queue needs | 
1445 | * to be plugged. | 
1446 | */ | |
1447 | static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |
1448 | { | |
1449 | struct Scsi_Host *host = cmd->device->host; | |
1450 | int rtn = 0; | |
1451 | ||
1452 | atomic_inc(&cmd->device->iorequest_cnt); | |
1453 | ||
1454 | /* check if the device is still usable */ | |
1455 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { | |
1456 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT | |
1457 | * returns an immediate error upwards, and signals | |
1458 | * that the device is no longer present */ | |
1459 | cmd->result = DID_NO_CONNECT << 16; | |
1460 | goto done; | |
1461 | } | |
1462 | ||
1463 | /* Check to see if the scsi lld made this device blocked. */ | |
1464 | if (unlikely(scsi_device_blocked(cmd->device))) { | |
1465 | /* | |
1466 | * in blocked state, the command is just put back on | |
1467 | * the device queue. The suspend state has already | |
1468 | * blocked the queue so future requests should not | |
1469 | * occur until the device transitions out of the | |
1470 | * suspend state. | |
1471 | */ | |
1472 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, | |
1473 | "queuecommand : device blocked\n")); | |
1474 | return SCSI_MLQUEUE_DEVICE_BUSY; | |
1475 | } | |
1476 | ||
1477 | /* Store the LUN value in cmnd, if needed. */ | |
1478 | if (cmd->device->lun_in_cdb) | |
1479 | cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | | |
1480 | (cmd->device->lun << 5 & 0xe0); | |
1481 | ||
1482 | scsi_log_send(cmd); | |
1483 | ||
1484 | /* | |
1485 | * Before we queue this command, check if the command | |
1486 | * length exceeds what the host adapter can handle. | |
1487 | */ | |
1488 | if (cmd->cmd_len > cmd->device->host->max_cmd_len) { | |
1489 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, | |
1490 | "queuecommand : command too long. " | |
1491 | "cdb_size=%d host->max_cmd_len=%d\n", | |
1492 | cmd->cmd_len, cmd->device->host->max_cmd_len)); | |
1493 | cmd->result = (DID_ABORT << 16); | |
1494 | goto done; | |
1495 | } | |
1496 | ||
1497 | if (unlikely(host->shost_state == SHOST_DEL)) { | |
1498 | cmd->result = (DID_NO_CONNECT << 16); | |
1499 | goto done; | |
1500 | ||
1501 | } | |
1502 | ||
1503 | trace_scsi_dispatch_cmd_start(cmd); | |
1504 | rtn = host->hostt->queuecommand(host, cmd); | |
1505 | if (rtn) { | |
1506 | trace_scsi_dispatch_cmd_error(cmd, rtn); | |
1507 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && | |
1508 | rtn != SCSI_MLQUEUE_TARGET_BUSY) | |
1509 | rtn = SCSI_MLQUEUE_HOST_BUSY; | |
1510 | ||
1511 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, | |
1512 | "queuecommand : request rejected\n")); | |
1513 | } | |
1514 | ||
1515 | return rtn; | |
1516 | done: | |
1517 | cmd->scsi_done(cmd); | |
1518 | return 0; | |
1519 | } | |
1520 | ||
be4c186c | 1521 | /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */ |
3dccdf53 | 1522 | static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) |
be4c186c | 1523 | { |
3dccdf53 | 1524 | return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * |
be4c186c BVA |
1525 | sizeof(struct scatterlist); |
1526 | } | |
1527 | ||
5843cc3d | 1528 | static blk_status_t scsi_prepare_cmd(struct request *req) |
d285203c CH |
1529 | { |
1530 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | |
1531 | struct scsi_device *sdev = req->q->queuedata; | |
1532 | struct Scsi_Host *shost = sdev->host; | |
d285203c CH |
1533 | struct scatterlist *sg; |
1534 | ||
08f78436 | 1535 | scsi_init_command(sdev, cmd); |
d285203c | 1536 | |
d285203c | 1537 | cmd->request = req; |
d285203c | 1538 | cmd->tag = req->tag; |
d285203c | 1539 | cmd->prot_op = SCSI_PROT_NORMAL; |
b6ba9b0e CH |
1540 | if (blk_rq_bytes(req)) |
1541 | cmd->sc_data_direction = rq_dma_dir(req); | |
1542 | else | |
1543 | cmd->sc_data_direction = DMA_NONE; | |
d285203c | 1544 | |
d285203c CH |
1545 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; |
1546 | cmd->sdb.table.sgl = sg; | |
1547 | ||
1548 | if (scsi_host_get_prot(shost)) { | |
d285203c CH |
1549 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); |
1550 | ||
1551 | cmd->prot_sdb->table.sgl = | |
1552 | (struct scatterlist *)(cmd->prot_sdb + 1); | |
1553 | } | |
1554 | ||
74e5e6c1 CH |
1555 | /* |
1556 | * Special handling for passthrough commands, which don't go to the ULP | |
1557 | * at all: | |
1558 | */ | |
1559 | if (blk_rq_is_scsi(req)) | |
1560 | return scsi_setup_scsi_cmnd(sdev, req); | |
1561 | ||
1562 | if (sdev->handler && sdev->handler->prep_fn) { | |
1563 | blk_status_t ret = sdev->handler->prep_fn(sdev, req); | |
fe052529 | 1564 | |
74e5e6c1 CH |
1565 | if (ret != BLK_STS_OK) |
1566 | return ret; | |
1567 | } | |
1568 | ||
1569 | cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd; | |
1570 | memset(cmd->cmnd, 0, BLK_MAX_CDB); | |
1571 | return scsi_cmd_to_driver(cmd)->init_command(cmd); | |
d285203c CH |
1572 | } |
1573 | ||
1574 | static void scsi_mq_done(struct scsi_cmnd *cmd) | |
1575 | { | |
15f73f5b CH |
1576 | if (unlikely(blk_should_fake_timeout(cmd->request->q))) |
1577 | return; | |
f1342709 KB |
1578 | if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) |
1579 | return; | |
d285203c | 1580 | trace_scsi_dispatch_cmd_done(cmd); |
15f73f5b | 1581 | blk_mq_complete_request(cmd->request); |
d285203c CH |
1582 | } |
1583 | ||
2a5a24aa | 1584 | static void scsi_mq_put_budget(struct request_queue *q, int budget_token) |
d285203c | 1585 | { |
0df21c86 | 1586 | struct scsi_device *sdev = q->queuedata; |
0df21c86 | 1587 | |
020b0f0a | 1588 | sbitmap_put(&sdev->budget_map, budget_token); |
0df21c86 ML |
1589 | } |
1590 | ||
2a5a24aa | 1591 | static int scsi_mq_get_budget(struct request_queue *q) |
0df21c86 | 1592 | { |
d285203c | 1593 | struct scsi_device *sdev = q->queuedata; |
020b0f0a | 1594 | int token = scsi_dev_queue_ready(q, sdev); |
d285203c | 1595 | |
020b0f0a ML |
1596 | if (token >= 0) |
1597 | return token; | |
ed5dd6a6 ML |
1598 | |
1599 | atomic_inc(&sdev->restarts); | |
1600 | ||
1601 | /* | |
1602 | * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). | |
1603 | * .restarts must be incremented before .device_busy is read because the | |
1604 | * code in scsi_run_queue_async() depends on the order of these operations. | |
1605 | */ | |
1606 | smp_mb__after_atomic(); | |
1607 | ||
1608 | /* | |
1609 | * If all in-flight requests originated from this LUN are completed | |
1610 | * before reading .device_busy, sdev->device_busy will be observed as | |
1611 | * zero, then blk_mq_delay_run_hw_queues() will dispatch this request | |
1612 | * soon. Otherwise, completion of one of these requests will observe | |
1613 | * the .restarts flag, and the request queue will be run for handling | |
1614 | * this request, see scsi_end_request(). | |
1615 | */ | |
8278807a | 1616 | if (unlikely(scsi_device_busy(sdev) == 0 && |
ed5dd6a6 ML |
1617 | !scsi_device_blocked(sdev))) |
1618 | blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY); | |
2a5a24aa | 1619 | return -1; |
0df21c86 ML |
1620 | } |
1621 | ||
d022d18c ML |
1622 | static void scsi_mq_set_rq_budget_token(struct request *req, int token) |
1623 | { | |
1624 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | |
1625 | ||
1626 | cmd->budget_token = token; | |
1627 | } | |
1628 | ||
1629 | static int scsi_mq_get_rq_budget_token(struct request *req) | |
1630 | { | |
1631 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | |
1632 | ||
1633 | return cmd->budget_token; | |
0df21c86 ML |
1634 | } |
1635 | ||
fc17b653 | 1636 | static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
74c45052 | 1637 | const struct blk_mq_queue_data *bd) |
d285203c | 1638 | { |
74c45052 | 1639 | struct request *req = bd->rq; |
d285203c CH |
1640 | struct request_queue *q = req->q; |
1641 | struct scsi_device *sdev = q->queuedata; | |
1642 | struct Scsi_Host *shost = sdev->host; | |
1643 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | |
fc17b653 | 1644 | blk_status_t ret; |
d285203c CH |
1645 | int reason; |
1646 | ||
2a5a24aa ML |
1647 | WARN_ON_ONCE(cmd->budget_token < 0); |
1648 | ||
c092d4ec CH |
1649 | /* |
1650 | * If the device is not in running state we will reject some or all | |
1651 | * commands. | |
1652 | */ | |
1653 | if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { | |
822bd2db | 1654 | ret = scsi_device_state_check(sdev, req); |
c092d4ec CH |
1655 | if (ret != BLK_STS_OK) |
1656 | goto out_put_budget; | |
1657 | } | |
d285203c | 1658 | |
fc17b653 | 1659 | ret = BLK_STS_RESOURCE; |
d285203c | 1660 | if (!scsi_target_queue_ready(shost, sdev)) |
826a70a0 | 1661 | goto out_put_budget; |
6eb045e0 | 1662 | if (!scsi_host_queue_ready(q, shost, sdev, cmd)) |
d285203c CH |
1663 | goto out_dec_target_busy; |
1664 | ||
e8064021 | 1665 | if (!(req->rq_flags & RQF_DONTPREP)) { |
5843cc3d | 1666 | ret = scsi_prepare_cmd(req); |
fc17b653 | 1667 | if (ret != BLK_STS_OK) |
d285203c | 1668 | goto out_dec_host_busy; |
e8064021 | 1669 | req->rq_flags |= RQF_DONTPREP; |
fe052529 | 1670 | } else { |
cd464d83 | 1671 | clear_bit(SCMD_STATE_COMPLETE, &cmd->state); |
d285203c CH |
1672 | } |
1673 | ||
8930a6c2 | 1674 | cmd->flags &= SCMD_PRESERVED_FLAGS; |
125c99bc CH |
1675 | if (sdev->simple_tags) |
1676 | cmd->flags |= SCMD_TAGGED; | |
8930a6c2 PB |
1677 | if (bd->last) |
1678 | cmd->flags |= SCMD_LAST; | |
b1dd2aac | 1679 | |
3a8dc5bb CH |
1680 | scsi_set_resid(cmd, 0); |
1681 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | |
d285203c CH |
1682 | cmd->scsi_done = scsi_mq_done; |
1683 | ||
ed7fb2d0 | 1684 | blk_mq_start_request(req); |
d285203c CH |
1685 | reason = scsi_dispatch_cmd(cmd); |
1686 | if (reason) { | |
1687 | scsi_set_blocked(cmd, reason); | |
fc17b653 | 1688 | ret = BLK_STS_RESOURCE; |
d285203c CH |
1689 | goto out_dec_host_busy; |
1690 | } | |
1691 | ||
fc17b653 | 1692 | return BLK_STS_OK; |
d285203c CH |
1693 | |
1694 | out_dec_host_busy: | |
6eb045e0 | 1695 | scsi_dec_host_busy(shost, cmd); |
d285203c CH |
1696 | out_dec_target_busy: |
1697 | if (scsi_target(sdev)->can_queue > 0) | |
1698 | atomic_dec(&scsi_target(sdev)->target_busy); | |
0df21c86 | 1699 | out_put_budget: |
2a5a24aa ML |
1700 | scsi_mq_put_budget(q, cmd->budget_token); |
1701 | cmd->budget_token = -1; | |
d285203c | 1702 | switch (ret) { |
fc17b653 CH |
1703 | case BLK_STS_OK: |
1704 | break; | |
1705 | case BLK_STS_RESOURCE: | |
0512a75b | 1706 | case BLK_STS_ZONE_RESOURCE: |
673235f9 | 1707 | if (scsi_device_blocked(sdev)) |
86ff7c2a | 1708 | ret = BLK_STS_DEV_RESOURCE; |
d285203c | 1709 | break; |
268940b8 HR |
1710 | case BLK_STS_AGAIN: |
1711 | scsi_req(req)->result = DID_BUS_BUSY << 16; | |
1712 | if (req->rq_flags & RQF_DONTPREP) | |
1713 | scsi_mq_uninit_cmd(cmd); | |
1714 | break; | |
fc17b653 | 1715 | default: |
be549d49 JL |
1716 | if (unlikely(!scsi_device_online(sdev))) |
1717 | scsi_req(req)->result = DID_NO_CONNECT << 16; | |
1718 | else | |
1719 | scsi_req(req)->result = DID_ERROR << 16; | |
d285203c | 1720 | /* |
be549d49 | 1721 | * Make sure to release all allocated resources when |
d285203c CH |
1722 | * we hit an error, as we will never see this command |
1723 | * again. | |
1724 | */ | |
e8064021 | 1725 | if (req->rq_flags & RQF_DONTPREP) |
d285203c | 1726 | scsi_mq_uninit_cmd(cmd); |
3f0dcfbc | 1727 | scsi_run_queue_async(sdev); |
d285203c | 1728 | break; |
d285203c CH |
1729 | } |
1730 | return ret; | |
1731 | } | |
1732 | ||
0152fb6b CH |
1733 | static enum blk_eh_timer_return scsi_timeout(struct request *req, |
1734 | bool reserved) | |
1735 | { | |
1736 | if (reserved) | |
1737 | return BLK_EH_RESET_TIMER; | |
1738 | return scsi_times_out(req); | |
1739 | } | |
1740 | ||
e7008ff5 BVA |
1741 | static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, |
1742 | unsigned int hctx_idx, unsigned int numa_node) | |
d285203c | 1743 | { |
d6296d39 | 1744 | struct Scsi_Host *shost = set->driver_data; |
d285203c | 1745 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
08f78436 | 1746 | struct scatterlist *sg; |
65ca846a | 1747 | int ret = 0; |
d285203c | 1748 | |
aaff5eba CH |
1749 | cmd->sense_buffer = |
1750 | kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node); | |
d285203c CH |
1751 | if (!cmd->sense_buffer) |
1752 | return -ENOMEM; | |
82ed4db4 | 1753 | cmd->req.sense = cmd->sense_buffer; |
08f78436 BVA |
1754 | |
1755 | if (scsi_host_get_prot(shost)) { | |
1756 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + | |
1757 | shost->hostt->cmd_size; | |
3dccdf53 | 1758 | cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); |
08f78436 BVA |
1759 | } |
1760 | ||
65ca846a BVA |
1761 | if (shost->hostt->init_cmd_priv) { |
1762 | ret = shost->hostt->init_cmd_priv(shost, cmd); | |
1763 | if (ret < 0) | |
aaff5eba | 1764 | kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); |
65ca846a BVA |
1765 | } |
1766 | ||
1767 | return ret; | |
d285203c CH |
1768 | } |
1769 | ||
e7008ff5 BVA |
1770 | static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, |
1771 | unsigned int hctx_idx) | |
d285203c | 1772 | { |
65ca846a | 1773 | struct Scsi_Host *shost = set->driver_data; |
d285203c CH |
1774 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1775 | ||
65ca846a BVA |
1776 | if (shost->hostt->exit_cmd_priv) |
1777 | shost->hostt->exit_cmd_priv(shost, cmd); | |
aaff5eba | 1778 | kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); |
d285203c CH |
1779 | } |
1780 | ||
af183095 KD |
1781 | |
1782 | static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx) | |
1783 | { | |
4309ea74 | 1784 | struct Scsi_Host *shost = hctx->driver_data; |
af183095 KD |
1785 | |
1786 | if (shost->hostt->mq_poll) | |
1787 | return shost->hostt->mq_poll(shost, hctx->queue_num); | |
1788 | ||
1789 | return 0; | |
1790 | } | |
1791 | ||
4309ea74 KD |
1792 | static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
1793 | unsigned int hctx_idx) | |
1794 | { | |
1795 | struct Scsi_Host *shost = data; | |
1796 | ||
1797 | hctx->driver_data = shost; | |
1798 | return 0; | |
1799 | } | |
1800 | ||
2d9c5c20 CH |
1801 | static int scsi_map_queues(struct blk_mq_tag_set *set) |
1802 | { | |
1803 | struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); | |
1804 | ||
1805 | if (shost->hostt->map_queues) | |
1806 | return shost->hostt->map_queues(shost); | |
99bbf484 | 1807 | return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); |
2d9c5c20 CH |
1808 | } |
1809 | ||
d48777a6 | 1810 | void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) |
1da177e4 | 1811 | { |
6f381fa3 | 1812 | struct device *dev = shost->dma_dev; |
1da177e4 | 1813 | |
a8474ce2 JA |
1814 | /* |
1815 | * this limit is imposed by hardware restrictions | |
1816 | */ | |
8a78362c | 1817 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, |
65e8617f | 1818 | SG_MAX_SEGMENTS)); |
a8474ce2 | 1819 | |
13f05c8d MP |
1820 | if (scsi_host_prot_dma(shost)) { |
1821 | shost->sg_prot_tablesize = | |
1822 | min_not_zero(shost->sg_prot_tablesize, | |
1823 | (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); | |
1824 | BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); | |
1825 | blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); | |
1826 | } | |
1827 | ||
1b5d9a6e CH |
1828 | if (dev->dma_mask) { |
1829 | shost->max_sectors = min_t(unsigned int, shost->max_sectors, | |
1830 | dma_max_mapping_size(dev) >> SECTOR_SHIFT); | |
1831 | } | |
086fa5ff | 1832 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
1da177e4 | 1833 | blk_queue_segment_boundary(q, shost->dma_boundary); |
99c84dbd | 1834 | dma_set_seg_boundary(dev, shost->dma_boundary); |
1da177e4 | 1835 | |
a8cf59a6 | 1836 | blk_queue_max_segment_size(q, shost->max_segment_size); |
7ad388d8 CH |
1837 | blk_queue_virt_boundary(q, shost->virt_boundary_mask); |
1838 | dma_set_max_seg_size(dev, queue_max_segment_size(q)); | |
465ff318 JB |
1839 | |
1840 | /* | |
90addc6b HC |
1841 | * Set a reasonable default alignment: The larger of 4-byte (dword), | 
1842 | * which is a common minimum for HBAs, and the minimum DMA alignment, | |
1843 | * which is set by the platform. | |
1844 | * | |
1845 | * Devices that require a bigger alignment can increase it later. | |
465ff318 | 1846 | */ |
90addc6b | 1847 | blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); |
d285203c | 1848 | } |
d48777a6 | 1849 | EXPORT_SYMBOL_GPL(__scsi_init_queue); |
465ff318 | 1850 | |
8930a6c2 PB |
1851 | static const struct blk_mq_ops scsi_mq_ops_no_commit = { |
1852 | .get_budget = scsi_mq_get_budget, | |
1853 | .put_budget = scsi_mq_put_budget, | |
1854 | .queue_rq = scsi_queue_rq, | |
0d2810cd | 1855 | .complete = scsi_complete, |
8930a6c2 PB |
1856 | .timeout = scsi_timeout, |
1857 | #ifdef CONFIG_BLK_DEBUG_FS | |
1858 | .show_rq = scsi_show_rq, | |
1859 | #endif | |
1860 | .init_request = scsi_mq_init_request, | |
1861 | .exit_request = scsi_mq_exit_request, | |
1862 | .initialize_rq_fn = scsi_initialize_rq, | |
82a9ac71 | 1863 | .cleanup_rq = scsi_cleanup_rq, |
8930a6c2 PB |
1864 | .busy = scsi_mq_lld_busy, |
1865 | .map_queues = scsi_map_queues, | |
4309ea74 | 1866 | .init_hctx = scsi_init_hctx, |
af183095 | 1867 | .poll = scsi_mq_poll, |
d022d18c ML |
1868 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
1869 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, | |
8930a6c2 PB |
1870 | }; |
1871 | ||
1872 | ||
1873 | static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) | |
1874 | { | |
4309ea74 | 1875 | struct Scsi_Host *shost = hctx->driver_data; |
8930a6c2 PB |
1876 | |
1877 | shost->hostt->commit_rqs(shost, hctx->queue_num); | |
1878 | } | |
1879 | ||
f363b089 | 1880 | static const struct blk_mq_ops scsi_mq_ops = { |
0df21c86 ML |
1881 | .get_budget = scsi_mq_get_budget, |
1882 | .put_budget = scsi_mq_put_budget, | |
d285203c | 1883 | .queue_rq = scsi_queue_rq, |
8930a6c2 | 1884 | .commit_rqs = scsi_commit_rqs, |
0d2810cd | 1885 | .complete = scsi_complete, |
0152fb6b | 1886 | .timeout = scsi_timeout, |
0eebd005 BVA |
1887 | #ifdef CONFIG_BLK_DEBUG_FS |
1888 | .show_rq = scsi_show_rq, | |
1889 | #endif | |
e7008ff5 BVA |
1890 | .init_request = scsi_mq_init_request, |
1891 | .exit_request = scsi_mq_exit_request, | |
ca18d6f7 | 1892 | .initialize_rq_fn = scsi_initialize_rq, |
b7e9e1fb | 1893 | .cleanup_rq = scsi_cleanup_rq, |
3a7ea2c4 | 1894 | .busy = scsi_mq_lld_busy, |
2d9c5c20 | 1895 | .map_queues = scsi_map_queues, |
4309ea74 | 1896 | .init_hctx = scsi_init_hctx, |
af183095 | 1897 | .poll = scsi_mq_poll, |
d022d18c ML |
1898 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
1899 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, | |
d285203c CH |
1900 | }; |
1901 | ||
1902 | struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) | |
1903 | { | |
1904 | sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); | |
1905 | if (IS_ERR(sdev->request_queue)) | |
1906 | return NULL; | |
1907 | ||
1908 | sdev->request_queue->queuedata = sdev; | |
1909 | __scsi_init_queue(sdev->host, sdev->request_queue); | |
17cb960f | 1910 | blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue); |
d285203c CH |
1911 | return sdev->request_queue; |
1912 | } | |
1913 | ||
1914 | int scsi_mq_setup_tags(struct Scsi_Host *shost) | |
1915 | { | |
be4c186c | 1916 | unsigned int cmd_size, sgl_size; |
840e1b55 | 1917 | struct blk_mq_tag_set *tag_set = &shost->tag_set; |
d285203c | 1918 | |
9393c8de MS |
1919 | sgl_size = max_t(unsigned int, sizeof(struct scatterlist), |
1920 | scsi_mq_inline_sgl_size(shost)); | |
d285203c CH |
1921 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; |
1922 | if (scsi_host_get_prot(shost)) | |
92524fa1 ML |
1923 | cmd_size += sizeof(struct scsi_data_buffer) + |
1924 | sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; | |
d285203c | 1925 | |
840e1b55 | 1926 | memset(tag_set, 0, sizeof(*tag_set)); |
8930a6c2 | 1927 | if (shost->hostt->commit_rqs) |
840e1b55 | 1928 | tag_set->ops = &scsi_mq_ops; |
8930a6c2 | 1929 | else |
840e1b55 YB |
1930 | tag_set->ops = &scsi_mq_ops_no_commit; |
1931 | tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; | |
af183095 | 1932 | tag_set->nr_maps = shost->nr_maps ? : 1; |
840e1b55 YB |
1933 | tag_set->queue_depth = shost->can_queue; |
1934 | tag_set->cmd_size = cmd_size; | |
1935 | tag_set->numa_node = NUMA_NO_NODE; | |
1936 | tag_set->flags = BLK_MQ_F_SHOULD_MERGE; | |
1937 | tag_set->flags |= | |
24391c0d | 1938 | BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); |
840e1b55 | 1939 | tag_set->driver_data = shost; |
bdb01301 HR |
1940 | if (shost->host_tagset) |
1941 | tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; | |
d285203c | 1942 | |
840e1b55 | 1943 | return blk_mq_alloc_tag_set(tag_set); |
d285203c CH |
1944 | } |
1945 | ||
1946 | void scsi_mq_destroy_tags(struct Scsi_Host *shost) | |
1947 | { | |
1948 | blk_mq_free_tag_set(&shost->tag_set); | |
1949 | } | |
1950 | ||
857de6e0 HR |
1951 | /** |
1952 | * scsi_device_from_queue - return sdev associated with a request_queue | |
1953 | * @q: The request queue to return the sdev from | |
1954 | * | |
1955 | * Return the sdev associated with a request queue or NULL if the | |
1956 | * request_queue does not reference a SCSI device. | |
1957 | */ | |
1958 | struct scsi_device *scsi_device_from_queue(struct request_queue *q) | |
1959 | { | |
1960 | struct scsi_device *sdev = NULL; | |
1961 | ||
6b6fa7a5 SM |
1962 | if (q->mq_ops == &scsi_mq_ops_no_commit || |
1963 | q->mq_ops == &scsi_mq_ops) | |
857de6e0 HR |
1964 | sdev = q->queuedata; |
1965 | if (!sdev || !get_device(&sdev->sdev_gendev)) | |
1966 | sdev = NULL; | |
1967 | ||
1968 | return sdev; | |
1969 | } | |
857de6e0 | 1970 | |
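/*
 * Illustrative sketch only (not part of this file): a hypothetical caller
 * that wants to know whether a request_queue belongs to a SCSI device.
 * scsi_device_from_queue() takes a reference via get_device() on success,
 * so the caller must drop it again when done.
 */
#if 0
static bool example_queue_is_scsi(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return false;

	/* ... inspect sdev->vendor, sdev->model, sdev->type, ... */

	put_device(&sdev->sdev_gendev);
	return true;
}
#endif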
ea941016 AA |
1971 | /** |
1972 | * scsi_block_requests - Utility function used by low-level drivers to prevent | |
1973 | * further commands from being queued to the device. | |
1974 | * @shost: host in question | |
1da177e4 | 1975 | * |
ea941016 AA |
1976 | * There is no timer nor any other means by which the requests get unblocked |
1977 | * other than the low-level driver calling scsi_unblock_requests(). | |
1da177e4 LT |
1978 | */ |
1979 | void scsi_block_requests(struct Scsi_Host *shost) | |
1980 | { | |
1981 | shost->host_self_blocked = 1; | |
1982 | } | |
1983 | EXPORT_SYMBOL(scsi_block_requests); | |
1984 | ||
ea941016 AA |
1985 | /** |
1986 | * scsi_unblock_requests - Utility function used by low-level drivers to allow | |
1987 | * further commands to be queued to the device. | |
1988 | * @shost: host in question | |
1da177e4 | 1989 | * |
ea941016 AA |
1990 | * There is no timer nor any other means by which the requests get unblocked |
1991 | * other than the low-level driver calling scsi_unblock_requests(). This is done | |
1992 | * as an API function so that changes to the internals of the scsi mid-layer | |
1993 | * won't require wholesale changes to drivers that use this feature. | |
1da177e4 LT |
1994 | */ |
1995 | void scsi_unblock_requests(struct Scsi_Host *shost) | |
1996 | { | |
1997 | shost->host_self_blocked = 0; | |
1998 | scsi_run_host_queues(shost); | |
1999 | } | |
2000 | EXPORT_SYMBOL(scsi_unblock_requests); | |
2001 | ||
1da177e4 LT |
2002 | void scsi_exit_queue(void) |
2003 | { | |
0a6ac4ee | 2004 | kmem_cache_destroy(scsi_sense_cache); |
1da177e4 | 2005 | } |
5baba830 JB |
2006 | |
2007 | /** | |
2008 | * scsi_mode_select - issue a mode select | |
2009 | * @sdev: SCSI device to be queried | |
2010 | * @pf: Page format bit (1 == standard, 0 == vendor specific) | |
2011 | * @sp: Save page bit (0 == don't save, 1 == save) | |
2012 | * @modepage: mode page being requested | |
2013 | * @buffer: request buffer (may not be smaller than eight bytes) | |
2014 | * @len: length of request buffer. | |
2015 | * @timeout: command timeout | |
2016 | * @retries: number of retries before failing | |
2017 | * @data: returns a structure abstracting the mode header data | |
eb44820c | 2018 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
5baba830 JB |
2019 | * must be SCSI_SENSE_BUFFERSIZE big. |
2020 | * | |
2021 | * Returns zero if successful; negative error number or scsi | |
2022 | * status on error | |
2023 | * | |
2024 | */ | |
2025 | int | |
2026 | scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, | |
2027 | unsigned char *buffer, int len, int timeout, int retries, | |
2028 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) | |
2029 | { | |
2030 | unsigned char cmd[10]; | |
2031 | unsigned char *real_buffer; | |
2032 | int ret; | |
2033 | ||
2034 | memset(cmd, 0, sizeof(cmd)); | |
2035 | cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); | |
2036 | ||
2037 | if (sdev->use_10_for_ms) { | |
2038 | if (len > 65535) | |
2039 | return -EINVAL; | |
2040 | real_buffer = kmalloc(8 + len, GFP_KERNEL); | |
2041 | if (!real_buffer) | |
2042 | return -ENOMEM; | |
2043 | memcpy(real_buffer + 8, buffer, len); | |
2044 | len += 8; | |
2045 | real_buffer[0] = 0; | |
2046 | real_buffer[1] = 0; | |
2047 | real_buffer[2] = data->medium_type; | |
2048 | real_buffer[3] = data->device_specific; | |
2049 | real_buffer[4] = data->longlba ? 0x01 : 0; | |
2050 | real_buffer[5] = 0; | |
2051 | real_buffer[6] = data->block_descriptor_length >> 8; | |
2052 | real_buffer[7] = data->block_descriptor_length; | |
2053 | ||
2054 | cmd[0] = MODE_SELECT_10; | |
2055 | cmd[7] = len >> 8; | |
2056 | cmd[8] = len; | |
2057 | } else { | |
2058 | if (len > 255 || data->block_descriptor_length > 255 || | |
2059 | data->longlba) | |
2060 | return -EINVAL; | |
2061 | ||
2062 | real_buffer = kmalloc(4 + len, GFP_KERNEL); | |
2063 | if (!real_buffer) | |
2064 | return -ENOMEM; | |
2065 | memcpy(real_buffer + 4, buffer, len); | |
2066 | len += 4; | |
2067 | real_buffer[0] = 0; | |
2068 | real_buffer[1] = data->medium_type; | |
2069 | real_buffer[2] = data->device_specific; | |
2070 | real_buffer[3] = data->block_descriptor_length; | |
5baba830 JB |
2071 | |
2072 | cmd[0] = MODE_SELECT; | |
2073 | cmd[4] = len; | |
2074 | } | |
2075 | ||
2076 | ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, | |
f4f4e47e | 2077 | sshdr, timeout, retries, NULL); |
5baba830 JB |
2078 | kfree(real_buffer); |
2079 | return ret; | |
2080 | } | |
2081 | EXPORT_SYMBOL_GPL(scsi_mode_select); | |
2082 | ||
1da177e4 | 2083 | /** |
eb44820c | 2084 | * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary. | 
1cf72699 | 2085 | * @sdev: SCSI device to be queried |
1da177e4 LT |
2086 | * @dbd: set if mode sense will allow block descriptors to be returned |
2087 | * @modepage: mode page being requested | |
2088 | * @buffer: request buffer (may not be smaller than eight bytes) | |
2089 | * @len: length of request buffer. | |
2090 | * @timeout: command timeout | |
2091 | * @retries: number of retries before failing | |
2092 | * @data: returns a structure abstracting the mode header data | |
eb44820c | 2093 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
1cf72699 | 2094 | * must be SCSI_SENSE_BUFFERSIZE big. |
1da177e4 LT |
2095 | * |
2096 | * Returns zero if successful, or a non-zero result code on failure. | 
2097 | * The mode data header length (either 4 or 8, depending on whether a | 
2098 | * six or ten byte command was issued) is returned in @data->header_length. | 
eb44820c | 2099 | */ |
1da177e4 | 2100 | int |
1cf72699 | 2101 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, |
1da177e4 | 2102 | unsigned char *buffer, int len, int timeout, int retries, |
5baba830 JB |
2103 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
2104 | { | |
1da177e4 LT |
2105 | unsigned char cmd[12]; |
2106 | int use_10_for_ms; | |
2107 | int header_length; | |
0ae80ba9 | 2108 | int result, retry_count = retries; |
ea73a9f2 | 2109 | struct scsi_sense_hdr my_sshdr; |
1da177e4 LT |
2110 | |
2111 | memset(data, 0, sizeof(*data)); | |
2112 | memset(&cmd[0], 0, 12); | |
0ec96913 CG |
2113 | |
2114 | dbd = sdev->set_dbd_for_ms ? 8 : dbd; | |
1da177e4 LT |
2115 | cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ |
2116 | cmd[2] = modepage; | |
2117 | ||
ea73a9f2 JB |
2118 | /* caller might not be interested in sense, but we need it */ |
2119 | if (!sshdr) | |
2120 | sshdr = &my_sshdr; | |
2121 | ||
1da177e4 | 2122 | retry: |
1cf72699 | 2123 | use_10_for_ms = sdev->use_10_for_ms; |
1da177e4 LT |
2124 | |
2125 | if (use_10_for_ms) { | |
2126 | if (len < 8) | |
2127 | len = 8; | |
2128 | ||
2129 | cmd[0] = MODE_SENSE_10; | |
2130 | cmd[8] = len; | |
2131 | header_length = 8; | |
2132 | } else { | |
2133 | if (len < 4) | |
2134 | len = 4; | |
2135 | ||
2136 | cmd[0] = MODE_SENSE; | |
2137 | cmd[4] = len; | |
2138 | header_length = 4; | |
2139 | } | |
2140 | ||
1da177e4 LT |
2141 | memset(buffer, 0, len); |
2142 | ||
1cf72699 | 2143 | result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, |
f4f4e47e | 2144 | sshdr, timeout, retries, NULL); |
1da177e4 LT |
2145 | |
2146 | /* This code looks awful: what it's doing is making sure an | |
2147 | * ILLEGAL REQUEST sense return identifies the actual command | |
2148 | * byte as the problem. MODE_SENSE commands can return | |
2149 | * ILLEGAL REQUEST if the code page isn't supported */ | |
2150 | ||
1cf72699 | 2151 | if (use_10_for_ms && !scsi_status_is_good(result) && |
c65be1a6 | 2152 | driver_byte(result) == DRIVER_SENSE) { |
ea73a9f2 JB |
2153 | if (scsi_sense_valid(sshdr)) { |
2154 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && | |
2155 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { | |
4c7b4d63 | 2156 | /* |
1da177e4 LT |
2157 | * Invalid command operation code |
2158 | */ | |
1cf72699 | 2159 | sdev->use_10_for_ms = 0; |
1da177e4 LT |
2160 | goto retry; |
2161 | } | |
2162 | } | |
2163 | } | |
2164 | ||
4c7b4d63 | 2165 | if (scsi_status_is_good(result)) { |
6d73c851 AV |
2166 | if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && |
2167 | (modepage == 6 || modepage == 8))) { | |
2168 | /* Initio breakage? */ | |
2169 | header_length = 0; | |
2170 | data->length = 13; | |
2171 | data->medium_type = 0; | |
2172 | data->device_specific = 0; | |
2173 | data->longlba = 0; | |
2174 | data->block_descriptor_length = 0; | |
4c7b4d63 | 2175 | } else if (use_10_for_ms) { |
1da177e4 LT |
2176 | data->length = buffer[0]*256 + buffer[1] + 2; |
2177 | data->medium_type = buffer[2]; | |
2178 | data->device_specific = buffer[3]; | |
2179 | data->longlba = buffer[4] & 0x01; | |
2180 | data->block_descriptor_length = buffer[6]*256 | |
2181 | + buffer[7]; | |
2182 | } else { | |
2183 | data->length = buffer[0] + 1; | |
2184 | data->medium_type = buffer[1]; | |
2185 | data->device_specific = buffer[2]; | |
2186 | data->block_descriptor_length = buffer[3]; | |
2187 | } | |
6d73c851 | 2188 | data->header_length = header_length; |
0ae80ba9 HR |
2189 | } else if ((status_byte(result) == CHECK_CONDITION) && |
2190 | scsi_sense_valid(sshdr) && | |
2191 | sshdr->sense_key == UNIT_ATTENTION && retry_count) { | |
2192 | retry_count--; | |
2193 | goto retry; | |
1da177e4 LT |
2194 | } |
2195 | ||
1cf72699 | 2196 | return result; |
1da177e4 LT |
2197 | } |
2198 | EXPORT_SYMBOL(scsi_mode_sense); | |
2199 | ||
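/*
 * Illustrative sketch only (not part of this file): a hypothetical caller
 * reading the Caching mode page (0x08).  The buffer size, timeout and retry
 * count are arbitrary example values; the WCE bit layout follows SPC.
 */
#if 0
static int example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res, offset;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* The mode page itself follows the header and block descriptors. */
	offset = data.header_length + data.block_descriptor_length;
	return !!(buffer[offset + 2] & 0x04);	/* WCE bit */
}
#endif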
001aac25 JB |
2200 | /** |
2201 | * scsi_test_unit_ready - test if unit is ready | |
2202 | * @sdev: scsi device to change the state of. | |
2203 | * @timeout: command timeout | |
2204 | * @retries: number of retries before failing | |
74a78ebd | 2205 | * @sshdr: output pointer for decoded sense information. | 
001aac25 JB |
2206 | * |
2207 | * Returns zero if successful or an error if TUR failed. For | 
9f8a2c23 | 2208 | * removable media, UNIT_ATTENTION sets ->changed flag. |
001aac25 | 2209 | **/ |
1da177e4 | 2210 | int |
001aac25 | 2211 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, |
74a78ebd | 2212 | struct scsi_sense_hdr *sshdr) |
1da177e4 | 2213 | { |
1da177e4 LT |
2214 | char cmd[] = { |
2215 | TEST_UNIT_READY, 0, 0, 0, 0, 0, | |
2216 | }; | |
2217 | int result; | |
001aac25 | 2218 | |
001aac25 JB |
2219 | /* try to eat the UNIT_ATTENTION if there are enough retries */ |
2220 | do { | |
2221 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, | |
9b91fd34 | 2222 | timeout, 1, NULL); |
32c356d7 JB |
2223 | if (sdev->removable && scsi_sense_valid(sshdr) && |
2224 | sshdr->sense_key == UNIT_ATTENTION) | |
2225 | sdev->changed = 1; | |
2226 | } while (scsi_sense_valid(sshdr) && | |
2227 | sshdr->sense_key == UNIT_ATTENTION && --retries); | |
001aac25 | 2228 | |
1da177e4 LT |
2229 | return result; |
2230 | } | |
2231 | EXPORT_SYMBOL(scsi_test_unit_ready); | |
2232 | ||
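/*
 * Illustrative sketch only (not part of this file): a hypothetical media
 * poll.  A result of zero means GOOD status, i.e. the unit is ready; the
 * timeout and retry count are arbitrary example values.
 */
#if 0
static bool example_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
}
#endif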
2233 | /** | |
eb44820c | 2234 | * scsi_device_set_state - Take the given device through the device state model. |
1da177e4 LT |
2235 | * @sdev: scsi device to change the state of. |
2236 | * @state: state to change to. | |
2237 | * | |
23cb27fd | 2238 | * Returns zero if successful or an error if the requested |
1da177e4 | 2239 | * transition is illegal. |
eb44820c | 2240 | */ |
1da177e4 LT |
2241 | int |
2242 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |
2243 | { | |
2244 | enum scsi_device_state oldstate = sdev->sdev_state; | |
2245 | ||
2246 | if (state == oldstate) | |
2247 | return 0; | |
2248 | ||
2249 | switch (state) { | |
2250 | case SDEV_CREATED: | |
6f4267e3 JB |
2251 | switch (oldstate) { |
2252 | case SDEV_CREATED_BLOCK: | |
2253 | break; | |
2254 | default: | |
2255 | goto illegal; | |
2256 | } | |
2257 | break; | |
4c7b4d63 | 2258 | |
1da177e4 LT |
2259 | case SDEV_RUNNING: |
2260 | switch (oldstate) { | |
2261 | case SDEV_CREATED: | |
2262 | case SDEV_OFFLINE: | |
1b8d2620 | 2263 | case SDEV_TRANSPORT_OFFLINE: |
1da177e4 LT |
2264 | case SDEV_QUIESCE: |
2265 | case SDEV_BLOCK: | |
2266 | break; | |
2267 | default: | |
2268 | goto illegal; | |
2269 | } | |
2270 | break; | |
2271 | ||
2272 | case SDEV_QUIESCE: | |
2273 | switch (oldstate) { | |
2274 | case SDEV_RUNNING: | |
2275 | case SDEV_OFFLINE: | |
1b8d2620 | 2276 | case SDEV_TRANSPORT_OFFLINE: |
1da177e4 LT |
2277 | break; |
2278 | default: | |
2279 | goto illegal; | |
2280 | } | |
2281 | break; | |
2282 | ||
2283 | case SDEV_OFFLINE: | |
1b8d2620 | 2284 | case SDEV_TRANSPORT_OFFLINE: |
1da177e4 LT |
2285 | switch (oldstate) { |
2286 | case SDEV_CREATED: | |
2287 | case SDEV_RUNNING: | |
2288 | case SDEV_QUIESCE: | |
2289 | case SDEV_BLOCK: | |
2290 | break; | |
2291 | default: | |
2292 | goto illegal; | |
2293 | } | |
2294 | break; | |
2295 | ||
2296 | case SDEV_BLOCK: | |
2297 | switch (oldstate) { | |
1da177e4 | 2298 | case SDEV_RUNNING: |
6f4267e3 | 2299 | case SDEV_CREATED_BLOCK: |
6cbb7aed | 2300 | case SDEV_QUIESCE: |
a33e5bfb | 2301 | case SDEV_OFFLINE: |
6f4267e3 JB |
2302 | break; |
2303 | default: | |
2304 | goto illegal; | |
2305 | } | |
2306 | break; | |
2307 | ||
2308 | case SDEV_CREATED_BLOCK: | |
2309 | switch (oldstate) { | |
2310 | case SDEV_CREATED: | |
1da177e4 LT |
2311 | break; |
2312 | default: | |
2313 | goto illegal; | |
2314 | } | |
2315 | break; | |
2316 | ||
2317 | case SDEV_CANCEL: | |
2318 | switch (oldstate) { | |
2319 | case SDEV_CREATED: | |
2320 | case SDEV_RUNNING: | |
9ea72909 | 2321 | case SDEV_QUIESCE: |
1da177e4 | 2322 | case SDEV_OFFLINE: |
1b8d2620 | 2323 | case SDEV_TRANSPORT_OFFLINE: |
1da177e4 LT |
2324 | break; |
2325 | default: | |
2326 | goto illegal; | |
2327 | } | |
2328 | break; | |
2329 | ||
2330 | case SDEV_DEL: | |
2331 | switch (oldstate) { | |
309bd271 BK |
2332 | case SDEV_CREATED: |
2333 | case SDEV_RUNNING: | |
2334 | case SDEV_OFFLINE: | |
1b8d2620 | 2335 | case SDEV_TRANSPORT_OFFLINE: |
1da177e4 | 2336 | case SDEV_CANCEL: |
255ee932 | 2337 | case SDEV_BLOCK: |
0516c08d | 2338 | case SDEV_CREATED_BLOCK: |
1da177e4 LT |
2339 | break; |
2340 | default: | |
2341 | goto illegal; | |
2342 | } | |
2343 | break; | |
2344 | ||
2345 | } | |
b0962c53 | 2346 | sdev->offline_already = false; |
1da177e4 LT |
2347 | sdev->sdev_state = state; |
2348 | return 0; | |
2349 | ||
2350 | illegal: | |
91921e01 | 2351 | SCSI_LOG_ERROR_RECOVERY(1, |
9ccfc756 | 2352 | sdev_printk(KERN_ERR, sdev, |
91921e01 | 2353 | "Illegal state transition %s->%s", |
9ccfc756 JB |
2354 | scsi_device_state_name(oldstate), |
2355 | scsi_device_state_name(state)) | |
1da177e4 LT |
2356 | ); |
2357 | return -EINVAL; | |
2358 | } | |
2359 | EXPORT_SYMBOL(scsi_device_set_state); | |
2360 | ||
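/*
 * Illustrative sketch only (not part of this file): callers outside the
 * state machine itself are expected to hold sdev->state_mutex around a
 * state change, as the quiesce/block helpers below do.  The target state
 * (SDEV_OFFLINE here) is just an example.
 */
#if 0
static int example_force_offline(struct scsi_device *sdev)
{
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_OFFLINE);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
#endif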
a341cd0f | 2361 | /** |
ae6b4e69 | 2362 | * scsi_evt_emit - emit a single SCSI device uevent |
a341cd0f JG |
2363 | * @sdev: associated SCSI device |
2364 | * @evt: event to emit | |
2365 | * | |
2366 | * Send a single uevent (scsi_event) to the associated scsi_device. | |
2367 | */ | |
2368 | static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) | |
2369 | { | |
2370 | int idx = 0; | |
2371 | char *envp[3]; | |
2372 | ||
2373 | switch (evt->evt_type) { | |
2374 | case SDEV_EVT_MEDIA_CHANGE: | |
2375 | envp[idx++] = "SDEV_MEDIA_CHANGE=1"; | |
2376 | break; | |
279afdfe | 2377 | case SDEV_EVT_INQUIRY_CHANGE_REPORTED: |
d3d32891 | 2378 | scsi_rescan_device(&sdev->sdev_gendev); |
279afdfe EM |
2379 | envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; |
2380 | break; | |
2381 | case SDEV_EVT_CAPACITY_CHANGE_REPORTED: | |
2382 | envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; | |
2383 | break; | |
2384 | case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: | |
2385 | envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; | |
2386 | break; | |
2387 | case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: | |
2388 | envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; | |
2389 | break; | |
2390 | case SDEV_EVT_LUN_CHANGE_REPORTED: | |
2391 | envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; | |
2392 | break; | |
14c3e677 HR |
2393 | case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: |
2394 | envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED"; | |
2395 | break; | |
cf3431bb HR |
2396 | case SDEV_EVT_POWER_ON_RESET_OCCURRED: |
2397 | envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED"; | |
2398 | break; | |
a341cd0f JG |
2399 | default: |
2400 | /* do nothing */ | |
2401 | break; | |
2402 | } | |
2403 | ||
2404 | envp[idx++] = NULL; | |
2405 | ||
2406 | kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); | |
2407 | } | |
2408 | ||
2409 | /** | |
ae6b4e69 | 2410 | * scsi_evt_thread - send a uevent for each scsi event |
a341cd0f JG |
2411 | * @work: work struct for scsi_device |
2412 | * | |
2413 | * Dispatch queued events to their associated scsi_device kobjects | |
2414 | * as uevents. | |
2415 | */ | |
2416 | void scsi_evt_thread(struct work_struct *work) | |
2417 | { | |
2418 | struct scsi_device *sdev; | |
279afdfe | 2419 | enum scsi_device_event evt_type; |
a341cd0f JG |
2420 | LIST_HEAD(event_list); |
2421 | ||
2422 | sdev = container_of(work, struct scsi_device, event_work); | |
2423 | ||
279afdfe EM |
2424 | for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) |
2425 | if (test_and_clear_bit(evt_type, sdev->pending_events)) | |
2426 | sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); | |
2427 | ||
a341cd0f JG |
2428 | while (1) { |
2429 | struct scsi_event *evt; | |
2430 | struct list_head *this, *tmp; | |
2431 | unsigned long flags; | |
2432 | ||
2433 | spin_lock_irqsave(&sdev->list_lock, flags); | |
2434 | list_splice_init(&sdev->event_list, &event_list); | |
2435 | spin_unlock_irqrestore(&sdev->list_lock, flags); | |
2436 | ||
2437 | if (list_empty(&event_list)) | |
2438 | break; | |
2439 | ||
2440 | list_for_each_safe(this, tmp, &event_list) { | |
2441 | evt = list_entry(this, struct scsi_event, node); | |
2442 | list_del(&evt->node); | |
2443 | scsi_evt_emit(sdev, evt); | |
2444 | kfree(evt); | |
2445 | } | |
2446 | } | |
2447 | } | |
2448 | ||
2449 | /** | |
2450 | * sdev_evt_send - send asserted event to uevent thread | |
2451 | * @sdev: scsi_device event occurred on | |
2452 | * @evt: event to send | |
2453 | * | |
2454 | * Assert scsi device event asynchronously. | |
2455 | */ | |
2456 | void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) | |
2457 | { | |
2458 | unsigned long flags; | |
2459 | ||
4d1566ed KS |
2460 | #if 0 |
2461 | /* FIXME: currently this check eliminates all media change events | |
2462 | * for polled devices. Need to update to discriminate between AN | |
2463 | * and polled events */ | |
a341cd0f JG |
2464 | if (!test_bit(evt->evt_type, sdev->supported_events)) { |
2465 | kfree(evt); | |
2466 | return; | |
2467 | } | |
4d1566ed | 2468 | #endif |
a341cd0f JG |
2469 | |
2470 | spin_lock_irqsave(&sdev->list_lock, flags); | |
2471 | list_add_tail(&evt->node, &sdev->event_list); | |
2472 | schedule_work(&sdev->event_work); | |
2473 | spin_unlock_irqrestore(&sdev->list_lock, flags); | |
2474 | } | |
2475 | EXPORT_SYMBOL_GPL(sdev_evt_send); | |
2476 | ||
2477 | /** | |
2478 | * sdev_evt_alloc - allocate a new scsi event | |
2479 | * @evt_type: type of event to allocate | |
2480 | * @gfpflags: GFP flags for allocation | |
2481 | * | |
2482 | * Allocates and returns a new scsi_event. | |
2483 | */ | |
2484 | struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, | |
2485 | gfp_t gfpflags) | |
2486 | { | |
2487 | struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); | |
2488 | if (!evt) | |
2489 | return NULL; | |
2490 | ||
2491 | evt->evt_type = evt_type; | |
2492 | INIT_LIST_HEAD(&evt->node); | |
2493 | ||
2494 | /* evt_type-specific initialization, if any */ | |
2495 | switch (evt_type) { | |
2496 | case SDEV_EVT_MEDIA_CHANGE: | |
279afdfe EM |
2497 | case SDEV_EVT_INQUIRY_CHANGE_REPORTED: |
2498 | case SDEV_EVT_CAPACITY_CHANGE_REPORTED: | |
2499 | case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: | |
2500 | case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: | |
2501 | case SDEV_EVT_LUN_CHANGE_REPORTED: | |
14c3e677 | 2502 | case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: |
cf3431bb | 2503 | case SDEV_EVT_POWER_ON_RESET_OCCURRED: |
a341cd0f JG |
2504 | default: |
2505 | /* do nothing */ | |
2506 | break; | |
2507 | } | |
2508 | ||
2509 | return evt; | |
2510 | } | |
2511 | EXPORT_SYMBOL_GPL(sdev_evt_alloc); | |
2512 | ||
2513 | /** | |
2514 | * sdev_evt_send_simple - send asserted event to uevent thread | |
2515 | * @sdev: scsi_device event occurred on | |
2516 | * @evt_type: type of event to send | |
2517 | * @gfpflags: GFP flags for allocation | |
2518 | * | |
2519 | * Assert scsi device event asynchronously, given an event type. | |
2520 | */ | |
2521 | void sdev_evt_send_simple(struct scsi_device *sdev, | |
2522 | enum scsi_device_event evt_type, gfp_t gfpflags) | |
2523 | { | |
2524 | struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); | |
2525 | if (!evt) { | |
2526 | sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", | |
2527 | evt_type); | |
2528 | return; | |
2529 | } | |
2530 | ||
2531 | sdev_evt_send(sdev, evt); | |
2532 | } | |
2533 | EXPORT_SYMBOL_GPL(sdev_evt_send_simple); | |
2534 | ||
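/*
 * Illustrative sketch only (not part of this file): a hypothetical driver
 * that detects a media change in interrupt context could notify user space
 * like this; GFP_ATOMIC is used because the caller may not sleep there.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif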
1da177e4 | 2535 | /** |
e6044f71 | 2536 | * scsi_device_quiesce - Block all commands except power management. |
1da177e4 LT |
2537 | * @sdev: scsi device to quiesce. |
2538 | * | |
2539 | * This works by trying to transition to the SDEV_QUIESCE state | |
2540 | * (which must be a legal transition). When the device is in this | |
e6044f71 BVA |
2541 | * state, only power management requests will be accepted, all others will |
2542 | * be deferred. | |
1da177e4 LT |
2543 | * |
2544 | * Must be called with user context, may sleep. | |
2545 | * | |
2546 | * Returns zero if unsuccessful or an error if not. | |
eb44820c | 2547 | */ |
1da177e4 LT |
2548 | int |
2549 | scsi_device_quiesce(struct scsi_device *sdev) | |
2550 | { | |
3a0a5299 | 2551 | struct request_queue *q = sdev->request_queue; |
0db6ca8a BVA |
2552 | int err; |
2553 | ||
3a0a5299 BVA |
2554 | /* |
2555 | * It is allowed to call scsi_device_quiesce() multiple times from | |
2556 | * the same context but concurrent scsi_device_quiesce() calls are | |
2557 | * not allowed. | |
2558 | */ | |
2559 | WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); | |
2560 | ||
cd84a62e BVA |
2561 | if (sdev->quiesced_by == current) |
2562 | return 0; | |
2563 | ||
2564 | blk_set_pm_only(q); | |
3a0a5299 BVA |
2565 | |
2566 | blk_mq_freeze_queue(q); | |
2567 | /* | |
cd84a62e | 2568 | * Ensure that the effect of blk_set_pm_only() will be visible |
3a0a5299 BVA |
2569 | * for percpu_ref_tryget() callers that occur after the queue |
2570 | * unfreeze even if the queue was already frozen before this function | |
2571 | * was called. See also https://lwn.net/Articles/573497/. | |
2572 | */ | |
2573 | synchronize_rcu(); | |
2574 | blk_mq_unfreeze_queue(q); | |
2575 | ||
0db6ca8a BVA |
2576 | mutex_lock(&sdev->state_mutex); |
2577 | err = scsi_device_set_state(sdev, SDEV_QUIESCE); | |
3a0a5299 BVA |
2578 | if (err == 0) |
2579 | sdev->quiesced_by = current; | |
2580 | else | |
cd84a62e | 2581 | blk_clear_pm_only(q); |
0db6ca8a BVA |
2582 | mutex_unlock(&sdev->state_mutex); |
2583 | ||
3a0a5299 | 2584 | return err; |
1da177e4 LT |
2585 | } |
2586 | EXPORT_SYMBOL(scsi_device_quiesce); | |
2587 | ||
2588 | /** | |
2589 | * scsi_device_resume - Restart user issued commands to a quiesced device. | |
2590 | * @sdev: scsi device to resume. | |
2591 | * | |
2592 | * Moves the device from quiesced back to running and restarts the | |
2593 | * queues. | |
2594 | * | |
2595 | * Must be called with user context, may sleep. | |
eb44820c | 2596 | */ |
a7a20d10 | 2597 | void scsi_device_resume(struct scsi_device *sdev) |
1da177e4 | 2598 | { |
a7a20d10 DW |
2599 | /* check if the device state was mutated prior to resume, and if |
2600 | * so assume the state is being managed elsewhere (for example | |
2601 | * device deleted during suspend) | |
2602 | */ | |
0db6ca8a | 2603 | mutex_lock(&sdev->state_mutex); |
e6044f71 BVA |
2604 | if (sdev->sdev_state == SDEV_QUIESCE) |
2605 | scsi_device_set_state(sdev, SDEV_RUNNING); | |
17605afa BVA |
2606 | if (sdev->quiesced_by) { |
2607 | sdev->quiesced_by = NULL; | |
2608 | blk_clear_pm_only(sdev->request_queue); | |
2609 | } | |
0db6ca8a | 2610 | mutex_unlock(&sdev->state_mutex); |
1da177e4 LT |
2611 | } |
2612 | EXPORT_SYMBOL(scsi_device_resume); | |
2613 | ||
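/*
 * Illustrative sketch only (not part of this file): power-management style
 * code pairs scsi_device_quiesce() with scsi_device_resume(), doing its work
 * in between while only power management requests are accepted.
 */
#if 0
static int example_do_while_quiesced(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	/* ... issue power management requests / save device state here ... */

	scsi_device_resume(sdev);
	return 0;
}
#endif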
2614 | static void | |
2615 | device_quiesce_fn(struct scsi_device *sdev, void *data) | |
2616 | { | |
2617 | scsi_device_quiesce(sdev); | |
2618 | } | |
2619 | ||
2620 | void | |
2621 | scsi_target_quiesce(struct scsi_target *starget) | |
2622 | { | |
2623 | starget_for_each_device(starget, NULL, device_quiesce_fn); | |
2624 | } | |
2625 | EXPORT_SYMBOL(scsi_target_quiesce); | |
2626 | ||
2627 | static void | |
2628 | device_resume_fn(struct scsi_device *sdev, void *data) | |
2629 | { | |
2630 | scsi_device_resume(sdev); | |
2631 | } | |
2632 | ||
2633 | void | |
2634 | scsi_target_resume(struct scsi_target *starget) | |
2635 | { | |
2636 | starget_for_each_device(starget, NULL, device_resume_fn); | |
2637 | } | |
2638 | EXPORT_SYMBOL(scsi_target_resume); | |
2639 | ||
2640 | /** | |
551eb598 BVA |
2641 | * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state |
2642 | * @sdev: device to block | |
1da177e4 | 2643 | * |
551eb598 | 2644 | * Pause SCSI command processing on the specified device. Does not sleep. |
1da177e4 | 2645 | * |
551eb598 | 2646 | * Returns zero if successful or a negative error code upon failure. |
669f0441 | 2647 | * |
551eb598 BVA |
2648 | * Notes: |
2649 | * This routine transitions the device to the SDEV_BLOCK state (which must be | |
2650 | * a legal transition). When the device is in this state, command processing | |
2651 | * is paused until the device leaves the SDEV_BLOCK state. See also | |
2652 | * scsi_internal_device_unblock_nowait(). | |
eb44820c | 2653 | */ |
551eb598 | 2654 | int scsi_internal_device_block_nowait(struct scsi_device *sdev) |
1da177e4 | 2655 | { |
165125e1 | 2656 | struct request_queue *q = sdev->request_queue; |
1da177e4 LT |
2657 | int err = 0; |
2658 | ||
2659 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | |
6f4267e3 JB |
2660 | if (err) { |
2661 | err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); | |
2662 | ||
2663 | if (err) | |
2664 | return err; | |
2665 | } | |
1da177e4 | 2666 | |
4c7b4d63 | 2667 | /* |
1da177e4 LT |
2668 | * The device has transitioned to SDEV_BLOCK. Stop the |
2669 | * block layer from calling the midlayer with this device's | |
4c7b4d63 | 2670 | * request queue. |
1da177e4 | 2671 | */ |
f664a3cc | 2672 | blk_mq_quiesce_queue_nowait(q); |
1da177e4 LT |
2673 | return 0; |
2674 | } | |
551eb598 BVA |
2675 | EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); |
2676 | ||
1da177e4 | 2677 | /** |
551eb598 BVA |
2678 | * scsi_internal_device_block - try to transition to the SDEV_BLOCK state |
2679 | * @sdev: device to block | |
2680 | * | |
2681 | * Pause SCSI command processing on the specified device and wait until all | |
2682 | * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. | |
1da177e4 | 2683 | * |
551eb598 | 2684 | * Returns zero if successful or a negative error code upon failure. |
1da177e4 | 2685 | * |
551eb598 BVA |
2686 | * Note: |
2687 | * This routine transitions the device to the SDEV_BLOCK state (which must be | |
2688 | * a legal transition). When the device is in this state, command processing | |
2689 | * is paused until the device leaves the SDEV_BLOCK state. See also | |
2690 | * scsi_internal_device_unblock(). | |
eb44820c | 2691 | */ |
551eb598 | 2692 | static int scsi_internal_device_block(struct scsi_device *sdev) |
1da177e4 | 2693 | { |
551eb598 BVA |
2694 | struct request_queue *q = sdev->request_queue; |
2695 | int err; | |
2696 | ||
0db6ca8a | 2697 | mutex_lock(&sdev->state_mutex); |
551eb598 | 2698 | err = scsi_internal_device_block_nowait(sdev); |
f664a3cc JA |
2699 | if (err == 0) |
2700 | blk_mq_quiesce_queue(q); | |
0db6ca8a BVA |
2701 | mutex_unlock(&sdev->state_mutex); |
2702 | ||
551eb598 BVA |
2703 | return err; |
2704 | } | |
4c7b4d63 | 2705 | |
66483a4a BVA |
2706 | void scsi_start_queue(struct scsi_device *sdev) |
2707 | { | |
2708 | struct request_queue *q = sdev->request_queue; | |
5d9fb5cc | 2709 | |
f664a3cc | 2710 | blk_mq_unquiesce_queue(q); |
66483a4a BVA |
2711 | } |
2712 | ||
1da177e4 | 2713 | /** |
43f7571b | 2714 | * scsi_internal_device_unblock_nowait - resume a device after a block request |
1da177e4 | 2715 | * @sdev: device to resume |
43f7571b | 2716 | * @new_state: state to set the device to after unblocking |
1da177e4 | 2717 | * |
43f7571b BVA |
2718 | * Restart the device queue for a previously suspended SCSI device. Does not |
2719 | * sleep. | |
1da177e4 | 2720 | * |
43f7571b | 2721 | * Returns zero if successful or a negative error code upon failure. |
1da177e4 | 2722 | * |
43f7571b BVA |
2723 | * Notes: |
2724 | * This routine transitions the device to the SDEV_RUNNING state or to one of | |
2725 | * the offline states (which must be a legal transition) allowing the midlayer | |
2726 | * to goose the queue for this device. | |
eb44820c | 2727 | */ |
43f7571b BVA |
2728 | int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, |
2729 | enum scsi_device_state new_state) | |
1da177e4 | 2730 | { |
09addb1d BVA |
2731 | switch (new_state) { |
2732 | case SDEV_RUNNING: | |
2733 | case SDEV_TRANSPORT_OFFLINE: | |
2734 | break; | |
2735 | default: | |
2736 | return -EINVAL; | |
2737 | } | |
2738 | ||
5d9fb5cc MC |
2739 | /* |
2740 | * Try to transition the scsi device to SDEV_RUNNING or one of the | |
2741 | * offlined states and goose the device queue if successful. | |
1da177e4 | 2742 | */ |
8cd1ec78 HR |
2743 | switch (sdev->sdev_state) { |
2744 | case SDEV_BLOCK: | |
2745 | case SDEV_TRANSPORT_OFFLINE: | |
5d9fb5cc | 2746 | sdev->sdev_state = new_state; |
8cd1ec78 HR |
2747 | break; |
2748 | case SDEV_CREATED_BLOCK: | |
5d9fb5cc MC |
2749 | if (new_state == SDEV_TRANSPORT_OFFLINE || |
2750 | new_state == SDEV_OFFLINE) | |
2751 | sdev->sdev_state = new_state; | |
2752 | else | |
2753 | sdev->sdev_state = SDEV_CREATED; | |
8cd1ec78 HR |
2754 | break; |
2755 | case SDEV_CANCEL: | |
2756 | case SDEV_OFFLINE: | |
2757 | break; | |
2758 | default: | |
5c10e63c | 2759 | return -EINVAL; |
8cd1ec78 | 2760 | } |
66483a4a | 2761 | scsi_start_queue(sdev); |
1da177e4 LT |
2762 | |
2763 | return 0; | |
2764 | } | |
43f7571b BVA |
2765 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); |
2766 | ||
2767 | /** | |
2768 | * scsi_internal_device_unblock - resume a device after a block request | |
2769 | * @sdev: device to resume | |
2770 | * @new_state: state to set the device to after unblocking | |
2771 | * | |
2772 | * Restart the device queue for a previously suspended SCSI device. May sleep. | |
2773 | * | |
2774 | * Returns zero if successful or a negative error code upon failure. | |
2775 | * | |
2776 | * Notes: | |
2777 | * This routine transitions the device to the SDEV_RUNNING state or to one of | |
2778 | * the offline states (which must be a legal transition) allowing the midlayer | |
2779 | * to goose the queue for this device. | |
2780 | */ | |
2781 | static int scsi_internal_device_unblock(struct scsi_device *sdev, | |
2782 | enum scsi_device_state new_state) | |
2783 | { | |
0db6ca8a BVA |
2784 | int ret; |
2785 | ||
2786 | mutex_lock(&sdev->state_mutex); | |
2787 | ret = scsi_internal_device_unblock_nowait(sdev, new_state); | |
2788 | mutex_unlock(&sdev->state_mutex); | |
2789 | ||
2790 | return ret; | |
43f7571b | 2791 | } |
1da177e4 LT |
2792 | |
2793 | static void | |
2794 | device_block(struct scsi_device *sdev, void *data) | |
2795 | { | |
94ef80a5 BVA |
2796 | int ret; |
2797 | ||
2798 | ret = scsi_internal_device_block(sdev); | |
2799 | ||
2800 | WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n", | |
2801 | dev_name(&sdev->sdev_gendev), ret); | |
1da177e4 LT |
2802 | } |
2803 | ||
2804 | static int | |
2805 | target_block(struct device *dev, void *data) | |
2806 | { | |
2807 | if (scsi_is_target_device(dev)) | |
2808 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2809 | device_block); | |
2810 | return 0; | |
2811 | } | |
2812 | ||
2813 | void | |
2814 | scsi_target_block(struct device *dev) | |
2815 | { | |
2816 | if (scsi_is_target_device(dev)) | |
2817 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2818 | device_block); | |
2819 | else | |
2820 | device_for_each_child(dev, NULL, target_block); | |
2821 | } | |
2822 | EXPORT_SYMBOL_GPL(scsi_target_block); | |
2823 | ||
2824 | static void | |
2825 | device_unblock(struct scsi_device *sdev, void *data) | |
2826 | { | |
5d9fb5cc | 2827 | scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); |
1da177e4 LT |
2828 | } |
2829 | ||
2830 | static int | |
2831 | target_unblock(struct device *dev, void *data) | |
2832 | { | |
2833 | if (scsi_is_target_device(dev)) | |
5d9fb5cc | 2834 | starget_for_each_device(to_scsi_target(dev), data, |
1da177e4 LT |
2835 | device_unblock); |
2836 | return 0; | |
2837 | } | |
2838 | ||
2839 | void | |
5d9fb5cc | 2840 | scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) |
1da177e4 LT |
2841 | { |
2842 | if (scsi_is_target_device(dev)) | |
5d9fb5cc | 2843 | starget_for_each_device(to_scsi_target(dev), &new_state, |
1da177e4 LT |
2844 | device_unblock); |
2845 | else | |
5d9fb5cc | 2846 | device_for_each_child(dev, &new_state, target_unblock); |
1da177e4 LT |
2847 | } |
2848 | EXPORT_SYMBOL_GPL(scsi_target_unblock); | |
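/*
 * Illustrative sketch (not part of the original file): a transport class
 * may stop I/O to every LUN behind a target while its link is down and
 * restart it afterwards.  The handler names are hypothetical;
 * scsi_target_block()/scsi_target_unblock() are the exported helpers
 * above and take the target's embedded struct device.
 */
static void example_target_gone(struct scsi_target *starget)
{
	scsi_target_block(&starget->dev);
}

static void example_target_back(struct scsi_target *starget)
{
	scsi_target_unblock(&starget->dev, SDEV_RUNNING);
}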
cdb8c2a6 | 2849 | |
2bb95584 HR |
2850 | int |
2851 | scsi_host_block(struct Scsi_Host *shost) | |
2852 | { | |
2853 | struct scsi_device *sdev; | |
2854 | int ret = 0; | |
2855 | ||
f983622a ML |
2856 | /* |
2857 | * Call scsi_internal_device_block_nowait so we can avoid | |
2858 | * calling synchronize_rcu() for each LUN. | |
2859 | */ | |
2bb95584 | 2860 | shost_for_each_device(sdev, shost) { |
f983622a ML |
2861 | mutex_lock(&sdev->state_mutex); |
2862 | ret = scsi_internal_device_block_nowait(sdev); | |
2863 | mutex_unlock(&sdev->state_mutex); | |
f30785db YB |
2864 | if (ret) { |
2865 | scsi_device_put(sdev); | |
2bb95584 | 2866 | break; |
f30785db | 2867 | } |
2bb95584 | 2868 | } |
f983622a ML |
2869 | |
2870 | /* | |
2871 | * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so | |
2872 | * calling synchronize_rcu() once is enough. | |
2873 | */ | |
2874 | WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING); | |
2875 | ||
2876 | if (!ret) | |
2877 | synchronize_rcu(); | |
2878 | ||
2bb95584 HR |
2879 | return ret; |
2880 | } | |
2881 | EXPORT_SYMBOL_GPL(scsi_host_block); | |
2882 | ||
2883 | int | |
2884 | scsi_host_unblock(struct Scsi_Host *shost, int new_state) | |
2885 | { | |
2886 | struct scsi_device *sdev; | |
2887 | int ret = 0; | |
2888 | ||
2889 | shost_for_each_device(sdev, shost) { | |
2890 | ret = scsi_internal_device_unblock(sdev, new_state); | |
4dea170f YB |
2891 | if (ret) { |
2892 | scsi_device_put(sdev); | |
2bb95584 | 2893 | break; |
4dea170f | 2894 | } |
2bb95584 HR |
2895 | } |
2896 | return ret; | |
2897 | } | |
2898 | EXPORT_SYMBOL_GPL(scsi_host_unblock); | |
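/*
 * Illustrative sketch (not part of the original file): quiesce every
 * device on a host around a controller-level recovery step, then resume
 * them.  example_reset_controller() is a hypothetical LLD hook and the
 * error unwinding on a partial block is omitted; the
 * scsi_host_block()/scsi_host_unblock() calls match the helpers above.
 */
static int example_host_recover(struct Scsi_Host *shost)
{
	int ret = scsi_host_block(shost);

	if (ret)
		return ret;

	ret = example_reset_controller(shost);	/* hypothetical LLD hook */

	scsi_host_unblock(shost, SDEV_RUNNING);
	return ret;
}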
2899 | ||
cdb8c2a6 GL |
2900 | /** |
2901 | * scsi_kmap_atomic_sg - find and atomically map an sg-element |
eb44820c | 2902 | * @sgl: scatter-gather list |
cdb8c2a6 GL |
2903 | * @sg_count: number of segments in sg |
2904 | * @offset: offset in bytes into sg, on return offset into the mapped area | |
2905 | * @len: bytes to map, on return number of bytes mapped | |
2906 | * | |
2907 | * Returns virtual address of the start of the mapped page | |
2908 | */ | |
c6132da1 | 2909 | void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, |
cdb8c2a6 GL |
2910 | size_t *offset, size_t *len) |
2911 | { | |
2912 | int i; | |
2913 | size_t sg_len = 0, len_complete = 0; | |
c6132da1 | 2914 | struct scatterlist *sg; |
cdb8c2a6 GL |
2915 | struct page *page; |
2916 | ||
22cfefb5 AM |
2917 | WARN_ON(!irqs_disabled()); |
2918 | ||
c6132da1 | 2919 | for_each_sg(sgl, sg, sg_count, i) { |
cdb8c2a6 | 2920 | len_complete = sg_len; /* Complete sg-entries */ |
c6132da1 | 2921 | sg_len += sg->length; |
cdb8c2a6 GL |
2922 | if (sg_len > *offset) |
2923 | break; | |
2924 | } | |
2925 | ||
2926 | if (unlikely(i == sg_count)) { | |
169e1a2a AM |
2927 | printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " |
2928 | "elements %d\n", | |
cadbd4a5 | 2929 | __func__, sg_len, *offset, sg_count); |
cdb8c2a6 GL |
2930 | WARN_ON(1); |
2931 | return NULL; | |
2932 | } | |
2933 | ||
2934 | /* Offset starting from the beginning of first page in this sg-entry */ | |
c6132da1 | 2935 | *offset = *offset - len_complete + sg->offset; |
cdb8c2a6 GL |
2936 | |
2937 | /* Assumption: contiguous pages can be accessed as "page + i" */ | |
45711f1a | 2938 | page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); |
cdb8c2a6 GL |
2939 | *offset &= ~PAGE_MASK; |
2940 | ||
2941 | /* Bytes in this sg-entry from *offset to the end of the page */ | |
2942 | sg_len = PAGE_SIZE - *offset; | |
2943 | if (*len > sg_len) | |
2944 | *len = sg_len; | |
2945 | ||
77dfce07 | 2946 | return kmap_atomic(page); |
cdb8c2a6 GL |
2947 | } |
2948 | EXPORT_SYMBOL(scsi_kmap_atomic_sg); | |
2949 | ||
2950 | /** | |
eb44820c | 2951 | * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg |
cdb8c2a6 GL |
2952 | * @virt: virtual address to be unmapped |
2953 | */ | |
2954 | void scsi_kunmap_atomic_sg(void *virt) | |
2955 | { | |
77dfce07 | 2956 | kunmap_atomic(virt); |
cdb8c2a6 GL |
2957 | } |
2958 | EXPORT_SYMBOL(scsi_kunmap_atomic_sg); | |
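/*
 * Illustrative sketch (not part of the original file): peek at the start
 * of a command's data buffer via the atomic sg mapping helpers above.
 * The function name is hypothetical.  Because scsi_kmap_atomic_sg()
 * warns unless interrupts are disabled, the map/unmap pair is wrapped
 * in local_irq_save()/local_irq_restore().
 */
static void example_peek_data(struct scsi_cmnd *cmd, u8 *dst, size_t count)
{
	size_t offset = 0, len = count;
	unsigned long flags;
	void *vaddr;

	local_irq_save(flags);
	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (vaddr) {
		/* offset is now relative to the mapped page, len is clamped */
		memcpy(dst, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
	}
	local_irq_restore(flags);
}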
6f4c827e AL |
2959 | |
2960 | void sdev_disable_disk_events(struct scsi_device *sdev) | |
2961 | { | |
2962 | atomic_inc(&sdev->disk_events_disable_depth); | |
2963 | } | |
2964 | EXPORT_SYMBOL(sdev_disable_disk_events); | |
2965 | ||
2966 | void sdev_enable_disk_events(struct scsi_device *sdev) | |
2967 | { | |
2968 | if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) | |
2969 | return; | |
2970 | atomic_dec(&sdev->disk_events_disable_depth); | |
2971 | } | |
2972 | EXPORT_SYMBOL(sdev_enable_disk_events); | |
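/*
 * Illustrative sketch (not part of the original file): bracket an
 * operation that would otherwise trigger spurious media-change polling.
 * example_private_io() is a hypothetical placeholder for driver-specific
 * work; the disable/enable calls use a depth counter, so nested use from
 * different layers is safe.
 */
static int example_quiet_io(struct scsi_device *sdev)
{
	int ret;

	sdev_disable_disk_events(sdev);
	ret = example_private_io(sdev);	/* hypothetical helper */
	sdev_enable_disk_events(sdev);
	return ret;
}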
9983bed3 | 2973 | |
2e4209b3 MW |
2974 | static unsigned char designator_prio(const unsigned char *d) |
2975 | { | |
2976 | if (d[1] & 0x30) | |
2977 | /* not associated with LUN */ | |
2978 | return 0; | |
2979 | ||
2980 | if (d[3] == 0) | |
2981 | /* invalid length */ | |
2982 | return 0; | |
2983 | ||
2984 | /* | |
2985 | * Order of preference for lun descriptor: | |
2986 | * - SCSI name string | |
2987 | * - NAA IEEE Registered Extended | |
2988 | * - EUI-64 based 16-byte | |
2989 | * - EUI-64 based 12-byte | |
2990 | * - NAA IEEE Registered | |
2991 | * - NAA IEEE Extended | |
2992 | * - EUI-64 based 8-byte | |
2993 | * - SCSI name string (truncated) | |
2994 | * - T10 Vendor ID | |
2995 | * as longer descriptors reduce the likelihood |
2996 | * of identification clashes. | |
2997 | */ | |
2998 | ||
2999 | switch (d[1] & 0xf) { | |
3000 | case 8: | |
3001 | /* SCSI name string, variable-length UTF-8 */ | |
3002 | return 9; | |
3003 | case 3: | |
3004 | switch (d[4] >> 4) { | |
3005 | case 6: | |
3006 | /* NAA registered extended */ | |
3007 | return 8; | |
3008 | case 5: | |
3009 | /* NAA registered */ | |
3010 | return 5; | |
3011 | case 4: | |
3012 | /* NAA extended */ | |
3013 | return 4; | |
3014 | case 3: | |
3015 | /* NAA locally assigned */ | |
3016 | return 1; | |
3017 | default: | |
3018 | break; | |
3019 | } | |
3020 | break; | |
3021 | case 2: | |
3022 | switch (d[3]) { | |
3023 | case 16: | |
3024 | /* EUI64-based, 16 byte */ | |
3025 | return 7; | |
3026 | case 12: | |
3027 | /* EUI64-based, 12 byte */ | |
3028 | return 6; | |
3029 | case 8: | |
3030 | /* EUI64-based, 8 byte */ | |
3031 | return 3; | |
3032 | default: | |
3033 | break; | |
3034 | } | |
3035 | break; | |
3036 | case 1: | |
3037 | /* T10 vendor ID */ | |
3038 | return 1; | |
3039 | default: | |
3040 | break; | |
3041 | } | |
3042 | ||
3043 | return 0; | |
3044 | } | |
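/*
 * Worked example (illustrative, not part of the original file): an NAA
 * IEEE Registered Extended designator has (d[1] & 0xf) == 3 and
 * (d[4] >> 4) == 6, so it ranks 8, while a T10 vendor-ID designator
 * ((d[1] & 0xf) == 1) ranks 1.  When both appear in the same VPD page
 * 0x83, scsi_vpd_lun_id() below therefore reports the NAA-based
 * identifier.
 */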
3045 | ||
9983bed3 HR |
3046 | /** |
3047 | * scsi_vpd_lun_id - return a unique device identification | |
3048 | * @sdev: SCSI device | |
3049 | * @id: buffer for the identification | |
3050 | * @id_len: length of the buffer | |
3051 | * | |
3052 | * Copies a unique device identification into @id based | |
3053 | * on the information in the VPD page 0x83 of the device. | |
3054 | * The string will be formatted as a SCSI name string. | |
3055 | * | |
3056 | * Returns the length of the identification or error on failure. | |
3057 | * If the identifier is longer than the supplied buffer the actual | |
3058 | * identifier length is returned and the buffer is not zero-padded. | |
3059 | */ | |
3060 | int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) | |
3061 | { | |
2e4209b3 | 3062 | u8 cur_id_prio = 0; |
9983bed3 | 3063 | u8 cur_id_size = 0; |
ccf1e004 BVA |
3064 | const unsigned char *d, *cur_id_str; |
3065 | const struct scsi_vpd *vpd_pg83; | |
9983bed3 HR |
3066 | int id_size = -EINVAL; |
3067 | ||
3068 | rcu_read_lock(); | |
3069 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); | |
3070 | if (!vpd_pg83) { | |
3071 | rcu_read_unlock(); | |
3072 | return -ENXIO; | |
3073 | } | |
3074 | ||
9983bed3 HR |
3075 | /* The id string must be at least 20 bytes + terminating NULL byte */ |
3076 | if (id_len < 21) { | |
3077 | rcu_read_unlock(); | |
3078 | return -EINVAL; | |
3079 | } | |
3080 | ||
3081 | memset(id, 0, id_len); | |
16d6317e MW |
3082 | for (d = vpd_pg83->data + 4; |
3083 | d < vpd_pg83->data + vpd_pg83->len; | |
3084 | d += d[3] + 4) { | |
2e4209b3 MW |
3085 | u8 prio = designator_prio(d); |
3086 | ||
3087 | if (prio == 0 || cur_id_prio > prio) | |
16d6317e | 3088 | continue; |
9983bed3 HR |
3089 | |
3090 | switch (d[1] & 0xf) { | |
d230823a HR |
3091 | case 0x1: |
3092 | /* T10 Vendor ID */ | |
3093 | if (cur_id_size > d[3]) | |
3094 | break; | |
2e4209b3 | 3095 | cur_id_prio = prio; |
d230823a HR |
3096 | cur_id_size = d[3]; |
3097 | if (cur_id_size + 4 > id_len) | |
3098 | cur_id_size = id_len - 4; | |
3099 | cur_id_str = d + 4; | |
d230823a HR |
3100 | id_size = snprintf(id, id_len, "t10.%*pE", |
3101 | cur_id_size, cur_id_str); | |
3102 | break; | |
9983bed3 HR |
3103 | case 0x2: |
3104 | /* EUI-64 */ | |
2e4209b3 | 3105 | cur_id_prio = prio; |
9983bed3 HR |
3106 | cur_id_size = d[3]; |
3107 | cur_id_str = d + 4; | |
9983bed3 HR |
3108 | switch (cur_id_size) { |
3109 | case 8: | |
3110 | id_size = snprintf(id, id_len, | |
3111 | "eui.%8phN", | |
3112 | cur_id_str); | |
3113 | break; | |
3114 | case 12: | |
3115 | id_size = snprintf(id, id_len, | |
3116 | "eui.%12phN", | |
3117 | cur_id_str); | |
3118 | break; | |
3119 | case 16: | |
3120 | id_size = snprintf(id, id_len, | |
3121 | "eui.%16phN", | |
3122 | cur_id_str); | |
3123 | break; | |
3124 | default: | |
9983bed3 HR |
3125 | break; |
3126 | } | |
3127 | break; | |
3128 | case 0x3: | |
3129 | /* NAA */ | |
2e4209b3 | 3130 | cur_id_prio = prio; |
9983bed3 HR |
3131 | cur_id_size = d[3]; |
3132 | cur_id_str = d + 4; | |
9983bed3 HR |
3133 | switch (cur_id_size) { |
3134 | case 8: | |
3135 | id_size = snprintf(id, id_len, | |
3136 | "naa.%8phN", | |
3137 | cur_id_str); | |
3138 | break; | |
3139 | case 16: | |
3140 | id_size = snprintf(id, id_len, | |
3141 | "naa.%16phN", | |
3142 | cur_id_str); | |
3143 | break; | |
3144 | default: | |
9983bed3 HR |
3145 | break; |
3146 | } | |
3147 | break; | |
3148 | case 0x8: | |
3149 | /* SCSI name string */ | |
2e4209b3 | 3150 | if (cur_id_size > d[3]) |
9983bed3 HR |
3151 | break; |
3152 | /* Prefer others for truncated descriptor */ | |
2e4209b3 MW |
3153 | if (d[3] > id_len) { |
3154 | prio = 2; | |
3155 | if (cur_id_prio > prio) | |
3156 | break; | |
3157 | } | |
3158 | cur_id_prio = prio; | |
9983bed3 HR |
3159 | cur_id_size = id_size = d[3]; |
3160 | cur_id_str = d + 4; | |
9983bed3 HR |
3161 | if (cur_id_size >= id_len) |
3162 | cur_id_size = id_len - 1; | |
3163 | memcpy(id, cur_id_str, cur_id_size); | |
9983bed3 HR |
3164 | break; |
3165 | default: | |
3166 | break; | |
3167 | } | |
9983bed3 HR |
3168 | } |
3169 | rcu_read_unlock(); | |
3170 | ||
3171 | return id_size; | |
3172 | } | |
3173 | EXPORT_SYMBOL(scsi_vpd_lun_id); | |
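/*
 * Illustrative sketch (not part of the original file): log a device's
 * unique LUN identifier.  The function name is hypothetical and the
 * 64-byte buffer is an arbitrary choice (anything of at least 21 bytes
 * is accepted by scsi_vpd_lun_id()).
 */
static void example_report_lun_id(struct scsi_device *sdev)
{
	char id[64];
	int len = scsi_vpd_lun_id(sdev, id, sizeof(id));

	if (len < 0)
		sdev_printk(KERN_INFO, sdev, "no VPD LUN id: %d\n", len);
	else
		sdev_printk(KERN_INFO, sdev, "LUN id: %s\n", id);
}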
a8aa3978 HR |
3174 | |
3175 | /* | |
3176 | * scsi_vpd_tpg_id - return a target port group identifier | |
3177 | * @sdev: SCSI device | |
3178 | * | |
3179 | * Returns the Target Port Group identifier from the information | |
3180 | * in VPD page 0x83 of the device. |
3181 | * | |
3182 | * Returns the identifier or error on failure. | |
3183 | */ | |
3184 | int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) | |
3185 | { | |
ccf1e004 BVA |
3186 | const unsigned char *d; |
3187 | const struct scsi_vpd *vpd_pg83; | |
a8aa3978 HR |
3188 | int group_id = -EAGAIN, rel_port = -1; |
3189 | ||
3190 | rcu_read_lock(); | |
3191 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); | |
3192 | if (!vpd_pg83) { | |
3193 | rcu_read_unlock(); | |
3194 | return -ENXIO; | |
3195 | } | |
3196 | ||
ccf1e004 BVA |
3197 | d = vpd_pg83->data + 4; |
3198 | while (d < vpd_pg83->data + vpd_pg83->len) { | |
a8aa3978 HR |
3199 | switch (d[1] & 0xf) { |
3200 | case 0x4: | |
3201 | /* Relative target port */ | |
3202 | rel_port = get_unaligned_be16(&d[6]); | |
3203 | break; | |
3204 | case 0x5: | |
3205 | /* Target port group */ | |
3206 | group_id = get_unaligned_be16(&d[6]); | |
3207 | break; | |
3208 | default: | |
3209 | break; | |
3210 | } | |
3211 | d += d[3] + 4; | |
3212 | } | |
3213 | rcu_read_unlock(); | |
3214 | ||
3215 | if (group_id >= 0 && rel_id && rel_port != -1) | |
3216 | *rel_id = rel_port; | |
3217 | ||
3218 | return group_id; | |
3219 | } | |
3220 | EXPORT_SYMBOL(scsi_vpd_tpg_id); |
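/*
 * Illustrative sketch (not part of the original file): query the ALUA
 * target port group and relative target port identifier of a device.
 * The function name is hypothetical.
 */
static void example_report_tpg(struct scsi_device *sdev)
{
	int rel_port = -1;
	int group_id = scsi_vpd_tpg_id(sdev, &rel_port);

	if (group_id < 0)
		sdev_printk(KERN_INFO, sdev,
			    "no target port group: %d\n", group_id);
	else
		sdev_printk(KERN_INFO, sdev,
			    "target port group %d, relative port %d\n",
			    group_id, rel_port);
}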