/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
/*
 * TODO
 *      - Should this get merged, block/scsi_ioctl.c will be migrated into
 *        this file. To keep maintenance down, it's easier to have them
 *        separated right now.
 *
 */
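/*
 * Userspace usage, in brief (an illustrative sketch, not part of this
 * driver): commands are queued by write()ing one or more struct sg_io_v4
 * headers to the bsg node, and reaped by read()ing completed headers
 * back.  The device path and the TEST_UNIT_READY CDB below are purely
 * illustrative.
 *
 *      struct sg_io_v4 hdr = { 0 };
 *      unsigned char cdb[6] = { 0, };          (TEST_UNIT_READY)
 *      unsigned char sense[32];
 *
 *      hdr.guard = 'Q';
 *      hdr.request = (__u64) (unsigned long) cdb;
 *      hdr.request_len = sizeof(cdb);
 *      hdr.response = (__u64) (unsigned long) sense;
 *      hdr.max_response_len = sizeof(sense);
 *
 *      fd = open("/dev/bsg0", O_RDWR);
 *      write(fd, &hdr, sizeof(hdr));           (queue the command)
 *      read(fd, &hdr, sizeof(hdr));            (reap the completion)
 */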
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

static char bsg_version[] = "block layer sg (bsg) 0.4";

struct bsg_device {
        request_queue_t *queue;
        spinlock_t lock;
        struct list_head busy_list;
        struct list_head done_list;
        struct hlist_node dev_list;
        atomic_t ref_count;
        int minor;
        int queued_cmds;
        int done_cmds;
        wait_queue_head_t wq_done;
        wait_queue_head_t wq_free;
        char name[BUS_ID_SIZE];
        int max_queue;
        unsigned long flags;
};

enum {
        BSG_F_BLOCK             = 1,
        BSG_F_WRITE_PERM        = 2,
};

#define BSG_DEFAULT_CMDS        64
#define BSG_MAX_DEVS            32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

#define list_entry_bc(entry)    list_entry((entry), struct bsg_command, list)

/*
 * just for testing
 */
#define BSG_MAJOR       (240)

static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;

#define BSG_LIST_SIZE   (8)
#define bsg_list_idx(minor)     ((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
        struct bsg_device *bd;
        struct list_head list;
        struct request *rq;
        struct bio *bio;
        struct bio *bidi_bio;
        int err;
        struct sg_io_v4 hdr;
        struct sg_io_v4 __user *uhdr;
        char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        kmem_cache_free(bsg_cmd_cachep, bc);

        spin_lock_irqsave(&bd->lock, flags);
        bd->queued_cmds--;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
        struct bsg_command *bc = ERR_PTR(-EINVAL);

        spin_lock_irq(&bd->lock);

        if (bd->queued_cmds >= bd->max_queue)
                goto out;

        bd->queued_cmds++;
        spin_unlock_irq(&bd->lock);

        bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
        if (unlikely(!bc)) {
                spin_lock_irq(&bd->lock);
                bd->queued_cmds--;
                bc = ERR_PTR(-ENOMEM);
                goto out;
        }

        memset(bc, 0, sizeof(*bc));
        bc->bd = bd;
        INIT_LIST_HEAD(&bc->list);
        dprintk("%s: returning free cmd %p\n", bd->name, bc);
        return bc;
out:
        spin_unlock_irq(&bd->lock);
        return bc;
}

static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
        bd->done_cmds--;
        list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
        bd->done_cmds++;
        list_add_tail(&bc->list, &bd->done_list);
        wake_up(&bd->wq_done);
}

static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock_irq(&bd->lock);

        BUG_ON(bd->done_cmds > bd->queued_cmds);

        /*
         * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
         * work to do", even though we return -ENOSPC after this same test
         * during bsg_write() -- there, it means our buffer can't have more
         * bsg_commands added to it, thus has no space left.
         */
        if (bd->done_cmds == bd->queued_cmds) {
                ret = -ENODATA;
                goto unlock;
        }

        if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                ret = -EAGAIN;
                goto unlock;
        }

        prepare_to_wait(&bd->wq_done, &wait, state);
        spin_unlock_irq(&bd->lock);
        io_schedule();
        finish_wait(&bd->wq_done, &wait);

        if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
unlock:
        spin_unlock_irq(&bd->lock);
        return ret;
}

static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
                                struct sg_io_v4 *hdr, int has_write_perm)
{
        memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

        if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
                           hdr->request_len))
                return -EFAULT;
        if (blk_verify_command(rq->cmd, has_write_perm))
                return -EPERM;

        /*
         * fill in request structure
         */
        rq->cmd_len = hdr->request_len;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        rq->timeout = (hdr->timeout * HZ) / 1000;
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
                rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

        return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
        if (hdr->guard != 'Q')
                return -EINVAL;
        if (hdr->request_len > BLK_MAX_CDB)
                return -EINVAL;
        if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
            hdr->din_xfer_len > (q->max_sectors << 9))
                return -EIO;

        /* not supported currently */
        if (hdr->protocol || hdr->subprotocol)
                return -EINVAL;

        *rw = hdr->dout_xfer_len ? WRITE : READ;

        return 0;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
        request_queue_t *q = bd->queue;
        struct request *rq, *next_rq = NULL;
        int ret, rw = 0; /* shut up gcc */
        unsigned int dxfer_len;
        void *dxferp = NULL;

        dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
                hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
                hdr->din_xfer_len);

        ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
        if (ret)
                return ERR_PTR(ret);

        /*
         * map scatter-gather elements separately and string them to request
         */
        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (!rq)
                return ERR_PTR(-ENOMEM);
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
                                                        &bd->flags));
        if (ret)
                goto out;

        if (rw == WRITE && hdr->din_xfer_len) {
                if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }

                next_rq = blk_get_request(q, READ, GFP_KERNEL);
                if (!next_rq) {
                        ret = -ENOMEM;
                        goto out;
                }
                rq->next_rq = next_rq;

                dxferp = (void*)(unsigned long)hdr->din_xferp;
                ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
                if (ret)
                        goto out;
        }

        if (hdr->dout_xfer_len) {
                dxfer_len = hdr->dout_xfer_len;
                dxferp = (void*)(unsigned long)hdr->dout_xferp;
        } else if (hdr->din_xfer_len) {
                dxfer_len = hdr->din_xfer_len;
                dxferp = (void*)(unsigned long)hdr->din_xferp;
        } else
                dxfer_len = 0;

        if (dxfer_len) {
                ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
                if (ret)
                        goto out;
        }
        return rq;
out:
        blk_put_request(rq);
        if (next_rq) {
                blk_rq_unmap_user(next_rq->bio);
                blk_put_request(next_rq);
        }
        return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
                bd->name, rq, bc, bc->bio, uptodate);

        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

        spin_lock_irqsave(&bd->lock, flags);
        list_del(&bc->list);
        bsg_add_done_cmd(bd, bc);
        spin_unlock_irqrestore(&bd->lock, flags);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
                            struct bsg_command *bc, struct request *rq)
{
        rq->sense = bc->sense;
        rq->sense_len = 0;

        /*
         * add bc command to busy queue and submit rq for io
         */
        bc->rq = rq;
        bc->bio = rq->bio;
        if (rq->next_rq)
                bc->bidi_bio = rq->next_rq->bio;
        bc->hdr.duration = jiffies;
        spin_lock_irq(&bd->lock);
        list_add_tail(&bc->list, &bd->busy_list);
        spin_unlock_irq(&bd->lock);

        dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

        rq->end_io_data = bc;
        blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc = NULL;

        spin_lock_irq(&bd->lock);
        if (bd->done_cmds) {
                bc = list_entry_bc(bd->done_list.next);
                bsg_del_done_cmd(bd, bc);
        }
        spin_unlock_irq(&bd->lock);

        return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret;

        do {
                bc = bsg_next_done_cmd(bd);
                if (bc)
                        break;

                if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                        bc = ERR_PTR(-EAGAIN);
                        break;
                }

                ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
                if (ret) {
                        bc = ERR_PTR(-ERESTARTSYS);
                        break;
                }
        } while (1);

        dprintk("%s: returning done %p\n", bd->name, bc);

        return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                                    struct bio *bio, struct bio *bidi_bio)
{
        int ret = 0;

        dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
        /*
         * fill in all the output members
         */
        hdr->device_status = status_byte(rq->errors);
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
        if (hdr->device_status || hdr->transport_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
        hdr->din_resid = rq->data_len;
        hdr->response_len = 0;

        if (rq->sense_len && hdr->response) {
                int len = min((unsigned int) hdr->max_response_len,
                              rq->sense_len);

                ret = copy_to_user((void*)(unsigned long)hdr->response,
                                   rq->sense, len);
                if (!ret)
                        hdr->response_len = len;
                else
                        ret = -EFAULT;
        }

        if (rq->next_rq) {
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        }

        blk_rq_unmap_user(bio);
        blk_put_request(rq);

        return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret, tret;

        dprintk("%s: entered\n", bd->name);

        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * wait for all commands to complete
         */
        ret = 0;
        do {
                ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
                /*
                 * look for -ENODATA specifically -- we'll sometimes get
                 * -ERESTARTSYS when we've taken a signal, but we can't
                 * return until we're done freeing the queue, so ignore
                 * it.  The signal will get handled when we're done freeing
                 * the bsg_device.
                 */
        } while (ret != -ENODATA);

        /*
         * discard done commands
         */
        ret = 0;
        do {
                spin_lock_irq(&bd->lock);
                if (!bd->queued_cmds) {
                        spin_unlock_irq(&bd->lock);
                        break;
                }
                spin_unlock_irq(&bd->lock);

                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc))
                        break;

                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                                bc->bidi_bio);
                if (!ret)
                        ret = tret;

                bsg_free_command(bc);
        } while (1);

        return ret;
}

static ssize_t
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
           const struct iovec *iov, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        int nr_commands, ret;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        ret = 0;
        nr_commands = count / sizeof(struct sg_io_v4);
        while (nr_commands) {
                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        break;
                }

                /*
                 * this is the only case where we need to copy data back
                 * after completing the request.  so do that here, the
                 * async completion callback cannot do that for us
                 */
                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                               bc->bidi_bio);

                if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
                        ret = -EFAULT;

                bsg_free_command(bc);

                if (ret)
                        break;

                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
                nr_commands--;
        }

        return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
        if (file->f_flags & O_NONBLOCK)
                clear_bit(BSG_F_BLOCK, &bd->flags);
        else
                set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
        if (file->f_mode & FMODE_WRITE)
                set_bit(BSG_F_WRITE_PERM, &bd->flags);
        else
                clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

static inline int err_block_err(int ret)
{
        if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
                return 1;

        return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        int ret;
        ssize_t bytes_read;

        dprintk("%s: read %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);
        bytes_read = 0;
        ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
        *ppos = bytes_read;

        if (!bytes_read || (bytes_read && err_block_err(ret)))
                bytes_read = ret;

        return bytes_read;
}

static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
                           size_t count, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        struct request *rq;
        int ret, nr_commands;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        nr_commands = count / sizeof(struct sg_io_v4);
        rq = NULL;
        bc = NULL;
        ret = 0;
        while (nr_commands) {
                request_queue_t *q = bd->queue;

                bc = bsg_alloc_command(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        bc = NULL;
                        break;
                }

                bc->uhdr = (struct sg_io_v4 __user *) buf;
                if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
                        ret = -EFAULT;
                        break;
                }

                /*
                 * get a request, fill in the blanks, and add to request queue
                 */
                rq = bsg_map_hdr(bd, &bc->hdr);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        rq = NULL;
                        break;
                }

                bsg_add_command(bd, q, bc, rq);
                bc = NULL;
                rq = NULL;
                nr_commands--;
                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
        }

        if (bc)
                bsg_free_command(bc);

        return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        ssize_t bytes_read;
        int ret;

        dprintk("%s: write %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);
        bsg_set_write_perm(bd, file);

        bytes_read = 0;
        ret = __bsg_write(bd, buf, count, &bytes_read);
        *ppos = bytes_read;

        /*
         * return bytes written on non-fatal errors
         */
        if (!bytes_read || (bytes_read && err_block_err(ret)))
                bytes_read = ret;

        dprintk("%s: returning %Zd\n", bd->name, bytes_read);
        return bytes_read;
}

static struct bsg_device *bsg_alloc_device(void)
{
        struct bsg_device *bd;

        bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
        if (unlikely(!bd))
                return NULL;

        spin_lock_init(&bd->lock);

        bd->max_queue = BSG_DEFAULT_CMDS;

        INIT_LIST_HEAD(&bd->busy_list);
        INIT_LIST_HEAD(&bd->done_list);
        INIT_HLIST_NODE(&bd->dev_list);

        init_waitqueue_head(&bd->wq_free);
        init_waitqueue_head(&bd->wq_done);
        return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
        int ret = 0;

        mutex_lock(&bsg_mutex);

        if (!atomic_dec_and_test(&bd->ref_count))
                goto out;

        dprintk("%s: tearing down\n", bd->name);

        /*
         * close can always block
         */
        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * error detection is best-effort here. it's the responsibility
         * of the app to properly reap commands before close() if it wants
         * fool-proof error detection
         */
        ret = bsg_complete_all_commands(bd);

        blk_put_queue(bd->queue);
        hlist_del(&bd->dev_list);
        kfree(bd);
out:
        mutex_unlock(&bsg_mutex);
        return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
                                         struct request_queue *rq,
                                         struct file *file)
{
        struct bsg_device *bd = NULL;
#ifdef BSG_DEBUG
        unsigned char buf[32];
#endif

        bd = bsg_alloc_device();
        if (!bd)
                return ERR_PTR(-ENOMEM);

        bd->queue = rq;
        kobject_get(&rq->kobj);
        bsg_set_block(bd, file);

        atomic_set(&bd->ref_count, 1);
        bd->minor = iminor(inode);
        mutex_lock(&bsg_mutex);
        hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

        strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
        dprintk("bound to <%s>, max queue %d\n",
                format_dev_t(buf, inode->i_rdev), bd->max_queue);

        mutex_unlock(&bsg_mutex);
        return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
        struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
        struct bsg_device *bd = NULL;
        struct hlist_node *entry;

        mutex_lock(&bsg_mutex);

        hlist_for_each(entry, list) {
                bd = hlist_entry(entry, struct bsg_device, dev_list);
                if (bd->minor == minor) {
                        atomic_inc(&bd->ref_count);
                        break;
                }

                bd = NULL;
        }

        mutex_unlock(&bsg_mutex);
        return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = __bsg_get_device(iminor(inode));
        struct bsg_class_device *bcd, *__bcd;

        if (bd)
                return bd;

        /*
         * find the class device
         */
        bcd = NULL;
        mutex_lock(&bsg_mutex);
        list_for_each_entry(__bcd, &bsg_class_list, list) {
                if (__bcd->minor == iminor(inode)) {
                        bcd = __bcd;
                        break;
                }
        }
        mutex_unlock(&bsg_mutex);

        if (!bcd)
                return ERR_PTR(-ENODEV);

        return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = bsg_get_device(inode, file);

        if (IS_ERR(bd))
                return PTR_ERR(bd);

        file->private_data = bd;
        return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = file->private_data;

        file->private_data = NULL;
        return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
        struct bsg_device *bd = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &bd->wq_done, wait);
        poll_wait(file, &bd->wq_free, wait);

        spin_lock_irq(&bd->lock);
        if (!list_empty(&bd->done_list))
                mask |= POLLIN | POLLRDNORM;
        /*
         * only signal writability while there is room to queue more commands
         */
        if (bd->queued_cmds < bd->max_queue)
                mask |= POLLOUT;
        spin_unlock_irq(&bd->lock);

        return mask;
}

static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
          unsigned long arg)
{
        struct bsg_device *bd = file->private_data;
        int __user *uarg = (int __user *) arg;

        if (!bd)
                return -ENXIO;

        switch (cmd) {
                /*
                 * our own ioctls
                 */
        case SG_GET_COMMAND_Q:
                return put_user(bd->max_queue, uarg);
        case SG_SET_COMMAND_Q: {
                int queue;

                if (get_user(queue, uarg))
                        return -EFAULT;
                if (queue < 1)
                        return -EINVAL;

                spin_lock_irq(&bd->lock);
                bd->max_queue = queue;
                spin_unlock_irq(&bd->lock);
                return 0;
        }

        /*
         * SCSI/sg ioctls
         */
        case SG_GET_VERSION_NUM:
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
        case SG_SET_TIMEOUT:
        case SG_GET_TIMEOUT:
        case SG_GET_RESERVED_SIZE:
        case SG_SET_RESERVED_SIZE:
        case SG_EMULATED_HOST:
        case SCSI_IOCTL_SEND_COMMAND: {
                void __user *uarg = (void __user *) arg;
                return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
        }
        case SG_IO: {
                struct request *rq;
                struct bio *bio, *bidi_bio = NULL;
                struct sg_io_v4 hdr;

                if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                        return -EFAULT;

                rq = bsg_map_hdr(bd, &hdr);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                bio = rq->bio;
                if (rq->next_rq)
                        bidi_bio = rq->next_rq->bio;
                blk_execute_rq(bd->queue, NULL, rq, 0);
                blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

                if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                        return -EFAULT;

                return 0;
        }
        /*
         * block device ioctls
         */
        default:
#if 0
                return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
                return -ENOTTY;
#endif
        }
}

static struct file_operations bsg_fops = {
        .read           =       bsg_read,
        .write          =       bsg_write,
        .poll           =       bsg_poll,
        .open           =       bsg_open,
        .release        =       bsg_release,
        .ioctl          =       bsg_ioctl,
        .owner          =       THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
        struct bsg_class_device *bcd = &q->bsg_dev;

        if (!bcd->class_dev)
                return;

        mutex_lock(&bsg_mutex);
        sysfs_remove_link(&q->kobj, "bsg");
        class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
        bcd->class_dev = NULL;
        list_del_init(&bcd->list);
        bsg_device_nr--;
        mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, const char *name)
{
        struct bsg_class_device *bcd, *__bcd;
        dev_t dev;
        int ret = -EMFILE;
        struct class_device *class_dev = NULL;

        /*
         * we need a proper transport to send commands, not a stacked device
         */
        if (!q->request_fn)
                return 0;

        bcd = &q->bsg_dev;
        memset(bcd, 0, sizeof(*bcd));
        INIT_LIST_HEAD(&bcd->list);

        mutex_lock(&bsg_mutex);
        if (bsg_device_nr == BSG_MAX_DEVS) {
                printk(KERN_ERR "bsg: too many bsg devices\n");
                goto err;
        }

retry:
        list_for_each_entry(__bcd, &bsg_class_list, list) {
                if (__bcd->minor == bsg_minor_idx) {
                        bsg_minor_idx++;
                        if (bsg_minor_idx == BSG_MAX_DEVS)
                                bsg_minor_idx = 0;
                        goto retry;
                }
        }

        bcd->minor = bsg_minor_idx++;
        if (bsg_minor_idx == BSG_MAX_DEVS)
                bsg_minor_idx = 0;

        bcd->queue = q;
        dev = MKDEV(BSG_MAJOR, bcd->minor);
        class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
        if (IS_ERR(class_dev)) {
                ret = PTR_ERR(class_dev);
                goto err;
        }
        bcd->class_dev = class_dev;

        if (q->kobj.dentry) {
                ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
                if (ret)
                        goto err;
        }

        list_add_tail(&bcd->list, &bsg_class_list);
        bsg_device_nr++;

        mutex_unlock(&bsg_mutex);
        return 0;
err:
        if (class_dev)
                class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
        mutex_unlock(&bsg_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
        int ret;
        struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
        struct request_queue *rq = sdp->request_queue;

        if (rq->kobj.parent)
                ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
        else
                ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
        return ret;
}

static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
        bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
}

static struct class_interface bsg_intf = {
        .add    = bsg_add,
        .remove = bsg_remove,
};

static struct cdev bsg_cdev = {
        .kobj   = {.name = "bsg", },
        .owner  = THIS_MODULE,
};

static int __init bsg_init(void)
{
        int ret, i;

        bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
                                sizeof(struct bsg_command), 0, 0, NULL, NULL);
        if (!bsg_cmd_cachep) {
                printk(KERN_ERR "bsg: failed creating slab cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < BSG_LIST_SIZE; i++)
                INIT_HLIST_HEAD(&bsg_device_list[i]);

        bsg_class = class_create(THIS_MODULE, "bsg");
        if (IS_ERR(bsg_class)) {
                kmem_cache_destroy(bsg_cmd_cachep);
                return PTR_ERR(bsg_class);
        }

        ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
        if (ret) {
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                return ret;
        }

        cdev_init(&bsg_cdev, &bsg_fops);
        ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
        if (ret) {
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
                return ret;
        }

        ret = scsi_register_interface(&bsg_intf);
        if (ret) {
                printk(KERN_ERR "bsg: failed to register scsi interface %d\n", ret);
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                /*
                 * undo cdev_add()/register_chrdev_region() from above
                 */
                cdev_del(&bsg_cdev);
                unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
                return ret;
        }

        printk(KERN_INFO "%s loaded\n", bsg_version);
        return 0;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

device_initcall(bsg_init);