/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
};
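
/*
 * Command lifecycle overview (a summary of the code below, added for
 * orientation; not an API contract): a write(2) of a struct sg_io_v4 header
 * allocates a bsg_command, bsg_map_hdr() turns it into a request, and
 * bsg_add_command() queues it on busy_list; the block layer completion
 * callback bsg_rq_end_io() moves it to done_list and wakes wq_done; a later
 * read(2) reaps it, copying the updated header back to userspace.
 */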

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	bsg_dbg(bd, "returning free cmd %p\n", bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

#define uptr64(val) ((void __user *)(uintptr_t)(val))

static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	return 0;
}

static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}

static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}

static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}

static const struct bsg_ops bsg_scsi_ops = {
	.check_proto	= bsg_scsi_check_proto,
	.fill_hdr	= bsg_scsi_fill_hdr,
	.complete_rq	= bsg_scsi_complete_rq,
	.free_rq	= bsg_scsi_free_rq,
};
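
/*
 * Sketch of how another transport could supply its own ops (illustration
 * only; the "my_*" names are hypothetical, the in-tree equivalent lives in
 * bsg-lib):
 *
 *	static const struct bsg_ops my_transport_bsg_ops = {
 *		.check_proto	= my_check_proto,
 *		.fill_hdr	= my_fill_hdr,
 *		.complete_rq	= my_complete_rq,
 *		.free_rq	= my_free_rq,
 *	};
 *
 * bsg_register_queue() stores the ops in q->bsg_dev.ops, and the code below
 * dispatches through them.
 */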

static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request *rq, *next_rq = NULL;
	int ret;

	if (!q->bsg_dev.class_dev)
		return ERR_PTR(-ENXIO);

	if (hdr->guard != 'Q')
		return ERR_PTR(-EINVAL);

	ret = q->bsg_dev.ops->check_proto(hdr);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
	if (ret)
		goto out;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;
	return rq;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
		rq, bc, bc->bio);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	bsg_dbg(bd, "returning done %p\n", bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret;

	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}

static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	bsg_dbg(bd, "entered\n");

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	bsg_dbg(bd, "read %zd bytes\n", count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, fmode_t mode)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	bsg_dbg(bd, "write %zd bytes\n", count);

	if (unlikely(uaccess_kernel()))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	bsg_dbg(bd, "returning %zd\n", bytes_written);
	return bytes_written;
}
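
/*
 * Illustrative userspace sketch of the async write/read interface above
 * (example only, not part of this file; the device path and CDB are
 * placeholders):
 *
 *	struct sg_io_v4 hdr = { 0 };
 *	unsigned char cdb[6] = { 0 };	// TEST UNIT READY
 *	unsigned char sense[32];
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(uintptr_t)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.response = (__u64)(uintptr_t)sense;
 *	hdr.max_response_len = sizeof(sense);
 *
 *	write(fd, &hdr, sizeof(hdr));	// queue the command
 *	read(fd, &hdr, sizeof(hdr));	// reap the completion
 */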

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static __poll_t bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= EPOLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
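
/*
 * Note: userspace pairs this with poll(2)/select(2): EPOLLIN means at least
 * one completed command can be read(2) back, EPOLLOUT means the queue has
 * room for another write(2).
 */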

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}
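
/*
 * Synchronous variant of the async sketch above (illustrative, userspace):
 *
 *	if (ioctl(fd, SG_IO, &hdr) == 0 && !(hdr.info & SG_INFO_CHECK))
 *		;	// completed without device/transport/driver error
 */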

static const struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops,
		void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	bcd->ops = ops;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
{
	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return -EINVAL;
	}

	return bsg_register_queue(q, parent, NULL, &bsg_scsi_ops, NULL);
}
EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
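
/*
 * Illustrative caller sketch (the real call sites live in the SCSI midlayer
 * and transport drivers, not in this file):
 *
 *	ret = bsg_scsi_register_queue(sdev->request_queue, &sdev->sdev_gendev);
 *	...
 *	bsg_unregister_queue(sdev->request_queue);	// on teardown
 */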

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);