/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
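
/*
 * Illustrative sketch (not part of this file's build): a user-space caller
 * talks to a bsg node by filling a struct sg_io_v4 and issuing the SG_IO
 * ioctl on /dev/bsg/<device>, for example:
 *
 *	struct sg_io_v4 hdr = { 0 };
 *	unsigned char cdb[6] = { TEST_UNIT_READY, };
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(uintptr_t)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	ioctl(fd, SG_IO, &hdr);
 *
 * The same headers can also be write()/read() to queue and reap commands
 * asynchronously; see bsg_write() and bsg_read() below.
 */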
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)

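/*
 * Per-device state for an open bsg node; instances are hashed by minor in
 * bsg_device_list and shared by all openers of the same request_queue
 * (see __bsg_get_device()).
 */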
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

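/*
 * Allocate a command slot for this device; the number of outstanding
 * commands is capped at bd->max_queue (adjustable via SG_SET_COMMAND_Q).
 */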
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	bsg_dbg(bd, "returning free cmd %p\n", bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

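/*
 * SCSI passthrough implementation of the bsg_ops callbacks: translate the
 * sg_io_v4 CDB into a scsi_request on submission and copy status, sense
 * data and residuals back on completion.
 */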
#define uptr64(val) ((void __user *)(uintptr_t)(val))

static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	return 0;
}

static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}

static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}

static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}

static const struct bsg_ops bsg_scsi_ops = {
	.check_proto	= bsg_scsi_check_proto,
	.fill_hdr	= bsg_scsi_fill_hdr,
	.complete_rq	= bsg_scsi_complete_rq,
	.free_rq	= bsg_scsi_free_rq,
};

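/*
 * Turn a sg_io_v4 header into a struct request (plus an optional second
 * request for bidirectional transfers) with the user buffers mapped in.
 * Returns the request or an ERR_PTR() on failure.
 */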
static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request *rq, *next_rq = NULL;
	int ret;

	if (!q->bsg_dev.class_dev)
		return ERR_PTR(-ENXIO);

	if (hdr->guard != 'Q')
		return ERR_PTR(-EINVAL);

	ret = q->bsg_dev.ops->check_proto(hdr);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return rq;

	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
	if (ret)
		goto out;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;
	return rq;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
		rq, bc, bc->bio);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	bsg_dbg(bd, "returning done %p\n", bc);

	return bc;
}

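/*
 * Copy completion status, sense data and residuals back into the sg_io_v4
 * header, then unmap the user buffers and release the request(s).
 */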
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret;

	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}

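/*
 * Wait condition for bsg_complete_all_commands(): true once every queued
 * command has completed. For non-blocking opens we busy-loop here instead
 * of sleeping.
 */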
static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	bsg_dbg(bd, "entered\n");

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

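/*
 * Reap completed commands: copy one sg_io_v4 header back to user space for
 * each completed command, blocking or not according to O_NONBLOCK.
 */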
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	bsg_dbg(bd, "read %zd bytes\n", count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

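/*
 * Queue one command per sg_io_v4 header written; the commands complete
 * asynchronously and are reaped with read() (see __bsg_read() above).
 */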
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, fmode_t mode)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	bsg_dbg(bd, "write %zd bytes\n", count);

	if (unlikely(uaccess_kernel()))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	bsg_dbg(bd, "returning %zd\n", bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	lockdep_assert_held(&bsg_mutex);

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	lockdep_assert_held(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	return bd;
}

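/*
 * Find (or create) the bsg_device for the node being opened: look up the
 * bsg_class_device by minor, then reuse an existing bsg_device for that
 * queue or allocate a new one.
 */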
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));

	if (!bcd) {
		bd = ERR_PTR(-ENODEV);
		goto out_unlock;
	}

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (!bd)
		bd = bsg_add_device(inode, bcd->queue, file);

out_unlock:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static __poll_t bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= EPOLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

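/*
 * Handle the bsg-private ioctls (queue depth), the SCSI ioctls shared with
 * the sg driver, and synchronous SG_IO submission.
 */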
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

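/*
 * Register a bsg character device for @q, named @name, using @ops to
 * translate sg_io_v4 headers to and from requests. SCSI devices register
 * through bsg_scsi_register_queue() below; other transports supply their
 * own bsg_ops.
 */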
int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->ops = ops;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto idr_remove;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
idr_remove:
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
{
	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return -EINVAL;
	}

	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
}
EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);