/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;
}

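/*
 * Bring the backstore online: claim the underlying struct block_device
 * via its udev_path, size a private bioset for I/O submission, copy the
 * request queue limits into dev_attrib, and probe for discard,
 * write-zeroes and T10-PI integrity support.
 */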
static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
        int ret = -ENOMEM;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (ret) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
        else
                dev->dev_flags |= DF_READ_ONLY;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");

        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as the
         * default limit, since the smaller WRITE_SAME(10) only has a
         * two-byte block count.
         */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
        if (max_write_zeroes_sectors)
                dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = &ib_dev->ibd_bio_set;

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 &bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_exit(&ib_dev->ibd_bio_set);
out:
        return ret;
}

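/*
 * Teardown is split in two: iblock_destroy_device() drops the exclusive
 * claim on the block device and tears down the bioset, while the final
 * kfree() of the iblock_dev is deferred to an RCU callback so concurrent
 * RCU readers of the se_device cannot see freed memory.
 */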
static void iblock_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        bioset_exit(&ib_dev->ibd_bio_set);
}

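/*
 * Convert the capacity of the backing device (in units of its logical
 * block size) into units of the exported dev_attrib.block_size. Every
 * factor-of-two difference between the two sizes is one shift; e.g. a
 * device with 4096-byte sectors exported with a 512-byte block size
 * reports eight times as many blocks (blocks_long <<= 3).
 */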
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

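/*
 * A single se_cmd may fan out into many bios. ibr->pending counts the
 * outstanding references; whoever drops the last one completes the
 * command back to the target core with GOOD, or CHECK CONDITION if any
 * bio reported an error.
 */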
static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!refcount_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        if (bio->bi_status) {
                pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
               int op_flags)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to;
         * we'll loop later on until we have handled the whole request.
         */
        if (sg_num > BIO_MAX_PAGES)
                sg_num = BIO_MAX_PAGES;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
        bio_set_op_attrs(bio, op, op_flags);

        return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;

        if (bio->bi_status)
                pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

        if (cmd) {
                if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
        return 0;
}

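/*
 * UNMAP maps directly onto blkdev_issue_discard(); LBAs and block counts
 * are converted from the exported block size into 512-byte Linux sectors
 * via target_to_linux_sector() before being handed to the block layer.
 */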
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct se_device *dev = cmd->se_dev;
        int ret;

        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev, nolb),
                                   GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

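/*
 * Fast path for WRITE_SAME with an all-zeroes payload: offload the whole
 * range to blkdev_issue_zeroout() instead of cloning the one-block
 * payload into a chain of bios.
 */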
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
        unsigned char *buf, *not_zero;
        int ret;

        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        /*
         * Fall back to block_execute_write_same() slow-path if
         * incoming WRITE_SAME payload does not contain zeros.
         */
        not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));

        if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        ret = blkdev_issue_zeroout(bdev,
                        target_to_linux_sector(dev, cmd->t_task_lba),
                        target_to_linux_sector(dev,
                                sbc_get_write_same_sectors(cmd)),
                        GFP_KERNEL, false);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        target_complete_cmd(cmd, GOOD);
        return 0;
}

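/*
 * Emulate WRITE_SAME by repeatedly queueing the command's single-block
 * payload page into a chain of bios, advancing the LBA each iteration.
 * When the device can write zeroes natively and the payload is all
 * zeroes, iblock_execute_zero_out() above short-circuits this loop.
 */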
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        sector_t sectors = target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd));

        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        if (bdev_write_zeroes_sectors(bdev)) {
                if (!iblock_execute_zero_out(bdev, cmd))
                        return 0;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
                                             0);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= 1;
        }

        iblock_submit_bios(&list);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

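/*
 * Backstore parameters accepted via the configfs "control" attribute.
 * Illustrative userspace usage (the configfs layout below is an example
 * LIO path, not defined in this file):
 *
 *   echo "udev_path=/dev/sdb,readonly=0" > \
 *       /sys/kernel/config/target/core/iblock_0/mydev/control
 */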
enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                                ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == ib_dev) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

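/*
 * Attach a T10-PI bio_integrity_payload to @bio, sized for the bio's
 * sector count, and fill it from cmd->t_prot_sg via the caller's
 * sg_mapping_iter. The iterator persists across calls, so a command that
 * is split over several bios consumes the protection buffer exactly once.
 */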
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
                 struct sg_mapping_iter *miter)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO,
                        min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        bip_set_seed(bip, bio->bi_iter.bi_sector);

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {

                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
                          miter->page, len, offset_in_page(miter->addr));

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}

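/*
 * Main READ/WRITE path: translate the command's scatterlist into a chain
 * of bios, submitting a batch every IBLOCK_MAX_BIO_PER_TASK bios. Writes
 * are forced to write-through with REQ_FUA when the queue supports FUA
 * and either the initiator set the Force Unit Access bit or no volatile
 * write cache is present.
 */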
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned bio_cnt;
        int i, rc, op, op_flags = 0;
        struct sg_mapping_iter prot_miter;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
                /*
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
                op = REQ_OP_WRITE;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                op_flags = REQ_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                                op_flags = REQ_FUA;
                }
        } else {
                op = REQ_OP_READ;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
                               op == REQ_OP_READ ? SG_MITER_FROM_SG :
                                                   SG_MITER_TO_SG);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                                if (rc)
                                        goto fail_put_bios;
                        }

                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num, op,
                                             op_flags);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

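/*
 * LBPPBE ("logical blocks per physical block exponent", reported to
 * initiators in READ CAPACITY(16)) is log2 of how many logical blocks
 * fit in one physical block of the backing device.
 */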
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

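/*
 * SBC command dispatch: sbc_parse_cdb() decodes the CDB and invokes the
 * matching callback below, so IBLOCK only has to supply the raw I/O
 * primitives.
 */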
static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
        .execute_unmap          = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
        .name                     = "iblock",
        .inquiry_prod             = "IBLOCK",
        .inquiry_rev              = IBLOCK_VERSION,
        .owner                    = THIS_MODULE,
        .attach_hba               = iblock_attach_hba,
        .detach_hba               = iblock_detach_hba,
        .alloc_device             = iblock_alloc_device,
        .configure_device         = iblock_configure_device,
        .destroy_device           = iblock_destroy_device,
        .free_device              = iblock_free_device,
        .parse_cdb                = iblock_parse_cdb,
        .set_configfs_dev_params  = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type          = sbc_get_device_type,
        .get_blocks               = iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe               = iblock_get_lbppbe,
        .get_io_min               = iblock_get_io_min,
        .get_io_opt               = iblock_get_io_opt,
        .get_write_cache          = iblock_get_write_cache,
        .tb_dev_attrib_attrs      = sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
        return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
        target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);