// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
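
/*
 * Illustration only, not part of the driver: a rough sketch of the
 * userspace consumer side of this protocol, adapted from
 * Documentation/target/tcmu-design.rst. It assumes the mmap-ed region
 * and the tcmu_hdr_get_op()/tcmu_hdr_get_len() helpers from
 * include/uapi/linux/target_core_user.h.
 *
 *	struct tcmu_mailbox *mb = mmap_base;
 *	struct tcmu_cmd_entry *ent;
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		ent = mmap_base + mb->cmdr_off + mb->cmd_tail;
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *			// Read the CDB at mb + ent->req.cdb_off, perform
 *			// the I/O through ent->req.iov[], then post status:
 *			ent->rsp.scsi_status = 0;
 *		} // PAD and unknown ops are skipped by their length alone
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 */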

#define TCMU_TIME_OUT	(30 * MSEC_PER_SEC)

/* For the mailbox plus cmd ring, the size is fixed at 8MB */
#define MB_CMDR_SIZE_DEF	(8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
#define CMDR_OFF	((__u32)sizeof(struct tcmu_mailbox))
#define CMDR_SIZE_DEF	(MB_CMDR_SIZE_DEF - CMDR_OFF)

/*
 * For the data area, the default block size is PAGE_SIZE and
 * the default total size is 256K * PAGE_SIZE.
 */
#define DATA_PAGES_PER_BLK_DEF	1
#define DATA_AREA_PAGES_DEF	(256 * 1024)

#define TCMU_MBS_TO_PAGES(_mbs)	((size_t)_mbs << (20 - PAGE_SHIFT))
#define TCMU_PAGES_TO_MBS(_pages)	(_pages >> (20 - PAGE_SHIFT))

/*
 * Default number of global data pages (512K * PAGE_SIZE)
 * beyond which the unmap thread will be started.
 */
#define TCMU_GLOBAL_MAX_PAGES_DEF	(512 * 1024)
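
/*
 * Worked example (illustration only, assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): one megabyte is 1 << (20 - 12) == 256 pages, so
 * TCMU_MBS_TO_PAGES(2) == 512 and TCMU_PAGES_TO_MBS(512) == 2. With
 * that page size, DATA_AREA_PAGES_DEF covers 1 GiB per device and
 * TCMU_GLOBAL_MAX_PAGES_DEF caps all devices at 2 GiB.
 */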

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;
	struct se_dev_plug se_plug;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
#define TCMU_DEV_BIT_PLUGGED 4
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	uint64_t dev_size;

	struct tcmu_mailbox *mb_addr;
	void *cmdr;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	int data_area_mb;
	uint32_t max_blocks;
	size_t mmap_pages;

	struct mutex cmdr_lock;
	struct list_head qfull_queue;
	struct list_head tmr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct xarray data_pages;
	uint32_t data_pages_per_blk;
	uint32_t data_blk_size;

	struct xarray commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;
	struct list_head inflight_queue;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_bidi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	uint32_t data_len_bidi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
#define TCMU_CMD_BIT_KEEP_BUF 1
	unsigned long flags;
};

struct tcmu_tmr {
	struct list_head queue_entry;

	uint8_t tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t tmr_cmd_ids[];
};

/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu device's "
		 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}
	return 0;
}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_small_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.policy = tcmu_attr_policy,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.small_ops = tcmu_genl_ops,
	.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
	.resv_start_op = TCMU_CMD_SET_FEATURES + 1,
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

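/*
 * Illustration only: cmd->dbi[] is filled once, when data blocks are
 * allocated, and then replayed through the dbi_cur cursor. A command
 * that got blocks 3, 4 and 7 ends up with dbi = {3, 4, 7};
 * tcmu_cmd_reset_dbi_cur() rewinds the cursor so a later pass (e.g.
 * the completion path) can walk the same blocks again via
 * tcmu_cmd_get_dbi().
 */
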
static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
				       struct tcmu_cmd *tcmu_cmd,
				       int prev_dbi, int length, int *iov_cnt)
{
	XA_STATE(xas, &udev->data_pages, 0);
	struct page *page;
	int i, cnt, dbi, dpi;
	int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return -1;

	dpi = dbi * udev->data_pages_per_blk;
	/* Count the number of already allocated pages */
	xas_set(&xas, dpi);
	rcu_read_lock();
	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
		cnt++;
	rcu_read_unlock();

	for (i = cnt; i < page_cnt; i++) {
		/* try to get a new zeroed page from the mm */
		page = alloc_page(GFP_NOIO | __GFP_ZERO);
		if (!page)
			break;

		if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
			__free_page(page);
			break;
		}
	}
	if (atomic_add_return(i - cnt, &global_page_count) >
	    tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, 0);

	if (i && dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	if (dbi != prev_dbi + 1)
		*iov_cnt += 1;

	return i == page_cnt ? dbi : -1;
}

static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
				 struct tcmu_cmd *tcmu_cmd, int length)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	int blk_data_len, iov_cnt = 0;
	uint32_t blk_size = udev->data_blk_size;

	for (; length > 0; length -= blk_size) {
		blk_data_len = min_t(uint32_t, length, blk_size);
		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
					   &iov_cnt);
		if (dbi < 0)
			return -1;
	}
	return iov_cnt;
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
	int i, len;
	struct se_cmd *se_cmd = cmd->se_cmd;
	uint32_t blk_size = cmd->tcmu_dev->data_blk_size;

	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
			len += se_cmd->t_bidi_data_sg[i].length;
		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
		cmd->data_len_bidi = len;
	}
}

static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int prev_dbi, int len)
{
	/* Get the next dbi */
	int dbi = tcmu_cmd_get_dbi(cmd);

	/* Do not add more than udev->data_blk_size to iov */
	len = min_t(int, len, udev->data_blk_size);

	/*
	 * The following code will gather and map the blocks to the same iovec
	 * when the blocks are all next to each other.
	 */
	if (dbi != prev_dbi + 1) {
		/* dbi is not next to previous dbi, so start new iov */
		if (prev_dbi >= 0)
			(*iov)++;
		/* write offset relative to mb_addr */
		(*iov)->iov_base = (void __user *)
				   (udev->data_off + dbi * udev->data_blk_size);
	}
	(*iov)->iov_len += len;

	return dbi;
}

static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int data_length)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;

	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
	for (; data_length > 0; data_length -= udev->data_blk_size)
		dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_set_block_cnts(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_NOIO);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size + offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
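
/*
 * Worked example (illustration only): with size == 8, head == 6 and
 * tail == 2, spc_used() == 4 and spc_free() == 8 - 4 - 1 == 3. After
 * head wraps, e.g. head == 1 and tail == 6, diff is -5, so
 * spc_used() == 8 + (-5) == 3 and spc_free() == 4. The reserved byte
 * keeps head == tail unambiguous: it always means "empty".
 */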

#define TCMU_SG_TO_DATA_AREA 1
#define TCMU_DATA_AREA_TO_SG 2

static inline void tcmu_copy_data(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd, uint32_t direction,
				  struct scatterlist *sg, unsigned int sg_nents,
				  struct iovec **iov, size_t data_len)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	size_t page_remaining, cp_len;
	int page_cnt, page_inx, dpi;
	struct sg_mapping_iter sg_iter;
	unsigned int sg_flags;
	struct page *page;
	void *data_page_start, *data_addr;

	if (direction == TCMU_SG_TO_DATA_AREA)
		sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
	else
		sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
	sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);

	while (data_len) {
		if (direction == TCMU_SG_TO_DATA_AREA)
			dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
					       data_len);
		else
			dbi = tcmu_cmd_get_dbi(tcmu_cmd);

		page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
		if (page_cnt > udev->data_pages_per_blk)
			page_cnt = udev->data_pages_per_blk;

		dpi = dbi * udev->data_pages_per_blk;
		for (page_inx = 0; page_inx < page_cnt && data_len;
		     page_inx++, dpi++) {
			page = xa_load(&udev->data_pages, dpi);

			if (direction == TCMU_DATA_AREA_TO_SG)
				flush_dcache_page(page);
			data_page_start = kmap_atomic(page);
			page_remaining = PAGE_SIZE;

			while (page_remaining && data_len) {
				if (!sg_miter_next(&sg_iter)) {
					/* set length to 0 to abort outer loop */
					data_len = 0;
					pr_debug("%s: aborting data copy due to exhausted sg_list\n",
						 __func__);
					break;
				}
				cp_len = min3(sg_iter.length, page_remaining,
					      data_len);

				data_addr = data_page_start +
					    PAGE_SIZE - page_remaining;
				if (direction == TCMU_SG_TO_DATA_AREA)
					memcpy(data_addr, sg_iter.addr, cp_len);
				else
					memcpy(sg_iter.addr, data_addr, cp_len);

				data_len -= cp_len;
				page_remaining -= cp_len;
				sg_iter.consumed = cp_len;
			}
			sg_miter_stop(&sg_iter);

			kunmap_atomic(data_page_start);
			if (direction == TCMU_SG_TO_DATA_AREA)
				flush_dcache_page(page);
		}
	}
}
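
/*
 * Worked example (illustration only, assuming 4 KiB pages and
 * data_pages_per_blk == 1): copying data_len == 6 KiB touches two data
 * blocks. The first iteration maps one page and the inner loop drains
 * 4 KiB from the sg iterator; the second iteration picks the next dbi
 * and copies the remaining 2 KiB. Because SG_MITER_ATOMIC is used, no
 * sleeping is allowed between sg_miter_next() and sg_miter_stop(),
 * which matches the kmap_atomic() mapping of the data page.
 */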

static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			      struct iovec **iov)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;

	tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
		       se_cmd->t_data_nents, iov, se_cmd->data_length);
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			     bool bidi, uint32_t read_len)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	struct scatterlist *data_sg;
	unsigned int data_nents;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For the bidi case, the first count blocks belong to the
		 * Data-Out buffer, so they must be skipped before gathering
		 * the Data-In buffer.
		 */
		tcmu_cmd_set_dbi_cur(tcmu_cmd,
				     tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
		       data_nents, NULL, read_len);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}
	return true;
}

/*
 * We have to allocate data buffers before we can queue a command.
 * Returns -1 on error (not enough space) or number of needed iovs on success
 *
 * Called with ring lock held.
 */
static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				 int *iov_bidi_cnt)
{
	int space, iov_cnt = 0, ret = 0;

	if (!cmd->dbi_cnt)
		goto wr_iov_cnts;

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < cmd->dbi_cnt) {
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < cmd->dbi_cnt) {
			pr_debug("no data space: only %lu available, but ask for %u\n",
				 blocks_left * udev->data_blk_size,
				 cmd->dbi_cnt * udev->data_blk_size);
			return -1;
		}

		udev->dbi_thresh += cmd->dbi_cnt;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
	if (iov_cnt < 0)
		return -1;

	if (cmd->dbi_bidi_cnt) {
		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
		if (ret < 0)
			return -1;
	}
wr_iov_cnts:
	*iov_bidi_cnt = ret;
	return iov_cnt + ret;
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
		   sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		       round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE - 1));

	return command_size;
}
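
/*
 * Worked example (illustration only, assuming TCMU_OP_ALIGN_SIZE ==
 * sizeof(__u64)): a command with a 10-byte CDB gets its CDB space
 * rounded up to 16 bytes. The base size is whichever is larger, the
 * fixed struct tcmu_cmd_entry or the entry grown to hold iov_cnt
 * iovecs, so an entry never shrinks below the ABI-visible struct.
 */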

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{
	if (!tmo)
		return;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	if (!timer_pending(timer))
		mod_timer(timer, tcmu_cmd->deadline);

	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;

	/*
	 * For backwards compat, if qfull_time_out is not set use
	 * cmd_time_out, and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
		 tcmu_cmd, udev->name);
	return 0;
}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_cmd_entry_hdr *hdr;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		hdr = udev->cmdr + cmd_head;
		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&hdr->len_op, pad_size);
		hdr->cmd_id = 0; /* not used for PAD */
		hdr->kflags = 0;
		hdr->uflags = 0;
		tcmu_flush_dcache_range(hdr, sizeof(*hdr));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	return cmd_head;
}
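
/*
 * Worked example (illustration only): with cmdr_size == 64 KiB,
 * cmd_head == 65408 and cmd_size == 256, head_to_end() is only 128
 * bytes, so a 128-byte PAD entry is written, cmd_head wraps to 0, and
 * the real entry then occupies contiguous space at the ring start.
 */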

static void tcmu_unplug_device(struct se_dev_plug *se_plug)
{
	struct se_device *se_dev = se_plug->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
	uio_event_notify(&udev->uio_info);
}

static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
		return &udev->se_plug;

	return NULL;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb = udev->mb_addr;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, iov_bidi_cnt;
	uint32_t cmd_id, cmd_head;
	uint64_t cdb_off;
	uint32_t blk_size = udev->data_blk_size;
	/* size of data buffer needed */
	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	if (!list_empty(&udev->qfull_queue))
		goto queue;

	if (data_length > (size_t)udev->max_blocks * blk_size) {
		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
			data_length, (size_t)udev->max_blocks * blk_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
	if (iov_cnt < 0)
		goto free_and_queue;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (command_size > (udev->cmdr_size / 2)) {
		pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
			command_size, udev->cmdr_size);
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, command_size))
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		goto free_and_queue;

	if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
		     GFP_NOWAIT) < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");

		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
		 tcmu_cmd, udev->name);

	cmd_head = ring_insert_padding(udev, command_size);

	entry = udev->cmdr + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* prepare iov list and copy data to data area if necessary */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];

	if (se_cmd->data_direction == DMA_TO_DEVICE ||
	    se_cmd->se_cmd_flags & SCF_BIDI)
		scatter_data_area(udev, tcmu_cmd, &iov);
	else
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);

	entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;

	/* Handle BIDI commands */
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
		entry->req.iov_bidi_cnt = iov_bidi_cnt;
	}

	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, command_size);

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

	if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
		uio_event_notify(&udev->uio_info);

	return 0;

free_and_queue:
	tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);

queue:
	if (add_to_qfull_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
	struct tcmu_tmr_entry *entry;
	int cmd_size;
	int id_list_sz;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		goto out_free;

	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);

	if (!list_empty(&udev->tmr_queue) ||
	    !is_ring_space_avail(udev, cmd_size)) {
		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
			 tmr, udev->name);
		return 1;
	}

	cmd_head = ring_insert_padding(udev, cmd_size);

	entry = udev->cmdr + cmd_head;
	memset(entry, 0, cmd_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
	entry->tmr_type = tmr->tmr_type;
	entry->cmd_cnt = tmr->tmr_cmd_cnt;
	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
	tcmu_flush_dcache_range(entry, cmd_size);

	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	uio_event_notify(&udev->uio_info);

out_free:
	kfree(tmr);

	return 0;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
	int ret = -1;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	if (!(se_cmd->transport_state & CMD_T_ABORTED))
		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	else
		se_cmd->priv = tcmu_cmd;
	mutex_unlock(&udev->cmdr_lock);
	return scsi_ret;
}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct tcmu_cmd *cmd;

	if (!list_empty(queue)) {
		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
		mod_timer(timer, cmd->deadline);
	} else
		del_timer(timer);
}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return TCMU_TMR_ABORT_TASK;
	case TMR_ABORT_TASK_SET:	return TCMU_TMR_ABORT_TASK_SET;
	case TMR_CLEAR_ACA:		return TCMU_TMR_CLEAR_ACA;
	case TMR_CLEAR_TASK_SET:	return TCMU_TMR_CLEAR_TASK_SET;
	case TMR_LUN_RESET:		return TCMU_TMR_LUN_RESET;
	case TMR_TARGET_WARM_RESET:	return TCMU_TMR_TARGET_WARM_RESET;
	case TMR_TARGET_COLD_RESET:	return TCMU_TMR_TARGET_COLD_RESET;
	case TMR_LUN_RESET_PRO:		return TCMU_TMR_LUN_RESET_PRO;
	default:			return TCMU_TMR_UNKNOWN;
	}
}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
		struct list_head *cmd_list)
{
	int i = 0, cmd_cnt = 0;
	bool unqueued = false;
	struct tcmu_cmd *cmd;
	struct se_cmd *se_cmd;
	struct tcmu_tmr *tmr;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	mutex_lock(&udev->cmdr_lock);

	/* First we check for aborted commands in qfull_queue */
	list_for_each_entry(se_cmd, cmd_list, state_list) {
		i++;
		if (!se_cmd->priv)
			continue;
		cmd = se_cmd->priv;
		/* Commands on qfull queue have no id yet */
		if (cmd->cmd_id) {
			cmd_cnt++;
			continue;
		}
		pr_debug("Removing aborted command %p from queue on dev %s.\n",
			 cmd, udev->name);

		list_del_init(&cmd->queue_entry);
		tcmu_free_cmd(cmd);
		se_cmd->priv = NULL;
		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
		unqueued = true;
	}
	if (unqueued)
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

	if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
		goto unlock;

	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

	tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
	if (!tmr)
		goto unlock;

	tmr->tmr_type = tcmu_tmr_type(tmf);
	tmr->tmr_cmd_cnt = cmd_cnt;

	if (cmd_cnt != 0) {
		cmd_cnt = 0;
		list_for_each_entry(se_cmd, cmd_list, state_list) {
			if (!se_cmd->priv)
				continue;
			cmd = se_cmd->priv;
			if (cmd->cmd_id)
				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
		}
	}

	queue_tmr_ring(udev, tmr);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
				   struct tcmu_cmd_entry *entry, bool keep_buf)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	bool read_len_valid = false;
	bool ret = true;
	uint32_t read_len;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		WARN_ON_ONCE(se_cmd);
		goto out;
	}
	if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
		pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
		       entry->hdr.cmd_id);
		set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
		ret = false;
		goto out;
	}

	list_del_init(&cmd->queue_entry);

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	read_len = se_cmd->data_length;
	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
		read_len_valid = true;
		if (entry->rsp.read_len < read_len)
			read_len = entry->rsp.read_len;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
		if (!read_len_valid)
			goto done;
		else
			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
	}
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true, read_len);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false, read_len);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

done:
	se_cmd->priv = NULL;
	if (read_len_valid) {
		pr_debug("read_len = %d\n", read_len);
		target_complete_cmd_with_length(cmd->se_cmd,
					entry->rsp.scsi_status, read_len);
	} else
		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	if (!keep_buf) {
		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
		tcmu_free_cmd(cmd);
	} else {
		/*
		 * Keep this command after completion, since userspace still
		 * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
		 * and reset a potential TCMU_CMD_BIT_EXPIRED, so we don't
		 * accept a second completion later.
		 * Userspace can free the buffer later by writing the cmd_id
		 * to the new action attribute free_kept_buf.
		 */
		clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
		set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
	}
	return ret;
}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;
	LIST_HEAD(tmrs);

	if (list_empty(&udev->tmr_queue))
		return 1;

	pr_debug("running %s's tmr queue\n", udev->name);

	list_splice_init(&udev->tmr_queue, &tmrs);

	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
		list_del_init(&tmr->queue_entry);

		pr_debug("removing tmr %p on dev %s from queue\n",
			 tmr, udev->name);

		if (queue_tmr_ring(udev, tmr)) {
			pr_debug("ran out of space during tmr queue run\n");
			/*
			 * tmr was requeued, so just put all tmrs back in
			 * the queue
			 */
			list_splice_tail(&tmrs, &udev->tmr_queue);
			return 0;
		}
	}

	return 1;
}

static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	bool free_space = false;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return false;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
		bool keep_buf;

		/*
		 * Flush max. up to end of cmd ring since current entry might
		 * be a padding that is shorter than sizeof(*entry)
		 */
		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
					       udev->cmdr_size);
		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
					ring_left : sizeof(*entry));

		free_space = true;

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
		if (keep_buf)
			cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
		else
			cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			return false;
		}

		if (!tcmu_handle_completion(cmd, entry, keep_buf))
			break;

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);
	}
	if (free_space)
		free_space = tcmu_run_tmr_queue(udev);

	if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
	    xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
		/*
		 * Allocated blocks exceeded global block limit, currently no
		 * more pending or waiting commands so try to reclaim blocks.
		 */
		schedule_delayed_work(&tcmu_unmap_work, 0);
	}
	if (udev->cmd_time_out)
		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);

	return free_space;
}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	se_cmd->priv = NULL;
	cmd->se_cmd = NULL;

	pr_debug("Timing out inflight cmd %u on dev %s.\n",
		 cmd->cmd_id, cmd->tcmu_dev->name);

	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	pr_debug("Timing out queued cmd %p on dev %s.\n",
		 cmd, cmd->tcmu_dev->name);

	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	tcmu_free_cmd(cmd);

	se_cmd->priv = NULL;
	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	udev->qfull_time_out = -1;

	udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
	udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
	udev->cmdr_size = CMDR_SIZE_DEF;
	udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);

	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->node);
	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->qfull_queue);
	INIT_LIST_HEAD(&udev->tmr_queue);
	INIT_LIST_HEAD(&udev->inflight_queue);
	xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	xa_init(&udev->data_pages);

	return &udev->se_dev;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
	    test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
			       unsigned long last)
{
	struct page *page;
	unsigned long dpi;
	u32 pages_freed = 0;

	first = first * udev->data_pages_per_blk;
	last = (last + 1) * udev->data_pages_per_blk - 1;
	xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
		xa_erase(&udev->data_pages, dpi);
		/*
		 * While reaching here there may be page faults occurring on
		 * the to-be-released pages. A race condition may occur if
		 * unmap_mapping_range() is called before page faults on these
		 * pages have completed; a valid but stale map is created.
		 *
		 * If another command subsequently runs and needs to extend
		 * dbi_thresh, it may reuse the slot corresponding to the
		 * previous page in data_bitmap. Though we will allocate a new
		 * page for the slot in data_area, no page fault will happen
		 * because we have a valid map. Therefore the command's data
		 * will be lost.
		 *
		 * We lock and unlock pages that are to be released to ensure
		 * all page faults have completed. This way
		 * unmap_mapping_range() can ensure stale maps are cleanly
		 * removed.
		 */
		lock_page(page);
		unlock_page(page);
		__free_page(page);
		pages_freed++;
	}

	atomic_sub(pages_freed, &global_page_count);

	return pages_freed;
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;

	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
		list_del_init(&tmr->queue_entry);
		kfree(tmr);
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	unsigned long i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	xa_for_each(&udev->commands, i, cmd) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	/* There can be left over TMR cmds. Remove them. */
	tcmu_remove_all_queued_tmr(udev);
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	xa_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(udev, 0, udev->dbi_max);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	pr_debug("dev_kref_release\n");

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
1749
61fb2482 1750static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
af1dd7ff
MC
1751{
1752 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1753 LIST_HEAD(cmds);
af1dd7ff
MC
1754 sense_reason_t scsi_ret;
1755 int ret;
1756
a94a2572 1757 if (list_empty(&udev->qfull_queue))
61fb2482 1758 return;
af1dd7ff 1759
892782ca 1760 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
af1dd7ff 1761
a94a2572 1762 list_splice_init(&udev->qfull_queue, &cmds);
af1dd7ff 1763
a94a2572
XL
1764 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1765 list_del_init(&tcmu_cmd->queue_entry);
af1dd7ff 1766
61fb2482
BS
1767 pr_debug("removing cmd %p on dev %s from queue\n",
1768 tcmu_cmd, udev->name);
af1dd7ff 1769
892782ca 1770 if (fail) {
892782ca
MC
1771 /*
1772 * We were not able to even start the command, so
1773	 * fail with busy to allow a retry in case the runner
1774 * was only temporarily down. If the device is being
1775 * removed then LIO core will do the right thing and
1776 * fail the retry.
1777 */
780e1384 1778 tcmu_cmd->se_cmd->priv = NULL;
892782ca
MC
1779 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1780 tcmu_free_cmd(tcmu_cmd);
1781 continue;
1782 }
1783
af1dd7ff
MC
1784 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1785 if (ret < 0) {
61fb2482
BS
1786 pr_debug("cmd %p on dev %s failed with %u\n",
1787 tcmu_cmd, udev->name, scsi_ret);
af1dd7ff
MC
1788 /*
1789 * Ignore scsi_ret for now. target_complete_cmd
1790 * drops it.
1791 */
780e1384 1792 tcmu_cmd->se_cmd->priv = NULL;
af1dd7ff
MC
1793 target_complete_cmd(tcmu_cmd->se_cmd,
1794 SAM_STAT_CHECK_CONDITION);
1795 tcmu_free_cmd(tcmu_cmd);
1796 } else if (ret > 0) {
1797 pr_debug("ran out of space during cmdr queue run\n");
1798 /*
1799 * cmd was requeued, so just put all cmds back in
1800 * the queue
1801 */
a94a2572 1802 list_splice_tail(&cmds, &udev->qfull_queue);
a94a2572 1803 break;
af1dd7ff
MC
1804 }
1805 }
a94a2572
XL
1806
1807 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
af1dd7ff
MC
1808}
1809
7c9e7a6f
AG
1810static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1811{
af1dd7ff 1812 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
7c9e7a6f 1813
af1dd7ff 1814 mutex_lock(&udev->cmdr_lock);
bc2d214a
BS
1815 if (tcmu_handle_completions(udev))
1816 run_qfull_queue(udev, false);
af1dd7ff 1817 mutex_unlock(&udev->cmdr_lock);
7c9e7a6f
AG
1818
1819 return 0;
1820}
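tcmu_irqcontrol() is reached through UIO's write() path. The snippet below is an editor's sketch of the user-space side of the kick/wait protocol under standard UIO semantics; the function names and the omitted error handling are assumptions made for brevity.

#include <stdint.h>
#include <unistd.h>

/* Tell the kernel we made progress; this ends up in tcmu_irqcontrol(). */
static void tcmu_kick(int uio_fd)
{
	uint32_t val = 1;		/* UIO expects exactly a 4-byte write */

	(void)write(uio_fd, &val, sizeof(val));
}

/* Block until the kernel posts a UIO event (new work is available). */
static void tcmu_wait(int uio_fd)
{
	uint32_t event_count;

	(void)read(uio_fd, &event_count, sizeof(event_count));
}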
1821
1822/*
1823 * mmap code from uio.c. Copied here because we want to hook mmap()
1824 * and this stuff must come along.
1825 */
1826static int tcmu_find_mem_index(struct vm_area_struct *vma)
1827{
1828 struct tcmu_dev *udev = vma->vm_private_data;
1829 struct uio_info *info = &udev->uio_info;
1830
1831 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1832 if (info->mem[vma->vm_pgoff].size == 0)
1833 return -1;
1834 return (int)vma->vm_pgoff;
1835 }
1836 return -1;
1837}
1838
8b084d9d 1839static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
b6df4b79
XL
1840{
1841 struct page *page;
b6df4b79
XL
1842
1843 mutex_lock(&udev->cmdr_lock);
3722e36c 1844 page = xa_load(&udev->data_pages, dpi);
b6df4b79 1845 if (likely(page)) {
a6968f7a 1846 get_page(page);
bb9b9eb0 1847 lock_page(page);
b6df4b79
XL
1848 mutex_unlock(&udev->cmdr_lock);
1849 return page;
1850 }
1851
1852 /*
c1c390ba
MC
1853	 * Userspace messed up and passed in an address not in the
1854	 * data iov passed to it.
b6df4b79 1855 */
8b084d9d
BS
1856 pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
1857 dpi, udev->name);
b6df4b79
XL
1858 mutex_unlock(&udev->cmdr_lock);
1859
8b084d9d 1860 return NULL;
b6df4b79
XL
1861}
1862
8f33bb24
BS
1863static void tcmu_vma_open(struct vm_area_struct *vma)
1864{
1865 struct tcmu_dev *udev = vma->vm_private_data;
1866
1867 pr_debug("vma_open\n");
1868
1869 kref_get(&udev->kref);
1870}
1871
1872static void tcmu_vma_close(struct vm_area_struct *vma)
1873{
1874 struct tcmu_dev *udev = vma->vm_private_data;
1875
1876 pr_debug("vma_close\n");
1877
1878 /* release ref from tcmu_vma_open */
1879 kref_put(&udev->kref, tcmu_dev_kref_release);
1880}
1881
69589c9b 1882static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
7c9e7a6f 1883{
11bac800 1884 struct tcmu_dev *udev = vmf->vma->vm_private_data;
7c9e7a6f
AG
1885 struct uio_info *info = &udev->uio_info;
1886 struct page *page;
1887 unsigned long offset;
1888 void *addr;
bb9b9eb0 1889 vm_fault_t ret = 0;
7c9e7a6f 1890
11bac800 1891 int mi = tcmu_find_mem_index(vmf->vma);
7c9e7a6f
AG
1892 if (mi < 0)
1893 return VM_FAULT_SIGBUS;
1894
1895 /*
1896 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1897 * to use mem[N].
1898 */
1899 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1900
141685a3
XL
1901 if (offset < udev->data_off) {
1902 /* For the vmalloc()ed cmd area pages */
1903 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
7c9e7a6f 1904 page = vmalloc_to_page(addr);
a6968f7a 1905 get_page(page);
141685a3 1906 } else {
8b084d9d 1907 uint32_t dpi;
141685a3 1908
b6df4b79 1909 /* For the dynamically growing data area pages */
8b084d9d
BS
1910 dpi = (offset - udev->data_off) / PAGE_SIZE;
1911 page = tcmu_try_get_data_page(udev, dpi);
b6df4b79 1912 if (!page)
c1c390ba 1913 return VM_FAULT_SIGBUS;
bb9b9eb0 1914 ret = VM_FAULT_LOCKED;
141685a3
XL
1915 }
1916
7c9e7a6f 1917 vmf->page = page;
bb9b9eb0 1918 return ret;
7c9e7a6f
AG
1919}
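To make the offset arithmetic in the fault handler concrete, here is an illustrative example assuming 4 KiB pages and the default mailbox-plus-command-ring size.

/*
 * Illustrative only (4 KiB pages, default cmdr_size):
 *	data_off = cmdr_size + CMDR_OFF = 8 MiB
 * A fault at offset 8 MiB + 3 * PAGE_SIZE yields dpi = 3 and is
 * served from xa_load(&udev->data_pages, 3); a fault below 8 MiB
 * lands in the vmalloc()ed mailbox + command ring region and is
 * resolved with vmalloc_to_page().
 */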
1920
1921static const struct vm_operations_struct tcmu_vm_ops = {
8f33bb24
BS
1922 .open = tcmu_vma_open,
1923 .close = tcmu_vma_close,
7c9e7a6f
AG
1924 .fault = tcmu_vma_fault,
1925};
1926
1927static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1928{
1929 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1930
1c71222e 1931 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
7c9e7a6f
AG
1932 vma->vm_ops = &tcmu_vm_ops;
1933
1934 vma->vm_private_data = udev;
1935
1936 /* Ensure the mmap is exactly the right size */
ecddbb7e 1937 if (vma_pages(vma) != udev->mmap_pages)
7c9e7a6f
AG
1938 return -EINVAL;
1939
8f33bb24
BS
1940 tcmu_vma_open(vma);
1941
7c9e7a6f
AG
1942 return 0;
1943}
1944
1945static int tcmu_open(struct uio_info *info, struct inode *inode)
1946{
1947 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1948
1949 /* O_EXCL not supported for char devs, so fake it? */
1950 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1951 return -EBUSY;
1952
b6df4b79
XL
1953 udev->inode = inode;
1954
7c9e7a6f
AG
1955 pr_debug("open\n");
1956
1957 return 0;
1958}
1959
1960static int tcmu_release(struct uio_info *info, struct inode *inode)
1961{
1962 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
018c1491
BS
1963 struct tcmu_cmd *cmd;
1964 unsigned long i;
1965 bool freed = false;
1966
1967 mutex_lock(&udev->cmdr_lock);
1968
1969 xa_for_each(&udev->commands, i, cmd) {
1970	 /* Cmds with KEEP_BUF set are no longer on the ring, but
1971	 * userspace still holds the data buffer. If userspace closes,
1972	 * we implicitly free these cmds and buffers, since after a new
1973	 * open the (possibly new) userspace cannot find the cmd in the
1974	 * ring and thus will never release the buffer by writing its
1975	 * cmd_id to the free_kept_buf action attribute.
1976 */
1977 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
1978 continue;
1979 pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
1980 cmd->cmd_id, udev->name);
1981 freed = true;
1982
1983 xa_erase(&udev->commands, i);
1984 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1985 tcmu_free_cmd(cmd);
1986 }
1987 /*
1988	 * We only freed data space, not ring space. Therefore we don't call
1989 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
1990 */
1991 if (freed && list_empty(&udev->tmr_queue))
1992 run_qfull_queue(udev, false);
1993
1994 mutex_unlock(&udev->cmdr_lock);
7c9e7a6f
AG
1995
1996 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1997
1998 pr_debug("close\n");
8f33bb24 1999
7c9e7a6f
AG
2000 return 0;
2001}
2002
9de3a1ef 2003static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
b3af66e2
MC
2004{
2005 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2006
2007 if (!tcmu_kern_cmd_reply_supported)
9de3a1ef 2008 return 0;
b849b456
KN
2009
2010 if (udev->nl_reply_supported <= 0)
9de3a1ef 2011 return 0;
b849b456 2012
3228691f 2013 mutex_lock(&tcmu_nl_cmd_mutex);
b849b456 2014
bdaeedc1
MC
2015 if (tcmu_netlink_blocked) {
2016 mutex_unlock(&tcmu_nl_cmd_mutex);
2017 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
2018 udev->name);
2019 return -EAGAIN;
2020 }
b3af66e2
MC
2021
2022 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
3228691f 2023 mutex_unlock(&tcmu_nl_cmd_mutex);
9de3a1ef
MC
2024 pr_warn("netlink cmd %d already executing on %s\n",
2025 nl_cmd->cmd, udev->name);
2026 return -EBUSY;
b3af66e2
MC
2027 }
2028
2029 memset(nl_cmd, 0, sizeof(*nl_cmd));
2030 nl_cmd->cmd = cmd;
3228691f 2031 nl_cmd->udev = udev;
b3af66e2 2032 init_completion(&nl_cmd->complete);
3228691f
MC
2033 INIT_LIST_HEAD(&nl_cmd->nl_list);
2034
2035 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
b3af66e2 2036
3228691f 2037 mutex_unlock(&tcmu_nl_cmd_mutex);
9de3a1ef 2038 return 0;
b3af66e2
MC
2039}
2040
7d894862
LZ
2041static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
2042{
2043 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2044
2045 if (!tcmu_kern_cmd_reply_supported)
2046 return;
2047
2048 if (udev->nl_reply_supported <= 0)
2049 return;
2050
2051 mutex_lock(&tcmu_nl_cmd_mutex);
2052
2053 list_del(&nl_cmd->nl_list);
2054 memset(nl_cmd, 0, sizeof(*nl_cmd));
2055
2056 mutex_unlock(&tcmu_nl_cmd_mutex);
2057}
2058
b3af66e2
MC
2059static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
2060{
2061 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2062 int ret;
b3af66e2
MC
2063
2064 if (!tcmu_kern_cmd_reply_supported)
2065 return 0;
2066
b849b456
KN
2067 if (udev->nl_reply_supported <= 0)
2068 return 0;
2069
b3af66e2
MC
2070 pr_debug("sleeping for nl reply\n");
2071 wait_for_completion(&nl_cmd->complete);
2072
3228691f 2073 mutex_lock(&tcmu_nl_cmd_mutex);
b3af66e2
MC
2074 nl_cmd->cmd = TCMU_CMD_UNSPEC;
2075 ret = nl_cmd->status;
3228691f 2076 mutex_unlock(&tcmu_nl_cmd_mutex);
b3af66e2 2077
85fae482 2078 return ret;
b3af66e2
MC
2079}
2080
0e5aee39
ZL
2081static int tcmu_netlink_event_init(struct tcmu_dev *udev,
2082 enum tcmu_genl_cmd cmd,
2083 struct sk_buff **buf, void **hdr)
7c9e7a6f
AG
2084{
2085 struct sk_buff *skb;
2086 void *msg_header;
6e14eab9 2087 int ret = -ENOMEM;
7c9e7a6f
AG
2088
2089 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2090 if (!skb)
6e14eab9 2091 return ret;
7c9e7a6f
AG
2092
2093 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
6e14eab9
NB
2094 if (!msg_header)
2095 goto free_skb;
7c9e7a6f 2096
b3af66e2 2097 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
6e14eab9
NB
2098 if (ret < 0)
2099 goto free_skb;
7c9e7a6f 2100
b3af66e2
MC
2101 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
2102 if (ret < 0)
2103 goto free_skb;
2104
2105 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
6e14eab9
NB
2106 if (ret < 0)
2107 goto free_skb;
7c9e7a6f 2108
0e5aee39
ZL
2109 *buf = skb;
2110 *hdr = msg_header;
2111 return ret;
2d76443e 2112
0e5aee39
ZL
2113free_skb:
2114 nlmsg_free(skb);
2115 return ret;
2116}
2117
2118static int tcmu_netlink_event_send(struct tcmu_dev *udev,
2119 enum tcmu_genl_cmd cmd,
06add777 2120 struct sk_buff *skb, void *msg_header)
0e5aee39 2121{
06add777 2122 int ret;
8a45885c 2123
053c095a 2124 genlmsg_end(skb, msg_header);
7c9e7a6f 2125
9de3a1ef
MC
2126 ret = tcmu_init_genl_cmd_reply(udev, cmd);
2127 if (ret) {
2128 nlmsg_free(skb);
2129 return ret;
2130 }
b3af66e2 2131
20c08b36 2132 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
0e5aee39 2133 TCMU_MCGRP_CONFIG, GFP_KERNEL);
2ff717cd
CA
2134
2135 /* Wait during an add as the listener may not be up yet */
2136 if (ret == 0 ||
2137 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
2138 return tcmu_wait_genl_cmd_reply(udev);
7d894862
LZ
2139 else
2140 tcmu_destroy_genl_cmd_reply(udev);
2ff717cd 2141
6e14eab9 2142 return ret;
7c9e7a6f
AG
2143}
2144
e0c240ac
ZL
2145static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
2146{
2147 struct sk_buff *skb = NULL;
2148 void *msg_header = NULL;
2149 int ret = 0;
2150
2151 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
2152 &msg_header);
2153 if (ret < 0)
2154 return ret;
06add777
MC
2155 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
2156 msg_header);
e0c240ac
ZL
2157}
2158
f892bd8e
ZL
2159static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
2160{
2161 struct sk_buff *skb = NULL;
2162 void *msg_header = NULL;
2163 int ret = 0;
2164
2165 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
2166 &skb, &msg_header);
2167 if (ret < 0)
2168 return ret;
2169 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
06add777 2170 skb, msg_header);
f892bd8e
ZL
2171}
2172
de8c5221 2173static int tcmu_update_uio_info(struct tcmu_dev *udev)
7c9e7a6f 2174{
7c9e7a6f
AG
2175 struct tcmu_hba *hba = udev->hba->hba_ptr;
2176 struct uio_info *info;
7c9e7a6f
AG
2177 char *str;
2178
2179 info = &udev->uio_info;
7c9e7a6f 2180
7c9e7a6f 2181 if (udev->dev_config[0])
22c2f35f
CJ
2182 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
2183 udev->name, udev->dev_config);
2184 else
2185 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
2186 udev->name);
2187 if (!str)
2188 return -ENOMEM;
7c9e7a6f 2189
ededd039
BL
2190 /* If the old string exists, free it */
2191 kfree(info->name);
7c9e7a6f
AG
2192 info->name = str;
2193
de8c5221
BL
2194 return 0;
2195}
2196
2197static int tcmu_configure_device(struct se_device *dev)
2198{
2199 struct tcmu_dev *udev = TCMU_DEV(dev);
2200 struct uio_info *info;
2201 struct tcmu_mailbox *mb;
f5ce815f 2202 size_t data_size;
de8c5221
BL
2203 int ret = 0;
2204
2205 ret = tcmu_update_uio_info(udev);
2206 if (ret)
2207 return ret;
2208
2209 info = &udev->uio_info;
2210
c97840c8 2211 mutex_lock(&udev->cmdr_lock);
98effe47 2212 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
c97840c8 2213 mutex_unlock(&udev->cmdr_lock);
a24e7917
WY
2214 if (!udev->data_bitmap) {
2215 ret = -ENOMEM;
80eb8761 2216 goto err_bitmap_alloc;
a24e7917 2217 }
80eb8761 2218
c7ede4f0 2219 mb = vzalloc(udev->cmdr_size + CMDR_OFF);
ecddbb7e 2220 if (!mb) {
7c9e7a6f
AG
2221 ret = -ENOMEM;
2222 goto err_vzalloc;
2223 }
2224
2225 /* mailbox fits in first part of CMDR space */
ecddbb7e
BS
2226 udev->mb_addr = mb;
2227 udev->cmdr = (void *)mb + CMDR_OFF;
c7ede4f0 2228 udev->data_off = udev->cmdr_size + CMDR_OFF;
f5ce815f 2229 data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
c7ede4f0 2230 udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
e719afdc 2231 udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
b6df4b79 2232 udev->dbi_thresh = 0; /* Default in Idle state */
7c9e7a6f 2233
141685a3 2234 /* Initialise the mailbox of the ring buffer */
0ad46af8 2235 mb->version = TCMU_MAILBOX_VERSION;
bc2d214a
BS
2236 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
2237 TCMU_MAILBOX_FLAG_CAP_READ_LEN |
018c1491
BS
2238 TCMU_MAILBOX_FLAG_CAP_TMR |
2239 TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
7c9e7a6f
AG
2240 mb->cmdr_off = CMDR_OFF;
2241 mb->cmdr_size = udev->cmdr_size;
2242
2243 WARN_ON(!PAGE_ALIGNED(udev->data_off));
f5ce815f 2244 WARN_ON(data_size % PAGE_SIZE);
7c9e7a6f 2245
ac64a2ce 2246 info->version = __stringify(TCMU_MAILBOX_VERSION);
7c9e7a6f
AG
2247
2248 info->mem[0].name = "tcm-user command & data buffer";
0633e123 2249 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
c7ede4f0 2250 info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
141685a3 2251 info->mem[0].memtype = UIO_MEM_NONE;
7c9e7a6f
AG
2252
2253 info->irqcontrol = tcmu_irqcontrol;
2254 info->irq = UIO_IRQ_CUSTOM;
2255
2256 info->mmap = tcmu_mmap;
2257 info->open = tcmu_open;
2258 info->release = tcmu_release;
2259
2260 ret = uio_register_device(tcmu_root_device, info);
2261 if (ret)
2262 goto err_register;
2263
81ee28de
SY
2264	 /* User can set hw_block_size before enabling the device */
2265 if (dev->dev_attrib.hw_block_size == 0)
2266 dev->dev_attrib.hw_block_size = 512;
7c9e7a6f 2267 /* Other attributes can be configured in userspace */
3abaa2bf
MC
2268 if (!dev->dev_attrib.hw_max_sectors)
2269 dev->dev_attrib.hw_max_sectors = 128;
9a8bb606
BL
2270 if (!dev->dev_attrib.emulate_write_cache)
2271 dev->dev_attrib.emulate_write_cache = 0;
7c9e7a6f
AG
2272 dev->dev_attrib.hw_queue_depth = 128;
2273
b849b456
KN
2274	 /* If the user didn't explicitly disable netlink reply support, use
2275	 * the module-scope setting.
2276 */
2277 if (udev->nl_reply_supported >= 0)
2278 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
2279
f3cdbe39
MC
2280 /*
2281	 * Get a ref in case userspace does a close on the uio device before
2282 * LIO has initiated tcmu_free_device.
2283 */
2284 kref_get(&udev->kref);
2285
e0c240ac 2286 ret = tcmu_send_dev_add_event(udev);
7c9e7a6f
AG
2287 if (ret)
2288 goto err_netlink;
2289
b6df4b79
XL
2290 mutex_lock(&root_udev_mutex);
2291 list_add(&udev->node, &root_udev);
2292 mutex_unlock(&root_udev_mutex);
2293
7c9e7a6f
AG
2294 return 0;
2295
2296err_netlink:
f3cdbe39 2297 kref_put(&udev->kref, tcmu_dev_kref_release);
7c9e7a6f
AG
2298 uio_unregister_device(&udev->uio_info);
2299err_register:
2300 vfree(udev->mb_addr);
c22adc0b 2301 udev->mb_addr = NULL;
7c9e7a6f 2302err_vzalloc:
98effe47 2303 bitmap_free(udev->data_bitmap);
80eb8761
MC
2304 udev->data_bitmap = NULL;
2305err_bitmap_alloc:
7c9e7a6f 2306 kfree(info->name);
f3cdbe39 2307 info->name = NULL;
7c9e7a6f
AG
2308
2309 return ret;
2310}
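For orientation, a minimal sketch of the matching user-space mapping is shown below. It assumes the handler has already discovered the UIO device node and read the size of map 0 from sysfs; the struct and function names are illustrative and are not part of this driver.

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/target_core_user.h>

struct tcmu_handler_map {
	int fd;				/* kept open for the kick/wait protocol */
	struct tcmu_mailbox *mb;	/* offset 0 of map 0 */
	void *cmdr;			/* command ring: mb->cmdr_off into the map */
	void *data;			/* data area: right after the command ring */
};

/* path is e.g. "/dev/uio0"; size is the full map 0 size (assumptions). */
static int tcmu_handler_map_dev(struct tcmu_handler_map *m,
				const char *path, size_t size)
{
	m->fd = open(path, O_RDWR);
	if (m->fd < 0)
		return -1;

	m->mb = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, m->fd, 0);
	if (m->mb == MAP_FAILED) {
		close(m->fd);
		return -1;
	}

	m->cmdr = (char *)m->mb + m->mb->cmdr_off;
	m->data = (char *)m->mb + m->mb->cmdr_off + m->mb->cmdr_size;
	return 0;
}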
2311
7c9e7a6f 2312static void tcmu_free_device(struct se_device *dev)
92634706
MC
2313{
2314 struct tcmu_dev *udev = TCMU_DEV(dev);
2315
2316 /* release ref from init */
2317 kref_put(&udev->kref, tcmu_dev_kref_release);
2318}
2319
2320static void tcmu_destroy_device(struct se_device *dev)
7c9e7a6f
AG
2321{
2322 struct tcmu_dev *udev = TCMU_DEV(dev);
7c9e7a6f 2323
9103575a
MC
2324 del_timer_sync(&udev->cmd_timer);
2325 del_timer_sync(&udev->qfull_timer);
7c9e7a6f 2326
b6df4b79
XL
2327 mutex_lock(&root_udev_mutex);
2328 list_del(&udev->node);
2329 mutex_unlock(&root_udev_mutex);
2330
f892bd8e 2331 tcmu_send_dev_remove_event(udev);
7c9e7a6f 2332
531283ff 2333 uio_unregister_device(&udev->uio_info);
9260695d
MC
2334
2335 /* release ref from configure */
2336 kref_put(&udev->kref, tcmu_dev_kref_release);
7c9e7a6f
AG
2337}
2338
892782ca
MC
2339static void tcmu_unblock_dev(struct tcmu_dev *udev)
2340{
2341 mutex_lock(&udev->cmdr_lock);
2342 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
2343 mutex_unlock(&udev->cmdr_lock);
2344}
2345
2346static void tcmu_block_dev(struct tcmu_dev *udev)
2347{
2348 mutex_lock(&udev->cmdr_lock);
2349
2350 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2351 goto unlock;
2352
2353 /* complete IO that has executed successfully */
2354 tcmu_handle_completions(udev);
2355 /* fail IO waiting to be queued */
a94a2572 2356 run_qfull_queue(udev, true);
892782ca
MC
2357
2358unlock:
2359 mutex_unlock(&udev->cmdr_lock);
2360}
2361
2362static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2363{
2364 struct tcmu_mailbox *mb;
2365 struct tcmu_cmd *cmd;
d3cbb743 2366 unsigned long i;
892782ca
MC
2367
2368 mutex_lock(&udev->cmdr_lock);
2369
d3cbb743 2370 xa_for_each(&udev->commands, i, cmd) {
018c1491
BS
2371 pr_debug("removing cmd %u on dev %s from ring %s\n",
2372 cmd->cmd_id, udev->name,
2373 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
2374 "(is expired)" :
2375 (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
2376 "(is keep buffer)" : ""));
892782ca 2377
d3cbb743 2378 xa_erase(&udev->commands, i);
018c1491
BS
2379 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
2380 !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
a86a7586 2381 WARN_ON(!cmd->se_cmd);
a94a2572 2382 list_del_init(&cmd->queue_entry);
780e1384 2383 cmd->se_cmd->priv = NULL;
892782ca
MC
2384 if (err_level == 1) {
2385 /*
2386 * Userspace was not able to start the
2387 * command or it is retryable.
2388 */
2389 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2390 } else {
2391 /* hard failure */
2392 target_complete_cmd(cmd->se_cmd,
2393 SAM_STAT_CHECK_CONDITION);
2394 }
2395 }
2396 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2397 tcmu_free_cmd(cmd);
2398 }
2399
2400 mb = udev->mb_addr;
2401 tcmu_flush_dcache_range(mb, sizeof(*mb));
2402 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2403 mb->cmd_tail, mb->cmd_head);
2404
2405 udev->cmdr_last_cleaned = 0;
2406 mb->cmd_tail = 0;
2407 mb->cmd_head = 0;
2408 tcmu_flush_dcache_range(mb, sizeof(*mb));
066f79a5 2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
892782ca
MC
2410
2411 del_timer(&udev->cmd_timer);
2412
bc2d214a
BS
2413 /*
2414	 * The ring is now empty and the qfull queue never contains aborted
2415	 * commands, so the TMRs in the tmr queue do not reference relevant
2416	 * cmd_ids. After a ring reset userspace should do a fresh start, so
2417	 * even a LUN RESET message is no longer relevant.
2418	 * Therefore remove all TMRs from the tmr queue.
2419 */
2420 tcmu_remove_all_queued_tmr(udev);
2421
61fb2482
BS
2422 run_qfull_queue(udev, false);
2423
892782ca
MC
2424 mutex_unlock(&udev->cmdr_lock);
2425}
2426
7c9e7a6f 2427enum {
3abaa2bf 2428 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
08976cb5 2429 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
c7ede4f0 2430 Opt_cmd_ring_size_mb, Opt_err,
7c9e7a6f
AG
2431};
2432
2433static match_table_t tokens = {
2434 {Opt_dev_config, "dev_config=%s"},
0e0d7526 2435 {Opt_dev_size, "dev_size=%s"},
b60cb1f8
MC
2436 {Opt_hw_block_size, "hw_block_size=%d"},
2437 {Opt_hw_max_sectors, "hw_max_sectors=%d"},
b849b456 2438 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
c97840c8 2439 {Opt_max_data_area_mb, "max_data_area_mb=%d"},
08976cb5 2440 {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
c7ede4f0 2441 {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
7c9e7a6f
AG
2442 {Opt_err, NULL}
2443};
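The option names above are parsed from a single comma- or newline-separated control string; the example below is purely illustrative (the dev_config payload is interpreted by the user-space handler, not by this driver).

/*
 * Example control string (illustrative values):
 *	dev_config=<handler-specific string>,dev_size=1073741824,
 *	max_data_area_mb=64,data_pages_per_blk=4,cmd_ring_size_mb=8
 */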
2444
3abaa2bf
MC
2445static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2446{
b60cb1f8 2447 int val, ret;
3abaa2bf 2448
b60cb1f8 2449 ret = match_int(arg, &val);
3abaa2bf 2450 if (ret < 0) {
b60cb1f8
MC
2451 pr_err("match_int() failed for dev attrib. Error %d.\n",
2452 ret);
3abaa2bf
MC
2453 return ret;
2454 }
b60cb1f8
MC
2455
2456 if (val <= 0) {
2457 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2458 val);
3abaa2bf
MC
2459 return -EINVAL;
2460 }
b60cb1f8 2461 *dev_attrib = val;
3abaa2bf
MC
2462 return 0;
2463}
2464
c97840c8
MC
2465static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2466{
f5ce815f 2467 int val, ret;
e719afdc 2468 uint32_t pages_per_blk = udev->data_pages_per_blk;
c97840c8
MC
2469
2470 ret = match_int(arg, &val);
2471 if (ret < 0) {
2472 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2473 ret);
2474 return ret;
2475 }
f5ce815f 2476 if (val <= 0) {
c97840c8
MC
2477 pr_err("Invalid max_data_area %d.\n", val);
2478 return -EINVAL;
2479 }
f5ce815f
BS
2480 if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
2481 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2482 val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
2483 val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
2484 }
e719afdc
BS
2485 if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
2486 pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
2487 val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
f5ce815f
BS
2488 return -EINVAL;
2489 }
c97840c8
MC
2490
2491 mutex_lock(&udev->cmdr_lock);
2492 if (udev->data_bitmap) {
2493 pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
2494 ret = -EINVAL;
2495 goto unlock;
2496 }
2497
f5ce815f 2498 udev->data_area_mb = val;
e719afdc 2499 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
c97840c8
MC
2500
2501unlock:
2502 mutex_unlock(&udev->cmdr_lock);
2503 return ret;
2504}
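A quick numeric check of the conversion above, assuming 4 KiB pages and the default data_pages_per_blk of 1.

/*
 * Illustrative: max_data_area_mb=64 with 4 KiB pages gives
 *	TCMU_MBS_TO_PAGES(64) = 64 << 8 = 16384 pages,
 * and with data_pages_per_blk == 1 this becomes
 *	udev->max_blocks = 16384.
 */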
2505
08976cb5
BS
2506static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
2507{
2508 int val, ret;
2509
2510 ret = match_int(arg, &val);
2511 if (ret < 0) {
2512 pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
2513 ret);
2514 return ret;
2515 }
2516
2517 if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
2518		pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d (%zd pages).\n",
2519 val, udev->data_area_mb,
2520 TCMU_MBS_TO_PAGES(udev->data_area_mb));
2521 return -EINVAL;
2522 }
2523
2524 mutex_lock(&udev->cmdr_lock);
2525 if (udev->data_bitmap) {
2526 pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
2527 ret = -EINVAL;
2528 goto unlock;
2529 }
2530
2531 udev->data_pages_per_blk = val;
2532 udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
2533
2534unlock:
2535 mutex_unlock(&udev->cmdr_lock);
2536 return ret;
2537}
2538
c7ede4f0
GL
2539static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
2540{
2541 int val, ret;
2542
2543 ret = match_int(arg, &val);
2544 if (ret < 0) {
2545 pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
2546 ret);
2547 return ret;
2548 }
2549
2550 if (val <= 0) {
2551 pr_err("Invalid cmd_ring_size_mb %d.\n", val);
2552 return -EINVAL;
2553 }
2554
2555 mutex_lock(&udev->cmdr_lock);
2556 if (udev->data_bitmap) {
2557 pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
2558 ret = -EINVAL;
2559 goto unlock;
2560 }
2561
2562 udev->cmdr_size = (val << 20) - CMDR_OFF;
2563 if (val > (MB_CMDR_SIZE_DEF >> 20)) {
2564 pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
2565 val, (MB_CMDR_SIZE_DEF >> 20));
2566 udev->cmdr_size = CMDR_SIZE_DEF;
2567 }
2568
2569unlock:
2570 mutex_unlock(&udev->cmdr_lock);
2571 return ret;
2572}
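For reference, the size arithmetic above round-trips with the cmd_ring_size_mb_show() attribute further down; an illustrative example:

/*
 * Illustrative: cmd_ring_size_mb=4 gives
 *	udev->cmdr_size = (4 << 20) - CMDR_OFF,
 * and the show path reports (cmdr_size + CMDR_OFF) >> 20 = 4 again.
 */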
2573
7c9e7a6f
AG
2574static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2575 const char *page, ssize_t count)
2576{
2577 struct tcmu_dev *udev = TCMU_DEV(dev);
0e0d7526 2578 char *orig, *ptr, *opts;
7c9e7a6f 2579 substring_t args[MAX_OPT_ARGS];
c97840c8 2580 int ret = 0, token;
7c9e7a6f
AG
2581
2582 opts = kstrdup(page, GFP_KERNEL);
2583 if (!opts)
2584 return -ENOMEM;
2585
2586 orig = opts;
2587
2588 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2589 if (!*ptr)
2590 continue;
2591
2592 token = match_token(ptr, tokens, args);
2593 switch (token) {
2594 case Opt_dev_config:
2595 if (match_strlcpy(udev->dev_config, &args[0],
2596 TCMU_CONFIG_LEN) == 0) {
2597 ret = -EINVAL;
2598 break;
2599 }
2600 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2601 break;
2602 case Opt_dev_size:
0e0d7526 2603 ret = match_u64(&args[0], &udev->dev_size);
7c9e7a6f 2604 if (ret < 0)
0e0d7526
MC
2605 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2606 ret);
7c9e7a6f 2607 break;
9c1cd1b6 2608 case Opt_hw_block_size:
3abaa2bf
MC
2609 ret = tcmu_set_dev_attrib(&args[0],
2610 &(dev->dev_attrib.hw_block_size));
2611 break;
2612 case Opt_hw_max_sectors:
2613 ret = tcmu_set_dev_attrib(&args[0],
2614 &(dev->dev_attrib.hw_max_sectors));
9c1cd1b6 2615 break;
b849b456 2616 case Opt_nl_reply_supported:
b60cb1f8 2617 ret = match_int(&args[0], &udev->nl_reply_supported);
b849b456 2618 if (ret < 0)
b60cb1f8
MC
2619 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2620 ret);
b849b456 2621 break;
80eb8761 2622 case Opt_max_data_area_mb:
c97840c8 2623 ret = tcmu_set_max_blocks_param(udev, &args[0]);
80eb8761 2624 break;
08976cb5
BS
2625 case Opt_data_pages_per_blk:
2626 ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
2627 break;
c7ede4f0
GL
2628 case Opt_cmd_ring_size_mb:
2629 ret = tcmu_set_cmd_ring_size(udev, &args[0]);
2630 break;
7c9e7a6f
AG
2631 default:
2632 break;
2633 }
2579325c
MC
2634
2635 if (ret)
2636 break;
7c9e7a6f
AG
2637 }
2638
2639 kfree(orig);
2640 return (!ret) ? count : ret;
2641}
2642
2643static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2644{
2645 struct tcmu_dev *udev = TCMU_DEV(dev);
2646 ssize_t bl = 0;
2647
2648 bl = sprintf(b + bl, "Config: %s ",
2649 udev->dev_config[0] ? udev->dev_config : "NULL");
0e0d7526 2650 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
08976cb5 2651 bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
c7ede4f0
GL
2652 bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
2653 bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
2654 (udev->cmdr_size + CMDR_OFF) >> 20);
7c9e7a6f
AG
2655
2656 return bl;
2657}
2658
2659static sector_t tcmu_get_blocks(struct se_device *dev)
2660{
2661 struct tcmu_dev *udev = TCMU_DEV(dev);
2662
2663 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2664 dev->dev_attrib.block_size);
2665}
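The value returned above is the index of the last logical block rather than the block count; a worked example with illustrative numbers:

/*
 * Illustrative: dev_size = 1 MiB and block_size = 512 gives
 *	(1048576 - 512) / 512 = 2047,
 * i.e. the highest addressable LBA of a 2048-block device.
 */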
2666
7c9e7a6f 2667static sense_reason_t
9c1cd1b6 2668tcmu_parse_cdb(struct se_cmd *cmd)
7c9e7a6f 2669{
02eb924f 2670 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
7c9e7a6f
AG
2671}
2672
7d7a7435
NB
2673static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2674{
2675 struct se_dev_attrib *da = container_of(to_config_group(item),
2676 struct se_dev_attrib, da_group);
b5ab697c 2677 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
7d7a7435
NB
2678
2679 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2680}
2681
2682static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2683 size_t count)
2684{
2685 struct se_dev_attrib *da = container_of(to_config_group(item),
2686 struct se_dev_attrib, da_group);
2687 struct tcmu_dev *udev = container_of(da->da_dev,
2688 struct tcmu_dev, se_dev);
2689 u32 val;
2690 int ret;
2691
2692 if (da->da_dev->export_count) {
2693 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2694 return -EINVAL;
2695 }
2696
2697 ret = kstrtou32(page, 0, &val);
2698 if (ret < 0)
2699 return ret;
2700
7d7a7435
NB
2701 udev->cmd_time_out = val * MSEC_PER_SEC;
2702 return count;
2703}
2704CONFIGFS_ATTR(tcmu_, cmd_time_out);
2705
9103575a
MC
2706static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2707{
2708 struct se_dev_attrib *da = container_of(to_config_group(item),
2709 struct se_dev_attrib, da_group);
2710 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2711
2712 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2713 udev->qfull_time_out :
2714 udev->qfull_time_out / MSEC_PER_SEC);
2715}
2716
2717static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2718 const char *page, size_t count)
2719{
2720 struct se_dev_attrib *da = container_of(to_config_group(item),
2721 struct se_dev_attrib, da_group);
2722 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2723 s32 val;
2724 int ret;
2725
2726 ret = kstrtos32(page, 0, &val);
2727 if (ret < 0)
2728 return ret;
2729
2730 if (val >= 0) {
2731 udev->qfull_time_out = val * MSEC_PER_SEC;
125966db
PKK
2732 } else if (val == -1) {
2733 udev->qfull_time_out = val;
9103575a
MC
2734 } else {
2735 printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
2736 return -EINVAL;
2737 }
2738 return count;
2739}
2740CONFIGFS_ATTR(tcmu_, qfull_time_out);
2741
80eb8761
MC
2742static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2743{
2744 struct se_dev_attrib *da = container_of(to_config_group(item),
2745 struct se_dev_attrib, da_group);
2746 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2747
f5ce815f 2748 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
80eb8761
MC
2749}
2750CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2751
08976cb5
BS
2752static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
2753 char *page)
2754{
2755 struct se_dev_attrib *da = container_of(to_config_group(item),
2756 struct se_dev_attrib, da_group);
2757 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2758
2759 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
2760}
2761CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
2762
c7ede4f0
GL
2763static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
2764{
2765 struct se_dev_attrib *da = container_of(to_config_group(item),
2766 struct se_dev_attrib, da_group);
2767 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2768
2769 return snprintf(page, PAGE_SIZE, "%u\n",
2770 (udev->cmdr_size + CMDR_OFF) >> 20);
2771}
2772CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
2773
2d76443e 2774static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
ee018252
BL
2775{
2776 struct se_dev_attrib *da = container_of(to_config_group(item),
2777 struct se_dev_attrib, da_group);
2778 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2779
2780 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2781}
2782
02ccfb54
ZL
2783static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2784 const char *reconfig_data)
2785{
2786 struct sk_buff *skb = NULL;
2787 void *msg_header = NULL;
2788 int ret = 0;
2789
2790 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2791 &skb, &msg_header);
2792 if (ret < 0)
2793 return ret;
2794 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2795 if (ret < 0) {
2796 nlmsg_free(skb);
2797 return ret;
2798 }
2799 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
06add777 2800 skb, msg_header);
02ccfb54
ZL
2801}
2802
2803
2d76443e
MC
2804static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2805 size_t count)
ee018252
BL
2806{
2807 struct se_dev_attrib *da = container_of(to_config_group(item),
2808 struct se_dev_attrib, da_group);
2809 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2d76443e 2810 int ret, len;
ee018252 2811
2d76443e
MC
2812 len = strlen(page);
2813 if (!len || len > TCMU_CONFIG_LEN - 1)
ee018252 2814 return -EINVAL;
ee018252
BL
2815
2816 /* Check if device has been configured before */
63d5be0f 2817 if (target_dev_configured(&udev->se_dev)) {
02ccfb54 2818 ret = tcmu_send_dev_config_event(udev, page);
ee018252
BL
2819 if (ret) {
2820 pr_err("Unable to reconfigure device\n");
2821 return ret;
2822 }
de8c5221
BL
2823 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2824
2825 ret = tcmu_update_uio_info(udev);
2826 if (ret)
2827 return ret;
2828 return count;
ee018252 2829 }
2d76443e 2830 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
ee018252
BL
2831
2832 return count;
2833}
2d76443e 2834CONFIGFS_ATTR(tcmu_, dev_config);
ee018252 2835
801fc54d
BL
2836static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2837{
2838 struct se_dev_attrib *da = container_of(to_config_group(item),
2839 struct se_dev_attrib, da_group);
2840 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2841
0e0d7526 2842 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
801fc54d
BL
2843}
2844
84e28506
ZL
2845static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2846{
2847 struct sk_buff *skb = NULL;
2848 void *msg_header = NULL;
2849 int ret = 0;
2850
2851 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2852 &skb, &msg_header);
2853 if (ret < 0)
2854 return ret;
2855 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2856 size, TCMU_ATTR_PAD);
2857 if (ret < 0) {
2858 nlmsg_free(skb);
2859 return ret;
2860 }
2861 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
06add777 2862 skb, msg_header);
84e28506
ZL
2863}
2864
801fc54d
BL
2865static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2866 size_t count)
2867{
2868 struct se_dev_attrib *da = container_of(to_config_group(item),
2869 struct se_dev_attrib, da_group);
2870 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2d76443e 2871 u64 val;
801fc54d
BL
2872 int ret;
2873
2d76443e 2874 ret = kstrtou64(page, 0, &val);
801fc54d
BL
2875 if (ret < 0)
2876 return ret;
801fc54d
BL
2877
2878 /* Check if device has been configured before */
63d5be0f 2879 if (target_dev_configured(&udev->se_dev)) {
84e28506 2880 ret = tcmu_send_dev_size_event(udev, val);
801fc54d
BL
2881 if (ret) {
2882 pr_err("Unable to reconfigure device\n");
2883 return ret;
2884 }
2885 }
2d76443e 2886 udev->dev_size = val;
801fc54d
BL
2887 return count;
2888}
2889CONFIGFS_ATTR(tcmu_, dev_size);
2890
b849b456
KN
2891static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2892 char *page)
2893{
2894 struct se_dev_attrib *da = container_of(to_config_group(item),
2895 struct se_dev_attrib, da_group);
2896 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2897
2898 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2899}
2900
2901static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2902 const char *page, size_t count)
2903{
2904 struct se_dev_attrib *da = container_of(to_config_group(item),
2905 struct se_dev_attrib, da_group);
2906 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2907 s8 val;
2908 int ret;
2909
2910 ret = kstrtos8(page, 0, &val);
2911 if (ret < 0)
2912 return ret;
2913
2914 udev->nl_reply_supported = val;
2915 return count;
2916}
2917CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2918
9a8bb606
BL
2919static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2920 char *page)
2921{
2922 struct se_dev_attrib *da = container_of(to_config_group(item),
2923 struct se_dev_attrib, da_group);
2924
2925 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2926}
2927
33d065cc
ZL
2928static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2929{
2930 struct sk_buff *skb = NULL;
2931 void *msg_header = NULL;
2932 int ret = 0;
2933
2934 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2935 &skb, &msg_header);
2936 if (ret < 0)
2937 return ret;
2938 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2939 if (ret < 0) {
2940 nlmsg_free(skb);
2941 return ret;
2942 }
2943 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
06add777 2944 skb, msg_header);
33d065cc
ZL
2945}
2946
9a8bb606
BL
2947static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2948 const char *page, size_t count)
2949{
2950 struct se_dev_attrib *da = container_of(to_config_group(item),
2951 struct se_dev_attrib, da_group);
1068be7b 2952 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2d76443e 2953 u8 val;
9a8bb606
BL
2954 int ret;
2955
2d76443e 2956 ret = kstrtou8(page, 0, &val);
9a8bb606
BL
2957 if (ret < 0)
2958 return ret;
2959
1068be7b 2960 /* Check if device has been configured before */
63d5be0f 2961 if (target_dev_configured(&udev->se_dev)) {
33d065cc 2962 ret = tcmu_send_emulate_write_cache(udev, val);
1068be7b
BL
2963 if (ret) {
2964 pr_err("Unable to reconfigure device\n");
2965 return ret;
2966 }
2967 }
2d76443e
MC
2968
2969 da->emulate_write_cache = val;
9a8bb606
BL
2970 return count;
2971}
2972CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2973
59526d7a
BS
2974static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
2975{
2976 struct se_dev_attrib *da = container_of(to_config_group(item),
2977 struct se_dev_attrib, da_group);
2978 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2979
2980 return snprintf(page, PAGE_SIZE, "%i\n",
2981 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
2982}
2983
2984static ssize_t tcmu_tmr_notification_store(struct config_item *item,
2985 const char *page, size_t count)
2986{
2987 struct se_dev_attrib *da = container_of(to_config_group(item),
2988 struct se_dev_attrib, da_group);
2989 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2990 u8 val;
2991 int ret;
2992
2993 ret = kstrtou8(page, 0, &val);
2994 if (ret < 0)
2995 return ret;
2996 if (val > 1)
2997 return -EINVAL;
2998
2999 if (val)
3000 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
3001 else
3002 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
3003 return count;
3004}
3005CONFIGFS_ATTR(tcmu_, tmr_notification);
3006
892782ca
MC
3007static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
3008{
3009 struct se_device *se_dev = container_of(to_config_group(item),
3010 struct se_device,
3011 dev_action_group);
3012 struct tcmu_dev *udev = TCMU_DEV(se_dev);
3013
3014 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
3015 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
3016 else
3017 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
3018}
3019
3020static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
3021 size_t count)
3022{
3023 struct se_device *se_dev = container_of(to_config_group(item),
3024 struct se_device,
3025 dev_action_group);
3026 struct tcmu_dev *udev = TCMU_DEV(se_dev);
3027 u8 val;
3028 int ret;
3029
a30b0473
MC
3030 if (!target_dev_configured(&udev->se_dev)) {
3031 pr_err("Device is not configured.\n");
3032 return -EINVAL;
3033 }
3034
892782ca
MC
3035 ret = kstrtou8(page, 0, &val);
3036 if (ret < 0)
3037 return ret;
3038
3039 if (val > 1) {
3040 pr_err("Invalid block value %d\n", val);
3041 return -EINVAL;
3042 }
3043
3044 if (!val)
3045 tcmu_unblock_dev(udev);
3046 else
3047 tcmu_block_dev(udev);
3048 return count;
3049}
3050CONFIGFS_ATTR(tcmu_, block_dev);
3051
3052static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
3053 size_t count)
3054{
3055 struct se_device *se_dev = container_of(to_config_group(item),
3056 struct se_device,
3057 dev_action_group);
3058 struct tcmu_dev *udev = TCMU_DEV(se_dev);
3059 u8 val;
3060 int ret;
3061
a30b0473
MC
3062 if (!target_dev_configured(&udev->se_dev)) {
3063 pr_err("Device is not configured.\n");
3064 return -EINVAL;
3065 }
3066
892782ca
MC
3067 ret = kstrtou8(page, 0, &val);
3068 if (ret < 0)
3069 return ret;
3070
3071 if (val != 1 && val != 2) {
3072 pr_err("Invalid reset ring value %d\n", val);
3073 return -EINVAL;
3074 }
3075
3076 tcmu_reset_ring(udev, val);
3077 return count;
3078}
3079CONFIGFS_ATTR_WO(tcmu_, reset_ring);
3080
018c1491
BS
3081static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
3082 size_t count)
3083{
3084 struct se_device *se_dev = container_of(to_config_group(item),
3085 struct se_device,
3086 dev_action_group);
3087 struct tcmu_dev *udev = TCMU_DEV(se_dev);
3088 struct tcmu_cmd *cmd;
3089 u16 cmd_id;
3090 int ret;
3091
3092 if (!target_dev_configured(&udev->se_dev)) {
3093 pr_err("Device is not configured.\n");
3094 return -EINVAL;
3095 }
3096
3097 ret = kstrtou16(page, 0, &cmd_id);
3098 if (ret < 0)
3099 return ret;
3100
3101 mutex_lock(&udev->cmdr_lock);
3102
3103 {
3104 XA_STATE(xas, &udev->commands, cmd_id);
3105
3106 xas_lock(&xas);
3107 cmd = xas_load(&xas);
3108 if (!cmd) {
3109 pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
3110 count = -EINVAL;
3111 xas_unlock(&xas);
3112 goto out_unlock;
3113 }
3114 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
3115 pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
3116 cmd_id);
3117 count = -EINVAL;
3118 xas_unlock(&xas);
3119 goto out_unlock;
3120 }
3121 xas_store(&xas, NULL);
3122 xas_unlock(&xas);
3123 }
3124
3125 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
3126 tcmu_free_cmd(cmd);
3127 /*
3128	 * We only freed data space, not ring space. Therefore we don't call
3129 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
3130 */
3131 if (list_empty(&udev->tmr_queue))
3132 run_qfull_queue(udev, false);
3133
3134out_unlock:
3135 mutex_unlock(&udev->cmdr_lock);
3136 return count;
3137}
3138CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
3139
5821783b 3140static struct configfs_attribute *tcmu_attrib_attrs[] = {
801fc54d 3141 &tcmu_attr_cmd_time_out,
9103575a 3142 &tcmu_attr_qfull_time_out,
80eb8761 3143 &tcmu_attr_max_data_area_mb,
08976cb5 3144 &tcmu_attr_data_pages_per_blk,
c7ede4f0 3145 &tcmu_attr_cmd_ring_size_mb,
2d76443e 3146 &tcmu_attr_dev_config,
801fc54d
BL
3147 &tcmu_attr_dev_size,
3148 &tcmu_attr_emulate_write_cache,
59526d7a 3149 &tcmu_attr_tmr_notification,
b849b456 3150 &tcmu_attr_nl_reply_supported,
801fc54d
BL
3151 NULL,
3152};
3153
7d7a7435
NB
3154static struct configfs_attribute **tcmu_attrs;
3155
892782ca
MC
3156static struct configfs_attribute *tcmu_action_attrs[] = {
3157 &tcmu_attr_block_dev,
3158 &tcmu_attr_reset_ring,
018c1491 3159 &tcmu_attr_free_kept_buf,
892782ca
MC
3160 NULL,
3161};
3162
7d7a7435 3163static struct target_backend_ops tcmu_ops = {
7c9e7a6f 3164 .name = "user",
7c9e7a6f 3165 .owner = THIS_MODULE,
69088a04 3166 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
356ba2a8
BS
3167 .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
3168 TRANSPORT_FLAG_PASSTHROUGH_ALUA,
7c9e7a6f
AG
3169 .attach_hba = tcmu_attach_hba,
3170 .detach_hba = tcmu_detach_hba,
3171 .alloc_device = tcmu_alloc_device,
3172 .configure_device = tcmu_configure_device,
92634706 3173 .destroy_device = tcmu_destroy_device,
7c9e7a6f 3174 .free_device = tcmu_free_device,
6888da81
MC
3175 .unplug_device = tcmu_unplug_device,
3176 .plug_device = tcmu_plug_device,
7c9e7a6f 3177 .parse_cdb = tcmu_parse_cdb,
bc2d214a 3178 .tmr_notify = tcmu_tmr_notify,
7c9e7a6f
AG
3179 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
3180 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
3181 .get_device_type = sbc_get_device_type,
3182 .get_blocks = tcmu_get_blocks,
892782ca 3183 .tb_dev_action_attrs = tcmu_action_attrs,
7c9e7a6f
AG
3184};
3185
89ec9cfd 3186static void find_free_blocks(void)
b6df4b79
XL
3187{
3188 struct tcmu_dev *udev;
3189 loff_t off;
f5ce815f
BS
3190 u32 pages_freed, total_pages_freed = 0;
3191 u32 start, end, block, total_blocks_freed = 0;
af1dd7ff 3192
8b084d9d 3193 if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
af1dd7ff 3194 return;
b6df4b79 3195
89ec9cfd
MC
3196 mutex_lock(&root_udev_mutex);
3197 list_for_each_entry(udev, &root_udev, node) {
3198 mutex_lock(&udev->cmdr_lock);
b6df4b79 3199
dc335a99
MC
3200 if (!target_dev_configured(&udev->se_dev)) {
3201 mutex_unlock(&udev->cmdr_lock);
3202 continue;
3203 }
3204
89ec9cfd 3205 /* Try to complete the finished commands first */
bc2d214a
BS
3206 if (tcmu_handle_completions(udev))
3207 run_qfull_queue(udev, false);
d906d8af 3208
af1dd7ff
MC
3209		/* Skip udevs that are idle */
3210 if (!udev->dbi_thresh) {
89ec9cfd
MC
3211 mutex_unlock(&udev->cmdr_lock);
3212 continue;
3213 }
b6df4b79 3214
89ec9cfd
MC
3215 end = udev->dbi_max + 1;
3216 block = find_last_bit(udev->data_bitmap, end);
3217 if (block == udev->dbi_max) {
3218 /*
af1dd7ff
MC
3219			 * The last bit is dbi_max, so it is not possible to
3220			 * reclaim any blocks.
89ec9cfd
MC
3221 */
3222 mutex_unlock(&udev->cmdr_lock);
3223 continue;
3224 } else if (block == end) {
3225			/* The current udev will go to the idle state */
3226 udev->dbi_thresh = start = 0;
3227 udev->dbi_max = 0;
3228 } else {
3229 udev->dbi_thresh = start = block + 1;
3230 udev->dbi_max = block;
3231 }
b6df4b79 3232
bb9b9eb0
XW
3233 /*
3234 * Release the block pages.
3235 *
3236 * Also note that since tcmu_vma_fault() gets an extra page
3237 * refcount, tcmu_blocks_release() won't free pages if pages
3238 * are mapped. This means it is safe to call
3239 * tcmu_blocks_release() before unmap_mapping_range() which
3240 * drops the refcount of any pages it unmaps and thus releases
3241 * them.
3242 */
3243 pages_freed = tcmu_blocks_release(udev, start, end - 1);
3244
89ec9cfd 3245		/* Now truncate the data area mapping, starting at off */
e719afdc 3246 off = udev->data_off + (loff_t)start * udev->data_blk_size;
89ec9cfd 3247 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
b6df4b79 3248
89ec9cfd 3249 mutex_unlock(&udev->cmdr_lock);
b6df4b79 3250
f5ce815f
BS
3251 total_pages_freed += pages_freed;
3252 total_blocks_freed += end - start;
3253 pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
3254 pages_freed, total_pages_freed, end - start,
3255 total_blocks_freed, udev->name);
89ec9cfd
MC
3256 }
3257 mutex_unlock(&root_udev_mutex);
af1dd7ff 3258
8b084d9d 3259 if (atomic_read(&global_page_count) > tcmu_global_max_pages)
af1dd7ff 3260 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
89ec9cfd
MC
3261}
3262
488ebe4c
MC
3263static void check_timedout_devices(void)
3264{
3265 struct tcmu_dev *udev, *tmp_dev;
61fb2482 3266 struct tcmu_cmd *cmd, *tmp_cmd;
488ebe4c
MC
3267 LIST_HEAD(devs);
3268
3269 spin_lock_bh(&timed_out_udevs_lock);
3270 list_splice_init(&timed_out_udevs, &devs);
3271
3272 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
3273 list_del_init(&udev->timedout_entry);
3274 spin_unlock_bh(&timed_out_udevs_lock);
3275
6fddcb77 3276 mutex_lock(&udev->cmdr_lock);
a94a2572 3277
61fb2482
BS
3278 /*
3279		 * If cmd_time_out is disabled but qfull is set, the deadline
3280		 * will only reflect the qfull timeout. Ignore it.
3281 */
3282 if (udev->cmd_time_out) {
3283 list_for_each_entry_safe(cmd, tmp_cmd,
3284 &udev->inflight_queue,
3285 queue_entry) {
3286 tcmu_check_expired_ring_cmd(cmd);
3287 }
3288 tcmu_set_next_deadline(&udev->inflight_queue,
3289 &udev->cmd_timer);
3290 }
3291 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
3292 queue_entry) {
3293 tcmu_check_expired_queue_cmd(cmd);
3294 }
a94a2572
XL
3295 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3296
6fddcb77 3297 mutex_unlock(&udev->cmdr_lock);
488ebe4c
MC
3298
3299 spin_lock_bh(&timed_out_udevs_lock);
3300 }
3301
3302 spin_unlock_bh(&timed_out_udevs_lock);
3303}
3304
9972cebb 3305static void tcmu_unmap_work_fn(struct work_struct *work)
89ec9cfd 3306{
488ebe4c 3307 check_timedout_devices();
9972cebb 3308 find_free_blocks();
b6df4b79
XL
3309}
3310
7c9e7a6f
AG
3311static int __init tcmu_module_init(void)
3312{
801fc54d 3313 int ret, i, k, len = 0;
7c9e7a6f
AG
3314
3315 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
3316
af1dd7ff 3317 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
9972cebb 3318
7c9e7a6f
AG
3319 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
3320 sizeof(struct tcmu_cmd),
3321 __alignof__(struct tcmu_cmd),
3322 0, NULL);
3323 if (!tcmu_cmd_cache)
3324 return -ENOMEM;
3325
3326 tcmu_root_device = root_device_register("tcm_user");
3327 if (IS_ERR(tcmu_root_device)) {
3328 ret = PTR_ERR(tcmu_root_device);
3329 goto out_free_cache;
3330 }
3331
3332 ret = genl_register_family(&tcmu_genl_family);
3333 if (ret < 0) {
3334 goto out_unreg_device;
3335 }
3336
4703b625 3337 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
7d7a7435 3338 len += sizeof(struct configfs_attribute *);
4703b625
BS
3339 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
3340 len += sizeof(struct configfs_attribute *);
3341 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
801fc54d 3342 len += sizeof(struct configfs_attribute *);
801fc54d 3343 len += sizeof(struct configfs_attribute *);
7d7a7435
NB
3344
3345 tcmu_attrs = kzalloc(len, GFP_KERNEL);
3346 if (!tcmu_attrs) {
3347 ret = -ENOMEM;
3348 goto out_unreg_genl;
3349 }
3350
4703b625 3351 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
7d7a7435 3352 tcmu_attrs[i] = passthrough_attrib_attrs[i];
4703b625
BS
3353 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
3354 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
3355 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
3356 tcmu_attrs[i++] = tcmu_attrib_attrs[k];
7d7a7435
NB
3357 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
3358
0a06d430 3359 ret = transport_backend_register(&tcmu_ops);
7c9e7a6f 3360 if (ret)
7d7a7435 3361 goto out_attrs;
7c9e7a6f
AG
3362
3363 return 0;
3364
7d7a7435
NB
3365out_attrs:
3366 kfree(tcmu_attrs);
7c9e7a6f
AG
3367out_unreg_genl:
3368 genl_unregister_family(&tcmu_genl_family);
3369out_unreg_device:
3370 root_device_unregister(tcmu_root_device);
3371out_free_cache:
3372 kmem_cache_destroy(tcmu_cmd_cache);
3373
3374 return ret;
3375}
3376
3377static void __exit tcmu_module_exit(void)
3378{
af1dd7ff 3379 cancel_delayed_work_sync(&tcmu_unmap_work);
0a06d430 3380 target_backend_unregister(&tcmu_ops);
7d7a7435 3381 kfree(tcmu_attrs);
7c9e7a6f
AG
3382 genl_unregister_family(&tcmu_genl_family);
3383 root_device_unregister(tcmu_root_device);
3384 kmem_cache_destroy(tcmu_cmd_cache);
3385}
3386
3387MODULE_DESCRIPTION("TCM USER subsystem plugin");
3388MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
3389MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
3390MODULE_LICENSE("GPL");
3391
3392module_init(tcmu_module_init);
3393module_exit(tcmu_module_exit);