/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

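/*
 * For orientation only, a minimal sketch of the userspace half of this
 * protocol (assuming the mailbox and entry layout from
 * include/uapi/linux/target_core_user.h; dcache flushing and error
 * handling are elided, and handle_scsi_cmd() is a hypothetical handler):
 *
 *	struct tcmu_mailbox *mb = mmap_base;
 *	void *cmdr = mmap_base + mb->cmdr_off;
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		struct tcmu_cmd_entry *ent = cmdr + mb->cmd_tail;
 *
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *			handle_scsi_cmd(ent);	// data via ent->req.iov[]
 *			ent->rsp.scsi_status = SAM_STAT_GOOD;
 *		}
 *		// PAD entries are skipped by just advancing the tail
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 */
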
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* The cmd area size is fixed at 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For the data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)

#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))

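/*
 * A worked example of the conversions above, assuming 4K pages
 * (DATA_BLOCK_SHIFT == 12): TCMU_MBS_TO_BLOCKS(1024) == 1024 << 8 ==
 * 262144 blocks, which is exactly DATA_BLOCK_BITS_DEF, so the default
 * per-device data area tops out at 1GB.
 */
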
/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

/*
 * Default number of global data blocks (512K * PAGE_SIZE) above which
 * the unmap thread will be started.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;
	uint32_t max_blocks;
	size_t ring_size;

	struct mutex cmdr_lock;
	struct list_head cmdr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	spinlock_t nl_cmd_lock;
	struct tcmu_nl_cmd curr_nl_cmd;
	/* wake up threads waiting on curr_nl_cmd */
	wait_queue_head_t nl_cmd_wq;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head cmdr_queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};
/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all tcmu devices' "
		 "data areas.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
	[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct se_device *dev;
	struct tcmu_dev *udev;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;
	bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	dev = target_find_device(dev_id, !is_removed);
	if (!dev) {
		printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		return -ENODEV;
	}
	udev = TCMU_DEV(dev);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd = &udev->curr_nl_cmd;

	pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
		 nl_cmd->cmd, completed_cmd, rc);

	if (nl_cmd->cmd != completed_cmd) {
		printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
		       completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
	} else {
		nl_cmd->status = rc;
	}

	spin_unlock(&udev->nl_cmd_lock);
	if (!is_removed)
		target_undepend_item(&dev->dev_group.cg_item);
	if (!ret)
		complete(&nl_cmd->complete);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
		    tcmu_global_max_blocks)
			schedule_delayed_work(&tcmu_unmap_work, 0);

		/* try to get a new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			return false;
	}
	return true;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
					DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

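/*
 * A worked example of the sizing above, assuming 4K pages: a 10KB
 * Data-Out command rounds up to 12KB, so tcmu_cmd_get_block_cnt()
 * returns 3 data blocks; a bidi command additionally reserves a
 * similarly rounded-up allocation for its Data-In buffer.
 */
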
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(virt_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

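/*
 * A quick worked example of the accounting above: with size == 8,
 * head == 2 and tail == 6, spc_used() == 8 + (2 - 6) == 4 and
 * spc_free() == 8 - 4 - 1 == 3. The one reserved byte is what keeps a
 * completely full ring (head == tail) distinguishable from an empty
 * one.
 */
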
static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
					   int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

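/*
 * For example, with 4K pages and data_off == CMDR_SIZE: for dbi == 2
 * and 1K still remaining in the block, get_block_offset_user() returns
 * 8M + 2 * 4K + 3K, i.e. the mmap offset of the block's final kilobyte.
 * These offsets are what get stored in the iovecs built below.
 */
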
static void scatter_data_area(struct tcmu_dev *udev,
			      struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
			      unsigned int data_nents, struct iovec **iov,
			      int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			/*
			 * Convert to a virtual offset into the ring data
			 * area.
			 */
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			/*
			 * The following code will gather and map the blocks
			 * to the same iovec when the blocks are all next to
			 * each other.
			 */
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				/*
				 * Will append to the current iovec, because
				 * the current block page is next to the
				 * previous one.
				 */
				(*iov)->iov_len += copy_bytes;
			} else {
				/*
				 * Will allocate a new iovec because we are
				 * here for the first time or the current
				 * block page is not next to the previous
				 * one.
				 */
				new_iov(iov, iov_cnt);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}

			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}

	if (to)
		kunmap_atomic(to);
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {

		/*
		 * For the bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
			       copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
	if (from)
		kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if ((space * DATA_BLOCK_SIZE) < data_needed) {
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but asked for %zu\n",
				 blocks_left * DATA_BLOCK_SIZE,
				 data_needed);
			return false;
		}

		udev->dbi_thresh += blocks_needed;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
		   sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
			 TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

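/*
 * A worked sizing example, assuming a 10-byte CDB and two iovecs: the
 * base size is max(offsetof(struct tcmu_cmd_entry, req.iov[2]),
 * sizeof(struct tcmu_cmd_entry)), and the CDB is appended after it
 * rounded up to TCMU_OP_ALIGN_SIZE, keeping every ring entry aligned.
 */
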
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				struct timer_list *timer)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	int cmd_id;

	if (tcmu_cmd->cmd_id)
		goto setup_timer;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
		 udev->name, tmo / MSEC_PER_SEC);

setup_timer:
	if (!tmo)
		return 0;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(timer, tcmu_cmd->deadline);
	return 0;
}

static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;
	int ret;

	/*
	 * For backwards compat if qfull_time_out is not set use
	 * cmd_time_out and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
	if (ret)
		return ret;

	list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
	pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
		 tcmu_cmd->cmd_id, udev->name);
	return 0;
}

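/*
 * To illustrate the fallbacks above (an interpretation, not normative):
 * with qfull_time_out == -1 and cmd_time_out == 30000ms, a command that
 * cannot get ring space waits with the qfull timer armed for 30s, while
 * qfull_time_out == 0 fails such a command immediately with -ETIMEDOUT.
 */
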
/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (!list_empty(&udev->cmdr_queue))
		goto queue;

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
		goto queue;
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(*entry));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
			  se_cmd->t_data_nents, &iov, &iov_cnt,
			  copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
				  false);
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
				   &udev->cmd_timer);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);

		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	return 0;

queue:
	if (add_to_cmdr_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	mutex_unlock(&udev->cmdr_lock);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	return scsi_ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head) {
		/* no more pending commands */
		del_timer(&udev->cmd_timer);

		if (list_empty(&udev->cmdr_queue)) {
			/*
			 * no more pending or waiting commands so try to
			 * reclaim blocks if needed.
			 */
			if (atomic_read(&global_db_count) >
			    tcmu_global_max_blocks)
				schedule_delayed_work(&tcmu_unmap_work, 0);
		}
	}

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	u8 scsi_status;
	struct se_cmd *se_cmd;
	bool is_running;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	is_running = list_empty(&cmd->cmdr_queue_entry);
	se_cmd = cmd->se_cmd;

	if (is_running) {
		/*
		 * If cmd_time_out is disabled but qfull is set deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (!udev->cmd_time_out)
			return 0;

		set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
		/*
		 * target_complete_cmd will translate this to LUN COMM FAILURE
		 */
		scsi_status = SAM_STAT_CHECK_CONDITION;
	} else {
		list_del_init(&cmd->cmdr_queue_entry);

		idr_remove(&udev->commands, id);
		tcmu_free_cmd(cmd);
		scsi_status = SAM_STAT_TASK_SET_FULL;
	}

	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
		 id, udev->name, is_running ? "inflight" : "queued");

	target_complete_cmd(se_cmd, scsi_status);
	return 0;
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

1234 | ||
1235 | static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |
1236 | { | |
1237 | struct tcmu_dev *udev; | |
1238 | ||
1239 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); | |
1240 | if (!udev) | |
1241 | return NULL; | |
f3cdbe39 | 1242 | kref_init(&udev->kref); |
7c9e7a6f AG |
1243 | |
1244 | udev->name = kstrdup(name, GFP_KERNEL); | |
1245 | if (!udev->name) { | |
1246 | kfree(udev); | |
1247 | return NULL; | |
1248 | } | |
1249 | ||
1250 | udev->hba = hba; | |
af980e46 | 1251 | udev->cmd_time_out = TCMU_TIME_OUT; |
9103575a | 1252 | udev->qfull_time_out = -1; |
7c9e7a6f | 1253 | |
80eb8761 | 1254 | udev->max_blocks = DATA_BLOCK_BITS_DEF; |
b6df4b79 | 1255 | mutex_init(&udev->cmdr_lock); |
7c9e7a6f | 1256 | |
488ebe4c | 1257 | INIT_LIST_HEAD(&udev->timedout_entry); |
af1dd7ff | 1258 | INIT_LIST_HEAD(&udev->cmdr_queue); |
7c9e7a6f | 1259 | idr_init(&udev->commands); |
7c9e7a6f | 1260 | |
9103575a MC |
1261 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); |
1262 | timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); | |
7c9e7a6f | 1263 | |
b3af66e2 MC |
1264 | init_waitqueue_head(&udev->nl_cmd_wq); |
1265 | spin_lock_init(&udev->nl_cmd_lock); | |
1266 | ||
c22adc0b XL |
1267 | INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); |
1268 | ||
7c9e7a6f AG |
1269 | return &udev->se_dev; |
1270 | } | |
1271 | ||
static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	bool drained = true;
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->cmdr_queue))
		return true;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	list_splice_init(&udev->cmdr_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
		list_del_init(&tcmu_cmd->cmdr_queue_entry);

		pr_debug("removing cmd %u on dev %s from queue\n",
			 tcmu_cmd->cmd_id, udev->name);

		if (fail) {
			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %u on dev %s failed with %u\n",
				 tcmu_cmd->cmd_id, udev->name, scsi_ret);

			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd,
					    SAM_STAT_CHECK_CONDITION);
			tcmu_free_cmd(tcmu_cmd);
		} else if (ret > 0) {
			pr_debug("ran out of space during cmdr queue run\n");
			/*
			 * cmd was requeued, so just put all cmds back in
			 * the queue
			 */
			list_splice_tail(&cmds, &udev->cmdr_queue);
			drained = false;
			goto done;
		}
	}
	if (list_empty(&udev->cmdr_queue))
		del_timer(&udev->qfull_timer);
done:
	return drained;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&udev->cmdr_lock);
	tcmu_handle_completions(udev);
	run_cmdr_queue(udev, false);
	mutex_unlock(&udev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Userspace messed up and passed in an address not in the
	 * data iov passed to it.
	 */
	pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
	       dbi, udev->name);
	page = NULL;
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

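/*
 * A worked fault example, assuming 4K pages, mem index 0 and
 * data_off == 8MB: a fault at pgoff 2049 is byte offset 2049 << 12 ==
 * 8MB + 4KB, which lies past data_off and therefore maps to data block
 * dbi == 4KB / DATA_BLOCK_SIZE == 1.
 */
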
static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
				int start, int end)
{
	int i;
	struct page *page;

	for (i = start; i < end; i++) {
		page = radix_tree_delete(blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
}

f3cdbe39 MC |
1496 | static void tcmu_dev_kref_release(struct kref *kref) |
1497 | { | |
1498 | struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); | |
1499 | struct se_device *dev = &udev->se_dev; | |
c22adc0b XL |
1500 | struct tcmu_cmd *cmd; |
1501 | bool all_expired = true; | |
1502 | int i; | |
1503 | ||
1504 | vfree(udev->mb_addr); | |
1505 | udev->mb_addr = NULL; | |
1506 | ||
488ebe4c MC |
1507 | spin_lock_bh(&timed_out_udevs_lock); |
1508 | if (!list_empty(&udev->timedout_entry)) | |
1509 | list_del(&udev->timedout_entry); | |
1510 | spin_unlock_bh(&timed_out_udevs_lock); | |
1511 | ||
c22adc0b | 1512 | /* Upper layer should drain all requests before calling this */ |
6fddcb77 | 1513 | mutex_lock(&udev->cmdr_lock); |
c22adc0b XL |
1514 | idr_for_each_entry(&udev->commands, cmd, i) { |
1515 | if (tcmu_check_and_free_pending_cmd(cmd) != 0) | |
1516 | all_expired = false; | |
1517 | } | |
1518 | idr_destroy(&udev->commands); | |
c22adc0b XL |
1519 | WARN_ON(!all_expired); |
1520 | ||
bf99ec13 | 1521 | tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); |
80eb8761 | 1522 | kfree(udev->data_bitmap); |
bf99ec13 | 1523 | mutex_unlock(&udev->cmdr_lock); |
f3cdbe39 MC |
1524 | |
1525 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | |
1526 | } | |
1527 | ||
7c9e7a6f AG |
1528 | static int tcmu_release(struct uio_info *info, struct inode *inode) |
1529 | { | |
1530 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | |
1531 | ||
1532 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); | |
1533 | ||
1534 | pr_debug("close\n"); | |
9260695d | 1535 | /* release ref from open */ |
f3cdbe39 | 1536 | kref_put(&udev->kref, tcmu_dev_kref_release); |
7c9e7a6f AG |
1537 | return 0; |
1538 | } | |
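
/*
 * Lifetime summary: udev->kref is taken at device init (dropped in
 * tcmu_free_device()), in tcmu_open() (dropped above), and in
 * tcmu_configure_device() (dropped in tcmu_destroy_device()), so
 * tcmu_dev_kref_release() only runs once both LIO and the uio file
 * descriptor are done with the device.
 */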
1539 | ||
b3af66e2 MC |
1540 | static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) |
1541 | { | |
1542 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | |
1543 | ||
1544 | if (!tcmu_kern_cmd_reply_supported) | |
1545 | return; | |
b849b456 KN |
1546 | |
1547 | if (udev->nl_reply_supported <= 0) | |
1548 | return; | |
1549 | ||
b3af66e2 MC |
1550 | relock: |
1551 | spin_lock(&udev->nl_cmd_lock); | |
1552 | ||
1553 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { | |
1554 | spin_unlock(&udev->nl_cmd_lock); | |
1555 | pr_debug("sleeping for open nl cmd\n"); | |
1556 | wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); | |
1557 | goto relock; | |
1558 | } | |
1559 | ||
1560 | memset(nl_cmd, 0, sizeof(*nl_cmd)); | |
1561 | nl_cmd->cmd = cmd; | |
1562 | init_completion(&nl_cmd->complete); | |
1563 | ||
1564 | spin_unlock(&udev->nl_cmd_lock); | |
1565 | } | |
1566 | ||
1567 | static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) | |
1568 | { | |
1569 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | |
1570 | int ret; | |
1571 | DEFINE_WAIT(__wait); | |
1572 | ||
1573 | if (!tcmu_kern_cmd_reply_supported) | |
1574 | return 0; | |
1575 | ||
b849b456 KN |
1576 | if (udev->nl_reply_supported <= 0) |
1577 | return 0; | |
1578 | ||
b3af66e2 MC |
1579 | pr_debug("sleeping for nl reply\n"); |
1580 | wait_for_completion(&nl_cmd->complete); | |
1581 | ||
1582 | spin_lock(&udev->nl_cmd_lock); | |
1583 | nl_cmd->cmd = TCMU_CMD_UNSPEC; | |
1584 | ret = nl_cmd->status; | |
1585 | nl_cmd->status = 0; | |
1586 | spin_unlock(&udev->nl_cmd_lock); | |
1587 | ||
1588 | wake_up_all(&udev->nl_cmd_wq); | |
1589 | ||
85fae482 | 1590 | return ret; |
b3af66e2 MC |
1591 | } |
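
/*
 * Together these helpers implement a simple kernel/userspace
 * handshake: tcmu_init_genl_cmd_reply() records the outstanding
 * command (waiting if another is still open) and
 * tcmu_wait_genl_cmd_reply() sleeps until userspace acknowledges it
 * with the matching *_DONE netlink command, whose handler stores the
 * status in nl_cmd->status and completes nl_cmd->complete.
 */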
1592 | ||
0e5aee39 ZL |
1593 | static int tcmu_netlink_event_init(struct tcmu_dev *udev, |
1594 | enum tcmu_genl_cmd cmd, | |
1595 | struct sk_buff **buf, void **hdr) | |
7c9e7a6f AG |
1596 | { |
1597 | struct sk_buff *skb; | |
1598 | void *msg_header; | |
6e14eab9 | 1599 | int ret = -ENOMEM; |
7c9e7a6f AG |
1600 | |
1601 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | |
1602 | if (!skb) | |
6e14eab9 | 1603 | return ret; |
7c9e7a6f AG |
1604 | |
1605 | msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); | |
6e14eab9 NB |
1606 | if (!msg_header) |
1607 | goto free_skb; | |
7c9e7a6f | 1608 | |
b3af66e2 | 1609 | ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); |
6e14eab9 NB |
1610 | if (ret < 0) |
1611 | goto free_skb; | |
7c9e7a6f | 1612 | |
b3af66e2 MC |
1613 | ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); |
1614 | if (ret < 0) | |
1615 | goto free_skb; | |
1616 | ||
1617 | ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); | |
6e14eab9 NB |
1618 | if (ret < 0) |
1619 | goto free_skb; | |
7c9e7a6f | 1620 | |
0e5aee39 ZL |
1621 | *buf = skb; |
1622 | *hdr = msg_header; | |
1623 | return ret; | |
2d76443e | 1624 | |
0e5aee39 ZL |
1625 | free_skb: |
1626 | nlmsg_free(skb); | |
1627 | return ret; | |
1628 | } | |
1629 | ||
1630 | static int tcmu_netlink_event_send(struct tcmu_dev *udev, | |
1631 | enum tcmu_genl_cmd cmd, | |
1632 | struct sk_buff **buf, void **hdr) | |
1633 | { | |
1634 | int ret = 0; | |
1635 | struct sk_buff *skb = *buf; | |
1636 | void *msg_header = *hdr; | |
8a45885c | 1637 | |
053c095a | 1638 | genlmsg_end(skb, msg_header); |
7c9e7a6f | 1639 | |
b3af66e2 MC |
1640 | tcmu_init_genl_cmd_reply(udev, cmd); |
1641 | ||
20c08b36 | 1642 | ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, |
0e5aee39 ZL |
1643 | TCMU_MCGRP_CONFIG, GFP_KERNEL); |
1644 | /* We don't care if no one is listening */ | |
7c9e7a6f AG |
1645 | if (ret == -ESRCH) |
1646 | ret = 0; | |
b3af66e2 MC |
1647 | if (!ret) |
1648 | ret = tcmu_wait_genl_cmd_reply(udev); | |
6e14eab9 | 1649 | return ret; |
7c9e7a6f AG |
1650 | } |
1651 | ||
e0c240ac ZL |
1652 | static int tcmu_send_dev_add_event(struct tcmu_dev *udev) |
1653 | { | |
1654 | struct sk_buff *skb = NULL; | |
1655 | void *msg_header = NULL; | |
1656 | int ret = 0; | |
1657 | ||
1658 | ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, | |
1659 | &msg_header); | |
1660 | if (ret < 0) | |
1661 | return ret; | |
1662 | return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb, | |
1663 | &msg_header); | |
1664 | ||
1665 | } | |
1666 | ||
f892bd8e ZL |
1667 | static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) |
1668 | { | |
1669 | struct sk_buff *skb = NULL; | |
1670 | void *msg_header = NULL; | |
1671 | int ret = 0; | |
1672 | ||
1673 | ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, | |
1674 | &skb, &msg_header); | |
1675 | if (ret < 0) | |
1676 | return ret; | |
1677 | return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, | |
1678 | &skb, &msg_header); | |
1679 | } | |
1680 | ||
de8c5221 | 1681 | static int tcmu_update_uio_info(struct tcmu_dev *udev) |
7c9e7a6f | 1682 | { |
7c9e7a6f AG |
1683 | struct tcmu_hba *hba = udev->hba->hba_ptr; |
1684 | struct uio_info *info; | |
de8c5221 | 1685 | size_t size, used; |
7c9e7a6f AG |
1686 | char *str; |
1687 | ||
1688 | info = &udev->uio_info; | |
7c9e7a6f AG |
1689 | size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, |
1690 | udev->dev_config); | |
1691 | size += 1; /* for \0 */ | |
1692 | str = kmalloc(size, GFP_KERNEL); | |
1693 | if (!str) | |
1694 | return -ENOMEM; | |
1695 | ||
1696 | used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); | |
7c9e7a6f AG |
1697 | if (udev->dev_config[0]) |
1698 | snprintf(str + used, size - used, "/%s", udev->dev_config); | |
1699 | ||
ededd039 BL |
1700 | /* If the old string exists, free it */ |
1701 | kfree(info->name); | |
7c9e7a6f AG |
1702 | info->name = str; |
1703 | ||
de8c5221 BL |
1704 | return 0; |
1705 | } | |
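
/*
 * For example, with host_id 1, device name "foo" and a dev_config of
 * "cfgstring", info->name becomes "tcm-user/1/foo/cfgstring"; with an
 * empty dev_config it is just "tcm-user/1/foo".
 */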
1706 | ||
1707 | static int tcmu_configure_device(struct se_device *dev) | |
1708 | { | |
1709 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1710 | struct uio_info *info; | |
1711 | struct tcmu_mailbox *mb; | |
1712 | int ret = 0; | |
1713 | ||
1714 | ret = tcmu_update_uio_info(udev); | |
1715 | if (ret) | |
1716 | return ret; | |
1717 | ||
1718 | info = &udev->uio_info; | |
1719 | ||
6396bb22 KC |
1720 | udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), |
1721 | sizeof(unsigned long), | |
1722 | GFP_KERNEL); | |
a24e7917 WY |
1723 | if (!udev->data_bitmap) { |
1724 | ret = -ENOMEM; | |
80eb8761 | 1725 | goto err_bitmap_alloc; |
a24e7917 | 1726 | } |
80eb8761 | 1727 | |
141685a3 | 1728 | udev->mb_addr = vzalloc(CMDR_SIZE); |
7c9e7a6f AG |
1729 | if (!udev->mb_addr) { |
1730 | ret = -ENOMEM; | |
1731 | goto err_vzalloc; | |
1732 | } | |
1733 | ||
1734 | /* mailbox fits in first part of CMDR space */ | |
1735 | udev->cmdr_size = CMDR_SIZE - CMDR_OFF; | |
1736 | udev->data_off = CMDR_SIZE; | |
80eb8761 | 1737 | udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE; |
b6df4b79 | 1738 | udev->dbi_thresh = 0; /* Default in Idle state */ |
7c9e7a6f | 1739 | |
141685a3 | 1740 | /* Initialise the mailbox of the ring buffer */ |
7c9e7a6f | 1741 | mb = udev->mb_addr; |
0ad46af8 | 1742 | mb->version = TCMU_MAILBOX_VERSION; |
32c76de3 | 1743 | mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; |
7c9e7a6f AG |
1744 | mb->cmdr_off = CMDR_OFF; |
1745 | mb->cmdr_size = udev->cmdr_size; | |
1746 | ||
1747 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); | |
1748 | WARN_ON(udev->data_size % PAGE_SIZE); | |
26418649 | 1749 | WARN_ON(udev->data_size % DATA_BLOCK_SIZE); |
7c9e7a6f | 1750 | |
ac64a2ce | 1751 | info->version = __stringify(TCMU_MAILBOX_VERSION); |
7c9e7a6f AG |
1752 | |
1753 | info->mem[0].name = "tcm-user command & data buffer"; | |
0633e123 | 1754 | info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; |
80eb8761 | 1755 | info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE; |
141685a3 | 1756 | info->mem[0].memtype = UIO_MEM_NONE; |
7c9e7a6f AG |
1757 | |
1758 | info->irqcontrol = tcmu_irqcontrol; | |
1759 | info->irq = UIO_IRQ_CUSTOM; | |
1760 | ||
1761 | info->mmap = tcmu_mmap; | |
1762 | info->open = tcmu_open; | |
1763 | info->release = tcmu_release; | |
1764 | ||
1765 | ret = uio_register_device(tcmu_root_device, info); | |
1766 | if (ret) | |
1767 | goto err_register; | |
1768 | ||
81ee28de SY |
1769 | /* The user can set hw_block_size before enabling the device */ | 
1770 | if (dev->dev_attrib.hw_block_size == 0) | |
1771 | dev->dev_attrib.hw_block_size = 512; | |
7c9e7a6f | 1772 | /* Other attributes can be configured in userspace */ |
3abaa2bf MC |
1773 | if (!dev->dev_attrib.hw_max_sectors) |
1774 | dev->dev_attrib.hw_max_sectors = 128; | |
9a8bb606 BL |
1775 | if (!dev->dev_attrib.emulate_write_cache) |
1776 | dev->dev_attrib.emulate_write_cache = 0; | |
7c9e7a6f AG |
1777 | dev->dev_attrib.hw_queue_depth = 128; |
1778 | ||
b849b456 KN |
1779 | /* If the user didn't explicitly disable netlink reply support, use | 
1780 | * the module-scope setting. | 
1781 | */ | |
1782 | if (udev->nl_reply_supported >= 0) | |
1783 | udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; | |
1784 | ||
f3cdbe39 MC |
1785 | /* |
1786 | * Get a ref in case userspace does a close on the uio device before | 
1787 | * LIO has initiated tcmu_free_device. | |
1788 | */ | |
1789 | kref_get(&udev->kref); | |
1790 | ||
e0c240ac | 1791 | ret = tcmu_send_dev_add_event(udev); |
7c9e7a6f AG |
1792 | if (ret) |
1793 | goto err_netlink; | |
1794 | ||
b6df4b79 XL |
1795 | mutex_lock(&root_udev_mutex); |
1796 | list_add(&udev->node, &root_udev); | |
1797 | mutex_unlock(&root_udev_mutex); | |
1798 | ||
7c9e7a6f AG |
1799 | return 0; |
1800 | ||
1801 | err_netlink: | |
f3cdbe39 | 1802 | kref_put(&udev->kref, tcmu_dev_kref_release); |
7c9e7a6f AG |
1803 | uio_unregister_device(&udev->uio_info); |
1804 | err_register: | |
1805 | vfree(udev->mb_addr); | |
c22adc0b | 1806 | udev->mb_addr = NULL; |
7c9e7a6f | 1807 | err_vzalloc: |
80eb8761 MC |
1808 | kfree(udev->data_bitmap); |
1809 | udev->data_bitmap = NULL; | |
1810 | err_bitmap_alloc: | |
7c9e7a6f | 1811 | kfree(info->name); |
f3cdbe39 | 1812 | info->name = NULL; |
7c9e7a6f AG |
1813 | |
1814 | return ret; | |
1815 | } | |
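
/*
 * Resulting layout of mem[0], with offsets from the start of the
 * mapping:
 *
 *   [0, CMDR_OFF)           mailbox (struct tcmu_mailbox)
 *   [CMDR_OFF, CMDR_SIZE)   command ring (udev->cmdr_size bytes)
 *   [CMDR_SIZE, ring_size)  data area (max_blocks * DATA_BLOCK_SIZE)
 */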
1816 | ||
972c7f16 MC |
1817 | static bool tcmu_dev_configured(struct tcmu_dev *udev) |
1818 | { | |
1819 | return udev->uio_info.uio_dev ? true : false; | |
1820 | } | |
1821 | ||
7c9e7a6f | 1822 | static void tcmu_free_device(struct se_device *dev) |
92634706 MC |
1823 | { |
1824 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1825 | ||
1826 | /* release ref from init */ | |
1827 | kref_put(&udev->kref, tcmu_dev_kref_release); | |
1828 | } | |
1829 | ||
1830 | static void tcmu_destroy_device(struct se_device *dev) | |
7c9e7a6f AG |
1831 | { |
1832 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
7c9e7a6f | 1833 | |
9103575a MC |
1834 | del_timer_sync(&udev->cmd_timer); |
1835 | del_timer_sync(&udev->qfull_timer); | |
7c9e7a6f | 1836 | |
b6df4b79 XL |
1837 | mutex_lock(&root_udev_mutex); |
1838 | list_del(&udev->node); | |
1839 | mutex_unlock(&root_udev_mutex); | |
1840 | ||
f892bd8e | 1841 | tcmu_send_dev_remove_event(udev); |
7c9e7a6f | 1842 | |
531283ff | 1843 | uio_unregister_device(&udev->uio_info); |
9260695d MC |
1844 | |
1845 | /* release ref from configure */ | |
1846 | kref_put(&udev->kref, tcmu_dev_kref_release); | |
7c9e7a6f AG |
1847 | } |
1848 | ||
892782ca MC |
1849 | static void tcmu_unblock_dev(struct tcmu_dev *udev) |
1850 | { | |
1851 | mutex_lock(&udev->cmdr_lock); | |
1852 | clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); | |
1853 | mutex_unlock(&udev->cmdr_lock); | |
1854 | } | |
1855 | ||
1856 | static void tcmu_block_dev(struct tcmu_dev *udev) | |
1857 | { | |
1858 | mutex_lock(&udev->cmdr_lock); | |
1859 | ||
1860 | if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) | |
1861 | goto unlock; | |
1862 | ||
1863 | /* complete IO that has executed successfully */ | |
1864 | tcmu_handle_completions(udev); | |
1865 | /* fail IO waiting to be queued */ | |
1866 | run_cmdr_queue(udev, true); | |
1867 | ||
1868 | unlock: | |
1869 | mutex_unlock(&udev->cmdr_lock); | |
1870 | } | |
1871 | ||
1872 | static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |
1873 | { | |
1874 | struct tcmu_mailbox *mb; | |
1875 | struct tcmu_cmd *cmd; | |
1876 | int i; | |
1877 | ||
1878 | mutex_lock(&udev->cmdr_lock); | |
1879 | ||
1880 | idr_for_each_entry(&udev->commands, cmd, i) { | |
1881 | if (!list_empty(&cmd->cmdr_queue_entry)) | |
1882 | continue; | |
1883 | ||
1884 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", | |
1885 | cmd->cmd_id, udev->name, | |
1886 | test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); | |
1887 | ||
1888 | idr_remove(&udev->commands, i); | |
1889 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { | |
1890 | if (err_level == 1) { | |
1891 | /* | |
1892 | * Userspace was not able to start the | |
1893 | * command or it is retryable. | |
1894 | */ | |
1895 | target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); | |
1896 | } else { | |
1897 | /* hard failure */ | |
1898 | target_complete_cmd(cmd->se_cmd, | |
1899 | SAM_STAT_CHECK_CONDITION); | |
1900 | } | |
1901 | } | |
1902 | tcmu_cmd_free_data(cmd, cmd->dbi_cnt); | |
1903 | tcmu_free_cmd(cmd); | |
1904 | } | |
1905 | ||
1906 | mb = udev->mb_addr; | |
1907 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | |
1908 | pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, | |
1909 | mb->cmd_tail, mb->cmd_head); | |
1910 | ||
1911 | udev->cmdr_last_cleaned = 0; | |
1912 | mb->cmd_tail = 0; | |
1913 | mb->cmd_head = 0; | |
1914 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | |
1915 | ||
1916 | del_timer(&udev->cmd_timer); | |
1917 | ||
1918 | mutex_unlock(&udev->cmdr_lock); | |
1919 | } | |
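
/*
 * The err_level values are supplied by tcmu_reset_ring_store() below:
 * 1 completes unexpired commands with SAM_STAT_BUSY so the initiator
 * can retry them, while 2 completes them with CHECK CONDITION as a
 * hard failure.
 */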
1920 | ||
7c9e7a6f | 1921 | enum { |
3abaa2bf | 1922 | Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, |
80eb8761 | 1923 | Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, |
7c9e7a6f AG |
1924 | }; |
1925 | ||
1926 | static match_table_t tokens = { | |
1927 | {Opt_dev_config, "dev_config=%s"}, | |
1928 | {Opt_dev_size, "dev_size=%u"}, | |
9c1cd1b6 | 1929 | {Opt_hw_block_size, "hw_block_size=%u"}, |
3abaa2bf | 1930 | {Opt_hw_max_sectors, "hw_max_sectors=%u"}, |
b849b456 | 1931 | {Opt_nl_reply_supported, "nl_reply_supported=%d"}, |
80eb8761 | 1932 | {Opt_max_data_area_mb, "max_data_area_mb=%u"}, |
7c9e7a6f AG |
1933 | {Opt_err, NULL} |
1934 | }; | |
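
/*
 * These tokens are parsed from writes to the device's configfs
 * "control" file.  A usage sketch from the shell (the configfs path
 * is an example and depends on the HBA and device names):
 *
 *   echo "dev_config=cfgstring,dev_size=1073741824,max_data_area_mb=64" \
 *       > /sys/kernel/config/target/core/user_0/mydev/control
 */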
1935 | ||
3abaa2bf MC |
1936 | static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) |
1937 | { | |
1938 | unsigned long tmp_ul; | |
1939 | char *arg_p; | |
1940 | int ret; | |
1941 | ||
1942 | arg_p = match_strdup(arg); | |
1943 | if (!arg_p) | |
1944 | return -ENOMEM; | |
1945 | ||
1946 | ret = kstrtoul(arg_p, 0, &tmp_ul); | |
1947 | kfree(arg_p); | |
1948 | if (ret < 0) { | |
1949 | pr_err("kstrtoul() failed for dev attrib\n"); | |
1950 | return ret; | |
1951 | } | |
1952 | if (!tmp_ul) { | |
1953 | pr_err("dev attrib must be nonzero\n"); | |
1954 | return -EINVAL; | |
1955 | } | |
1956 | *dev_attrib = tmp_ul; | |
1957 | return 0; | |
1958 | } | |
1959 | ||
7c9e7a6f AG |
1960 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, |
1961 | const char *page, ssize_t count) | |
1962 | { | |
1963 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1964 | char *orig, *ptr, *opts, *arg_p; | |
1965 | substring_t args[MAX_OPT_ARGS]; | |
80eb8761 | 1966 | int ret = 0, token, tmpval; |
7c9e7a6f AG |
1967 | |
1968 | opts = kstrdup(page, GFP_KERNEL); | |
1969 | if (!opts) | |
1970 | return -ENOMEM; | |
1971 | ||
1972 | orig = opts; | |
1973 | ||
1974 | while ((ptr = strsep(&opts, ",\n")) != NULL) { | |
1975 | if (!*ptr) | |
1976 | continue; | |
1977 | ||
1978 | token = match_token(ptr, tokens, args); | |
1979 | switch (token) { | |
1980 | case Opt_dev_config: | |
1981 | if (match_strlcpy(udev->dev_config, &args[0], | |
1982 | TCMU_CONFIG_LEN) == 0) { | |
1983 | ret = -EINVAL; | |
1984 | break; | |
1985 | } | |
1986 | pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); | |
1987 | break; | |
1988 | case Opt_dev_size: | |
1989 | arg_p = match_strdup(&args[0]); | |
1990 | if (!arg_p) { | |
1991 | ret = -ENOMEM; | |
1992 | break; | |
1993 | } | |
1994 | ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); | |
1995 | kfree(arg_p); | |
1996 | if (ret < 0) | |
1997 | pr_err("kstrtoul() failed for dev_size=\n"); | |
1998 | break; | |
9c1cd1b6 | 1999 | case Opt_hw_block_size: |
3abaa2bf MC |
2000 | ret = tcmu_set_dev_attrib(&args[0], |
2001 | &(dev->dev_attrib.hw_block_size)); | |
2002 | break; | |
2003 | case Opt_hw_max_sectors: | |
2004 | ret = tcmu_set_dev_attrib(&args[0], | |
2005 | &(dev->dev_attrib.hw_max_sectors)); | |
9c1cd1b6 | 2006 | break; |
b849b456 KN |
2007 | case Opt_nl_reply_supported: |
2008 | arg_p = match_strdup(&args[0]); | |
2009 | if (!arg_p) { | |
2010 | ret = -ENOMEM; | |
2011 | break; | |
2012 | } | |
16b93277 | 2013 | ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported); |
b849b456 KN |
2014 | kfree(arg_p); |
2015 | if (ret < 0) | |
16b93277 | 2016 | pr_err("kstrtoint() failed for nl_reply_supported=\n"); |
b849b456 | 2017 | break; |
80eb8761 MC |
2018 | case Opt_max_data_area_mb: |
2019 | if (dev->export_count) { | |
2020 | pr_err("Unable to set max_data_area_mb while exports exist\n"); | |
2021 | ret = -EINVAL; | |
2022 | break; | |
2023 | } | |
2024 | ||
2025 | arg_p = match_strdup(&args[0]); | |
2026 | if (!arg_p) { | |
2027 | ret = -ENOMEM; | |
2028 | break; | |
2029 | } | |
2030 | ret = kstrtoint(arg_p, 0, &tmpval); | |
2031 | kfree(arg_p); | |
2032 | if (ret < 0) { | |
2033 | pr_err("kstrtoint() failed for max_data_area_mb=\n"); | |
2034 | break; | |
2035 | } | |
2036 | ||
2037 | if (tmpval <= 0) { | |
2038 | pr_err("Invalid max_data_area %d\n", tmpval); | |
2039 | ret = -EINVAL; | |
2040 | break; | |
2041 | } | |
2042 | ||
2043 | udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval); | |
2044 | if (udev->max_blocks > tcmu_global_max_blocks) { | |
2045 | pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", | |
2046 | tmpval, | |
2047 | TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); | |
2048 | udev->max_blocks = tcmu_global_max_blocks; | |
2049 | } | |
2050 | break; | |
7c9e7a6f AG |
2051 | default: |
2052 | break; | |
2053 | } | |
2579325c MC |
2054 | |
2055 | if (ret) | |
2056 | break; | |
7c9e7a6f AG |
2057 | } |
2058 | ||
2059 | kfree(orig); | |
2060 | return (!ret) ? count : ret; | |
2061 | } | |
2062 | ||
2063 | static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) | |
2064 | { | |
2065 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
2066 | ssize_t bl = 0; | |
2067 | ||
2068 | bl = sprintf(b + bl, "Config: %s ", | |
2069 | udev->dev_config[0] ? udev->dev_config : "NULL"); | |
80eb8761 MC |
2070 | bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); |
2071 | bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", | |
2072 | TCMU_BLOCKS_TO_MBS(udev->max_blocks)); | |
7c9e7a6f AG |
2073 | |
2074 | return bl; | |
2075 | } | |
2076 | ||
2077 | static sector_t tcmu_get_blocks(struct se_device *dev) | |
2078 | { | |
2079 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
2080 | ||
2081 | return div_u64(udev->dev_size - dev->dev_attrib.block_size, | |
2082 | dev->dev_attrib.block_size); | |
2083 | } | |
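
/*
 * Note: the .get_blocks callback is expected to return the last
 * addressable LBA rather than the total block count, which is why one
 * block_size is subtracted from dev_size before dividing.
 */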
2084 | ||
7c9e7a6f | 2085 | static sense_reason_t |
9c1cd1b6 | 2086 | tcmu_parse_cdb(struct se_cmd *cmd) |
7c9e7a6f | 2087 | { |
02eb924f | 2088 | return passthrough_parse_cdb(cmd, tcmu_queue_cmd); |
7c9e7a6f AG |
2089 | } |
2090 | ||
7d7a7435 NB |
2091 | static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) |
2092 | { | |
2093 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2094 | struct se_dev_attrib, da_group); | |
b5ab697c | 2095 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); |
7d7a7435 NB |
2096 | |
2097 | return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); | |
2098 | } | |
2099 | ||
2100 | static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, | |
2101 | size_t count) | |
2102 | { | |
2103 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2104 | struct se_dev_attrib, da_group); | |
2105 | struct tcmu_dev *udev = container_of(da->da_dev, | |
2106 | struct tcmu_dev, se_dev); | |
2107 | u32 val; | |
2108 | int ret; | |
2109 | ||
2110 | if (da->da_dev->export_count) { | |
2111 | pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); | |
2112 | return -EINVAL; | |
2113 | } | |
2114 | ||
2115 | ret = kstrtou32(page, 0, &val); | |
2116 | if (ret < 0) | |
2117 | return ret; | |
2118 | ||
7d7a7435 NB |
2119 | udev->cmd_time_out = val * MSEC_PER_SEC; |
2120 | return count; | |
2121 | } | |
2122 | CONFIGFS_ATTR(tcmu_, cmd_time_out); | |
2123 | ||
9103575a MC |
2124 | static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) |
2125 | { | |
2126 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2127 | struct se_dev_attrib, da_group); | |
2128 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2129 | ||
2130 | return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? | |
2131 | udev->qfull_time_out : | |
2132 | udev->qfull_time_out / MSEC_PER_SEC); | |
2133 | } | |
2134 | ||
2135 | static ssize_t tcmu_qfull_time_out_store(struct config_item *item, | |
2136 | const char *page, size_t count) | |
2137 | { | |
2138 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2139 | struct se_dev_attrib, da_group); | |
2140 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2141 | s32 val; | |
2142 | int ret; | |
2143 | ||
2144 | ret = kstrtos32(page, 0, &val); | |
2145 | if (ret < 0) | |
2146 | return ret; | |
2147 | ||
2148 | if (val >= 0) { | |
2149 | udev->qfull_time_out = val * MSEC_PER_SEC; | |
125966db PKK |
2150 | } else if (val == -1) { |
2151 | udev->qfull_time_out = val; | |
9103575a MC |
2152 | } else { |
2153 | printk(KERN_ERR "Invalid qfull timeout value %d\n", val); | |
2154 | return -EINVAL; | |
2155 | } | |
2156 | return count; | |
2157 | } | |
2158 | CONFIGFS_ATTR(tcmu_, qfull_time_out); | |
2159 | ||
80eb8761 MC |
2160 | static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) |
2161 | { | |
2162 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2163 | struct se_dev_attrib, da_group); | |
2164 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2165 | ||
2166 | return snprintf(page, PAGE_SIZE, "%u\n", | |
2167 | TCMU_BLOCKS_TO_MBS(udev->max_blocks)); | |
2168 | } | |
2169 | CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); | |
2170 | ||
2d76443e | 2171 | static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) |
ee018252 BL |
2172 | { |
2173 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2174 | struct se_dev_attrib, da_group); | |
2175 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2176 | ||
2177 | return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); | |
2178 | } | |
2179 | ||
02ccfb54 ZL |
2180 | static int tcmu_send_dev_config_event(struct tcmu_dev *udev, |
2181 | const char *reconfig_data) | |
2182 | { | |
2183 | struct sk_buff *skb = NULL; | |
2184 | void *msg_header = NULL; | |
2185 | int ret = 0; | |
2186 | ||
2187 | ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2188 | &skb, &msg_header); | |
2189 | if (ret < 0) | |
2190 | return ret; | |
2191 | ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data); | |
2192 | if (ret < 0) { | |
2193 | nlmsg_free(skb); | |
2194 | return ret; | |
2195 | } | |
2196 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2197 | &skb, &msg_header); | |
2198 | } | |
2199 | ||
2d76443e MC |
2201 | static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, |
2202 | size_t count) | |
ee018252 BL |
2203 | { |
2204 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2205 | struct se_dev_attrib, da_group); | |
2206 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2d76443e | 2207 | int ret, len; |
ee018252 | 2208 | |
2d76443e MC |
2209 | len = strlen(page); |
2210 | if (!len || len > TCMU_CONFIG_LEN - 1) | |
ee018252 | 2211 | return -EINVAL; |
ee018252 BL |
2212 | |
2213 | /* Check if device has been configured before */ | |
2214 | if (tcmu_dev_configured(udev)) { | |
02ccfb54 | 2215 | ret = tcmu_send_dev_config_event(udev, page); |
ee018252 BL |
2216 | if (ret) { |
2217 | pr_err("Unable to reconfigure device\n"); | |
2218 | return ret; | |
2219 | } | |
de8c5221 BL |
2220 | strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); |
2221 | ||
2222 | ret = tcmu_update_uio_info(udev); | |
2223 | if (ret) | |
2224 | return ret; | |
2225 | return count; | |
ee018252 | 2226 | } |
2d76443e | 2227 | strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); |
ee018252 BL |
2228 | |
2229 | return count; | |
2230 | } | |
2d76443e | 2231 | CONFIGFS_ATTR(tcmu_, dev_config); |
ee018252 | 2232 | |
801fc54d BL |
2233 | static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) |
2234 | { | |
2235 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2236 | struct se_dev_attrib, da_group); | |
2237 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2238 | ||
2239 | return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); | |
2240 | } | |
2241 | ||
84e28506 ZL |
2242 | static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) |
2243 | { | |
2244 | struct sk_buff *skb = NULL; | |
2245 | void *msg_header = NULL; | |
2246 | int ret = 0; | |
2247 | ||
2248 | ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2249 | &skb, &msg_header); | |
2250 | if (ret < 0) | |
2251 | return ret; | |
2252 | ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, | |
2253 | size, TCMU_ATTR_PAD); | |
2254 | if (ret < 0) { | |
2255 | nlmsg_free(skb); | |
2256 | return ret; | |
2257 | } | |
2258 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2259 | &skb, &msg_header); | |
2260 | } | |
2261 | ||
801fc54d BL |
2262 | static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, |
2263 | size_t count) | |
2264 | { | |
2265 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2266 | struct se_dev_attrib, da_group); | |
2267 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2d76443e | 2268 | u64 val; |
801fc54d BL |
2269 | int ret; |
2270 | ||
2d76443e | 2271 | ret = kstrtou64(page, 0, &val); |
801fc54d BL |
2272 | if (ret < 0) |
2273 | return ret; | |
801fc54d BL |
2274 | |
2275 | /* Check if device has been configured before */ | |
2276 | if (tcmu_dev_configured(udev)) { | |
84e28506 | 2277 | ret = tcmu_send_dev_size_event(udev, val); |
801fc54d BL |
2278 | if (ret) { |
2279 | pr_err("Unable to reconfigure device\n"); | |
2280 | return ret; | |
2281 | } | |
2282 | } | |
2d76443e | 2283 | udev->dev_size = val; |
801fc54d BL |
2284 | return count; |
2285 | } | |
2286 | CONFIGFS_ATTR(tcmu_, dev_size); | |
2287 | ||
b849b456 KN |
2288 | static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, |
2289 | char *page) | |
2290 | { | |
2291 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2292 | struct se_dev_attrib, da_group); | |
2293 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2294 | ||
2295 | return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); | |
2296 | } | |
2297 | ||
2298 | static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, | |
2299 | const char *page, size_t count) | |
2300 | { | |
2301 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2302 | struct se_dev_attrib, da_group); | |
2303 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2304 | s8 val; | |
2305 | int ret; | |
2306 | ||
2307 | ret = kstrtos8(page, 0, &val); | |
2308 | if (ret < 0) | |
2309 | return ret; | |
2310 | ||
2311 | udev->nl_reply_supported = val; | |
2312 | return count; | |
2313 | } | |
2314 | CONFIGFS_ATTR(tcmu_, nl_reply_supported); | |
2315 | ||
9a8bb606 BL |
2316 | static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, |
2317 | char *page) | |
2318 | { | |
2319 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2320 | struct se_dev_attrib, da_group); | |
2321 | ||
2322 | return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); | |
2323 | } | |
2324 | ||
33d065cc ZL |
2325 | static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) |
2326 | { | |
2327 | struct sk_buff *skb = NULL; | |
2328 | void *msg_header = NULL; | |
2329 | int ret = 0; | |
2330 | ||
2331 | ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2332 | &skb, &msg_header); | |
2333 | if (ret < 0) | |
2334 | return ret; | |
2335 | ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val); | |
2336 | if (ret < 0) { | |
2337 | nlmsg_free(skb); | |
2338 | return ret; | |
2339 | } | |
2340 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | |
2341 | &skb, &msg_header); | |
2342 | } | |
2343 | ||
9a8bb606 BL |
2344 | static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, |
2345 | const char *page, size_t count) | |
2346 | { | |
2347 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2348 | struct se_dev_attrib, da_group); | |
1068be7b | 2349 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); |
2d76443e | 2350 | u8 val; |
9a8bb606 BL |
2351 | int ret; |
2352 | ||
2d76443e | 2353 | ret = kstrtou8(page, 0, &val); |
9a8bb606 BL |
2354 | if (ret < 0) |
2355 | return ret; | |
2356 | ||
1068be7b BL |
2357 | /* Check if device has been configured before */ |
2358 | if (tcmu_dev_configured(udev)) { | |
33d065cc | 2359 | ret = tcmu_send_emulate_write_cache(udev, val); |
1068be7b BL |
2360 | if (ret) { |
2361 | pr_err("Unable to reconfigure device\n"); | |
2362 | return ret; | |
2363 | } | |
2364 | } | |
2d76443e MC |
2365 | |
2366 | da->emulate_write_cache = val; | |
9a8bb606 BL |
2367 | return count; |
2368 | } | |
2369 | CONFIGFS_ATTR(tcmu_, emulate_write_cache); | |
2370 | ||
892782ca MC |
2371 | static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) |
2372 | { | |
2373 | struct se_device *se_dev = container_of(to_config_group(item), | |
2374 | struct se_device, | |
2375 | dev_action_group); | |
2376 | struct tcmu_dev *udev = TCMU_DEV(se_dev); | |
2377 | ||
2378 | if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) | |
2379 | return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); | |
2380 | else | |
2381 | return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); | |
2382 | } | |
2383 | ||
2384 | static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, | |
2385 | size_t count) | |
2386 | { | |
2387 | struct se_device *se_dev = container_of(to_config_group(item), | |
2388 | struct se_device, | |
2389 | dev_action_group); | |
2390 | struct tcmu_dev *udev = TCMU_DEV(se_dev); | |
2391 | u8 val; | |
2392 | int ret; | |
2393 | ||
2394 | ret = kstrtou8(page, 0, &val); | |
2395 | if (ret < 0) | |
2396 | return ret; | |
2397 | ||
2398 | if (val > 1) { | |
2399 | pr_err("Invalid block value %d\n", val); | |
2400 | return -EINVAL; | |
2401 | } | |
2402 | ||
2403 | if (!val) | |
2404 | tcmu_unblock_dev(udev); | |
2405 | else | |
2406 | tcmu_block_dev(udev); | |
2407 | return count; | |
2408 | } | |
2409 | CONFIGFS_ATTR(tcmu_, block_dev); | |
2410 | ||
2411 | static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, | |
2412 | size_t count) | |
2413 | { | |
2414 | struct se_device *se_dev = container_of(to_config_group(item), | |
2415 | struct se_device, | |
2416 | dev_action_group); | |
2417 | struct tcmu_dev *udev = TCMU_DEV(se_dev); | |
2418 | u8 val; | |
2419 | int ret; | |
2420 | ||
2421 | ret = kstrtou8(page, 0, &val); | |
2422 | if (ret < 0) | |
2423 | return ret; | |
2424 | ||
2425 | if (val != 1 && val != 2) { | |
2426 | pr_err("Invalid reset ring value %d\n", val); | |
2427 | return -EINVAL; | |
2428 | } | |
2429 | ||
2430 | tcmu_reset_ring(udev, val); | |
2431 | return count; | |
2432 | } | |
2433 | CONFIGFS_ATTR_WO(tcmu_, reset_ring); | |
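
/*
 * block_dev and reset_ring are exposed through the device's "action"
 * configfs group.  A usage sketch (the exact path is an example):
 *
 *   echo 1 > /sys/kernel/config/target/core/user_0/mydev/action/block_dev
 *   echo 2 > /sys/kernel/config/target/core/user_0/mydev/action/reset_ring
 */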
2434 | ||
5821783b | 2435 | static struct configfs_attribute *tcmu_attrib_attrs[] = { |
801fc54d | 2436 | &tcmu_attr_cmd_time_out, |
9103575a | 2437 | &tcmu_attr_qfull_time_out, |
80eb8761 | 2438 | &tcmu_attr_max_data_area_mb, |
2d76443e | 2439 | &tcmu_attr_dev_config, |
801fc54d BL |
2440 | &tcmu_attr_dev_size, |
2441 | &tcmu_attr_emulate_write_cache, | |
b849b456 | 2442 | &tcmu_attr_nl_reply_supported, |
801fc54d BL |
2443 | NULL, |
2444 | }; | |
2445 | ||
7d7a7435 NB |
2446 | static struct configfs_attribute **tcmu_attrs; |
2447 | ||
892782ca MC |
2448 | static struct configfs_attribute *tcmu_action_attrs[] = { |
2449 | &tcmu_attr_block_dev, | |
2450 | &tcmu_attr_reset_ring, | |
2451 | NULL, | |
2452 | }; | |
2453 | ||
7d7a7435 | 2454 | static struct target_backend_ops tcmu_ops = { |
7c9e7a6f | 2455 | .name = "user", |
7c9e7a6f | 2456 | .owner = THIS_MODULE, |
a3541703 | 2457 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
7c9e7a6f AG |
2458 | .attach_hba = tcmu_attach_hba, |
2459 | .detach_hba = tcmu_detach_hba, | |
2460 | .alloc_device = tcmu_alloc_device, | |
2461 | .configure_device = tcmu_configure_device, | |
92634706 | 2462 | .destroy_device = tcmu_destroy_device, |
7c9e7a6f AG |
2463 | .free_device = tcmu_free_device, |
2464 | .parse_cdb = tcmu_parse_cdb, | |
2465 | .set_configfs_dev_params = tcmu_set_configfs_dev_params, | |
2466 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, | |
2467 | .get_device_type = sbc_get_device_type, | |
2468 | .get_blocks = tcmu_get_blocks, | |
892782ca | 2469 | .tb_dev_action_attrs = tcmu_action_attrs, |
7c9e7a6f AG |
2470 | }; |
2471 | ||
89ec9cfd | 2472 | static void find_free_blocks(void) |
b6df4b79 XL |
2473 | { |
2474 | struct tcmu_dev *udev; | |
2475 | loff_t off; | |
af1dd7ff MC |
2476 | u32 start, end, block, total_freed = 0; |
2477 | ||
80eb8761 | 2478 | if (atomic_read(&global_db_count) <= tcmu_global_max_blocks) |
af1dd7ff | 2479 | return; |
b6df4b79 | 2480 | |
89ec9cfd MC |
2481 | mutex_lock(&root_udev_mutex); |
2482 | list_for_each_entry(udev, &root_udev, node) { | |
2483 | mutex_lock(&udev->cmdr_lock); | |
b6df4b79 | 2484 | |
89ec9cfd MC |
2485 | /* Try to complete the finished commands first */ |
2486 | tcmu_handle_completions(udev); | |
d906d8af | 2487 | |
af1dd7ff MC |
2488 | /* Skip udevs that are idle */ | 
2489 | if (!udev->dbi_thresh) { | |
89ec9cfd MC |
2490 | mutex_unlock(&udev->cmdr_lock); |
2491 | continue; | |
2492 | } | |
b6df4b79 | 2493 | |
89ec9cfd MC |
2494 | end = udev->dbi_max + 1; |
2495 | block = find_last_bit(udev->data_bitmap, end); | |
2496 | if (block == udev->dbi_max) { | |
2497 | /* | |
af1dd7ff MC |
2498 | * The last bit is dbi_max, so it is not possible to | 
2499 | * reclaim any blocks. | 
89ec9cfd MC |
2500 | */ |
2501 | mutex_unlock(&udev->cmdr_lock); | |
2502 | continue; | |
2503 | } else if (block == end) { | |
2504 | /* The current udev will go to the idle state */ | 
2505 | udev->dbi_thresh = start = 0; | |
2506 | udev->dbi_max = 0; | |
2507 | } else { | |
2508 | udev->dbi_thresh = start = block + 1; | |
2509 | udev->dbi_max = block; | |
2510 | } | |
b6df4b79 | 2511 | |
89ec9cfd MC |
2512 | /* Unmap the now-unused tail of the data area from userspace, starting at off */ | 
2513 | off = udev->data_off + start * DATA_BLOCK_SIZE; | |
2514 | unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); | |
b6df4b79 | 2515 | |
89ec9cfd MC |
2516 | /* Release the block pages */ |
2517 | tcmu_blocks_release(&udev->data_blocks, start, end); | |
2518 | mutex_unlock(&udev->cmdr_lock); | |
b6df4b79 | 2519 | |
af1dd7ff MC |
2520 | total_freed += end - start; |
2521 | pr_debug("Freed %u blocks (total %u) from %s.\n", end - start, | |
2522 | total_freed, udev->name); | |
89ec9cfd MC |
2523 | } |
2524 | mutex_unlock(&root_udev_mutex); | |
af1dd7ff | 2525 | |
80eb8761 | 2526 | if (atomic_read(&global_db_count) > tcmu_global_max_blocks) |
af1dd7ff | 2527 | schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000)); |
89ec9cfd MC |
2528 | } |
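
/*
 * If the global block count is still above tcmu_global_max_blocks
 * after a full pass, find_free_blocks() reschedules itself five
 * seconds later rather than looping, keeping block reclaim off the
 * I/O hot path.
 */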
2529 | ||
488ebe4c MC |
2530 | static void check_timedout_devices(void) |
2531 | { | |
2532 | struct tcmu_dev *udev, *tmp_dev; | |
2533 | LIST_HEAD(devs); | |
2534 | ||
2535 | spin_lock_bh(&timed_out_udevs_lock); | |
2536 | list_splice_init(&timed_out_udevs, &devs); | |
2537 | ||
2538 | list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { | |
2539 | list_del_init(&udev->timedout_entry); | |
2540 | spin_unlock_bh(&timed_out_udevs_lock); | |
2541 | ||
6fddcb77 | 2542 | mutex_lock(&udev->cmdr_lock); |
488ebe4c | 2543 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); |
6fddcb77 | 2544 | mutex_unlock(&udev->cmdr_lock); |
488ebe4c MC |
2545 | |
2546 | spin_lock_bh(&timed_out_udevs_lock); | |
2547 | } | |
2548 | ||
2549 | spin_unlock_bh(&timed_out_udevs_lock); | |
2550 | } | |
2551 | ||
9972cebb | 2552 | static void tcmu_unmap_work_fn(struct work_struct *work) |
89ec9cfd | 2553 | { |
488ebe4c | 2554 | check_timedout_devices(); |
9972cebb | 2555 | find_free_blocks(); |
b6df4b79 XL |
2556 | } |
2557 | ||
7c9e7a6f AG |
2558 | static int __init tcmu_module_init(void) |
2559 | { | |
801fc54d | 2560 | int ret, i, k, len = 0; |
7c9e7a6f AG |
2561 | |
2562 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); | |
2563 | ||
af1dd7ff | 2564 | INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); |
9972cebb | 2565 | |
7c9e7a6f AG |
2566 | tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", |
2567 | sizeof(struct tcmu_cmd), | |
2568 | __alignof__(struct tcmu_cmd), | |
2569 | 0, NULL); | |
2570 | if (!tcmu_cmd_cache) | |
2571 | return -ENOMEM; | |
2572 | ||
2573 | tcmu_root_device = root_device_register("tcm_user"); | |
2574 | if (IS_ERR(tcmu_root_device)) { | |
2575 | ret = PTR_ERR(tcmu_root_device); | |
2576 | goto out_free_cache; | |
2577 | } | |
2578 | ||
2579 | ret = genl_register_family(&tcmu_genl_family); | |
2580 | if (ret < 0) { | |
2581 | goto out_unreg_device; | |
2582 | } | |
2583 | ||
7d7a7435 NB |
2584 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { |
2585 | len += sizeof(struct configfs_attribute *); | |
2586 | } | |
801fc54d BL |
2587 | for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { |
2588 | len += sizeof(struct configfs_attribute *); | |
2589 | } | |
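/* one extra slot for the terminating NULL entry of tcmu_attrs */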
2590 | len += sizeof(struct configfs_attribute *); | |
7d7a7435 NB |
2591 | |
2592 | tcmu_attrs = kzalloc(len, GFP_KERNEL); | |
2593 | if (!tcmu_attrs) { | |
2594 | ret = -ENOMEM; | |
2595 | goto out_unreg_genl; | |
2596 | } | |
2597 | ||
2598 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { | |
2599 | tcmu_attrs[i] = passthrough_attrib_attrs[i]; | |
2600 | } | |
801fc54d BL |
2601 | for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { |
2602 | tcmu_attrs[i] = tcmu_attrib_attrs[k]; | |
2603 | i++; | |
2604 | } | |
7d7a7435 NB |
2605 | tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; |
2606 | ||
0a06d430 | 2607 | ret = transport_backend_register(&tcmu_ops); |
7c9e7a6f | 2608 | if (ret) |
7d7a7435 | 2609 | goto out_attrs; |
7c9e7a6f AG |
2610 | |
2611 | return 0; | |
2612 | ||
7d7a7435 NB |
2613 | out_attrs: |
2614 | kfree(tcmu_attrs); | |
7c9e7a6f AG |
2615 | out_unreg_genl: |
2616 | genl_unregister_family(&tcmu_genl_family); | |
2617 | out_unreg_device: | |
2618 | root_device_unregister(tcmu_root_device); | |
2619 | out_free_cache: | |
2620 | kmem_cache_destroy(tcmu_cmd_cache); | |
2621 | ||
2622 | return ret; | |
2623 | } | |
2624 | ||
2625 | static void __exit tcmu_module_exit(void) | |
2626 | { | |
af1dd7ff | 2627 | cancel_delayed_work_sync(&tcmu_unmap_work); |
0a06d430 | 2628 | target_backend_unregister(&tcmu_ops); |
7d7a7435 | 2629 | kfree(tcmu_attrs); |
7c9e7a6f AG |
2630 | genl_unregister_family(&tcmu_genl_family); |
2631 | root_device_unregister(tcmu_root_device); | |
2632 | kmem_cache_destroy(tcmu_cmd_cache); | |
2633 | } | |
2634 | ||
2635 | MODULE_DESCRIPTION("TCM USER subsystem plugin"); | |
2636 | MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); | |
2637 | MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); | |
2638 | MODULE_LICENSE("GPL"); | |
2639 | ||
2640 | module_init(tcmu_module_init); | |
2641 | module_exit(tcmu_module_exit); |