/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_dev_entry *deve;
        sense_reason_t ret = TCM_NO_SENSE;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                atomic_long_inc(&deve->total_cmds);

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->write_bytes);
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->read_bytes);

                se_lun = rcu_dereference(deve->se_lun);

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    deve->lun_access_ro) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->fabric_name,
                                unpacked_lun);
                        rcu_read_unlock();
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->fabric_name,
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
                }

                se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;

                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE)) {
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
        /*
         * RCU reference protected by percpu se_lun->lun_ref taken above that
         * must drop to zero (including initial reference) before this se_lun
         * pointer can be kfree_rcu() by the final se_lun->lun_group put via
         * target_core_fabric_configfs.c:target_fabric_port_release
         */
ref_dev:
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        atomic_long_inc(&se_cmd->se_dev->num_cmds);

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->read_bytes);

        return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
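
/*
 * Example (editor's sketch, not part of the original file): a fabric
 * driver would typically resolve the LUN right after building an se_cmd,
 * roughly as below.  transport_send_check_condition_and_sense() is the
 * existing target-core helper for returning a CHECK CONDITION.
 *
 *	sense_reason_t rc;
 *
 *	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc) {
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *		return;
 *	}
 *	(se_cmd->se_lun, se_cmd->se_dev and the percpu lun_ref are now valid)
 */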

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                se_lun = rcu_dereference(deve->se_lun);

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08llx\n",
                        se_cmd->se_tfo->fabric_name,
                        unpacked_lun);
                return -ENODEV;
        }
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
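
/*
 * Example (editor's sketch): TMR dispatch in a fabric driver might look
 * like the following, assuming se_cmd->se_tmr_req was already allocated
 * via the existing core_tmr_alloc_req() helper:
 *
 *	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0) {
 *		se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
 *		(complete the TMR back to the fabric here)
 *	}
 */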

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
        struct se_session *se_sess = cmd->se_sess;
        struct se_dev_entry *deve;
        bool ret;

        rcu_read_lock();
        deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
        ret = deve && deve->lun_access_ro;
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
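
/*
 * Example (editor's note): the SPC emulation code uses this helper when
 * building MODE SENSE data, e.g. to set the WP bit (0x80) in the
 * device-specific parameter byte when the mapped LUN is read-only:
 *
 *	if (target_lun_is_rdonly(cmd))
 *		buf[3] |= 0x80;
 */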

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_portal_group *tpg = nacl->se_tpg;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                lun = rcu_dereference(deve->se_lun);
                if (!lun) {
                        pr_err("%s device entry's device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->fabric_name);
                        continue;
                }
                if (lun->lun_rtpi != rtpi)
                        continue;

                kref_get(&deve->pr_kref);
                rcu_read_unlock();

                return deve;
        }
        rcu_read_unlock();

        return NULL;
}

void core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                struct se_lun *lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        }
        mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, mapped_lun);
        if (deve)
                deve->lun_access_ro = lun_access_ro;
        mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
        struct se_dev_entry *deve;

        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                if (deve->mapped_lun == mapped_lun)
                        return deve;

        return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
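
/*
 * Example (editor's note): as in the lookup functions above, callers must
 * bracket this with RCU and only dereference the returned entry inside
 * the read-side critical section (or while holding lun_entry_mutex):
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve)
 *		(use deve fields here)
 *	rcu_read_unlock();
 */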

void target_pr_kref_release(struct kref *kref)
{
        struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
                                                 pr_kref);
        complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
                             bool skip_new)
{
        struct se_dev_entry *tmp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
                if (skip_new && tmp == new)
                        continue;
                core_scsi3_ua_allocate(tmp, 0x3F,
                                       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
        }
        rcu_read_unlock();
}

int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *orig, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new) {
                pr_err("Unable to allocate se_dev_entry memory\n");
                return -ENOMEM;
        }

        spin_lock_init(&new->ua_lock);
        INIT_LIST_HEAD(&new->ua_list);
        INIT_LIST_HEAD(&new->lun_link);

        new->mapped_lun = mapped_lun;
        kref_init(&new->pr_kref);
        init_completion(&new->pr_comp);

        new->lun_access_ro = lun_access_ro;
        new->creation_time = get_jiffies_64();
        new->attach_count++;

        mutex_lock(&nacl->lun_entry_mutex);
        orig = target_nacl_find_deve(nacl, mapped_lun);
        if (orig && orig->se_lun) {
                struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                if (orig_lun != lun) {
                        pr_err("Existing orig->se_lun doesn't match new lun"
                               " for dynamic -> explicit NodeACL conversion:"
                                " %s\n", nacl->initiatorname);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }
                if (orig->se_lun_acl != NULL) {
                        pr_warn_ratelimited("Detected existing explicit"
                                " se_lun_acl->se_lun_group reference for %s"
                                " mapped_lun: %llu, failing\n",
                                 nacl->initiatorname, mapped_lun);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }

                rcu_assign_pointer(new->se_lun, lun);
                rcu_assign_pointer(new->se_lun_acl, lun_acl);
                hlist_del_rcu(&orig->link);
                hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
                mutex_unlock(&nacl->lun_entry_mutex);

                spin_lock(&lun->lun_deve_lock);
                list_del(&orig->lun_link);
                list_add_tail(&new->lun_link, &lun->lun_deve_list);
                spin_unlock(&lun->lun_deve_lock);

                kref_put(&orig->pr_kref, target_pr_kref_release);
                wait_for_completion(&orig->pr_comp);

                target_luns_data_has_changed(nacl, new, true);
                kfree_rcu(orig, rcu_head);
                return 0;
        }

        rcu_assign_pointer(new->se_lun, lun);
        rcu_assign_pointer(new->se_lun_acl, lun_acl);
        hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
        mutex_unlock(&nacl->lun_entry_mutex);

        spin_lock(&lun->lun_deve_lock);
        list_add_tail(&new->lun_link, &lun->lun_deve_list);
        spin_unlock(&lun->lun_deve_lock);

        target_luns_data_has_changed(nacl, new, true);
        return 0;
}

/*
 *      Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_dev_entry *orig,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * lun->lun_deve_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->lun_link from
         * lun->lun_deve_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below.
         */
        spin_lock(&lun->lun_deve_lock);
        list_del(&orig->lun_link);
        spin_unlock(&lun->lun_deve_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(orig);

        hlist_del_rcu(&orig->link);
        clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
        orig->lun_access_ro = false;
        orig->creation_time = 0;
        orig->attach_count--;
        /*
         * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
         * or REGISTER_AND_MOVE PR operation to complete.
         */
        kref_put(&orig->pr_kref, target_pr_kref_release);
        wait_for_completion(&orig->pr_comp);

        rcu_assign_pointer(orig->se_lun, NULL);
        rcu_assign_pointer(orig->se_lun_acl, NULL);

        kfree_rcu(orig, rcu_head);

        core_scsi3_free_pr_reg_from_nacl(dev, nacl);
        target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

                mutex_lock(&nacl->lun_entry_mutex);
                hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                        struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                        if (lun != tmp_lun)
                                continue;

                        core_disable_device_list_for_node(lun, deve, nacl, tpg);
                }
                mutex_unlock(&nacl->lun_entry_mutex);
        }
        mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
        struct se_lun *tmp;

        spin_lock(&dev->se_port_lock);
        if (dev->export_count == 0x0000ffff) {
                pr_warn("Reached dev->export_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return -ENOSPC;
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code      Description
         * 0h        Reserved
         * 1h        Relative port 1, historically known as port A
         * 2h        Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        lun->lun_rtpi = dev->dev_rpti_counter++;
        if (!lun->lun_rtpi)
                goto again;

        list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap.
                 */
                if (lun->lun_rtpi == tmp->lun_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return 0;
}
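
/*
 * Example (editor's note): once dev_rpti_counter reaches 0xffff the
 * post-increment wraps to 0, which SPC-4 reserves, so the !lun->lun_rtpi
 * check retries and hands out 1; the dev_sep_list walk then retries
 * again whenever the candidate value is still held by another exported
 * port, guaranteeing a unique non-zero RTPI after a 16-bit wrap.
 */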

static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}
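
/*
 * Example (editor's note): with PAGE_SIZE = 4096 and block_size = 512,
 * alignment = 4096 / 512 = 8, so max_sectors = 65535 rounds down to
 * 65528.  For block_size >= PAGE_SIZE the max(1ul, ...) clamp keeps
 * alignment at 1 and the value passes through unchanged.
 */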

int core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
        struct se_lun *lun)
{
        int rc;

        rc = core_tpg_add_lun(tpg, lun, false, dev);
        if (rc < 0)
                return rc;

        pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;

                mutex_lock(&tpg->acl_node_mutex);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                core_tpg_add_node_to_devs(acl, tpg, lun);
                        }
                }
                mutex_unlock(&tpg->acl_node_mutex);
        }

        return 0;
}

/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->fabric_name);

        core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
        u64 mapped_lun,
        int *ret)
{
        struct se_lun_acl *lacl;

        if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->fabric_name);
                *ret = -EOVERFLOW;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;

        return lacl;
}

int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        struct se_lun *lun,
        bool lun_access_ro)
{
        struct se_node_acl *nacl = lacl->se_lun_nacl;
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        if (!nacl)
                return -EINVAL;

        if (lun->lun_access_ro)
                lun_access_ro = true;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access_ro, nacl, tpg) < 0)
                return -EINVAL;

        pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
                lun_access_ro ? "RO" : "RW",
                nacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL.
         */
        core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
}

int core_dev_del_initiator_node_lun_acl(
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_portal_group *tpg = lun->lun_tpg;
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
        if (deve)
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        mutex_unlock(&nacl->lun_entry_mutex);

        pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %llu\n",
                tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                nacl->initiatorname, lacl->mapped_lun);

        return 0;
}

void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->fabric_name,
                lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        int device_type = dev->transport->get_device_type(dev);

        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
                wwn->vendor);
        pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
                wwn->model);
        pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
                wwn->revision);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;
        struct se_lun *xcopy_lun;

        dev = hba->backend->ops->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->se_hba = hba;
        dev->transport = hba->backend->ops;
        dev->prot_length = sizeof(struct t10_pi_tuple);
        dev->hba_index = hba->hba_index;

        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        sema_init(&dev->caw_sem, 1);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
        INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
        spin_lock_init(&dev->t10_alua.lba_map_lock);

        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = 1;
        dev->dev_attrib.emulate_fua_write = 1;
        dev->dev_attrib.emulate_fua_read = 1;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
        dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
        dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.unmap_zeroes_data =
                                DA_UNMAP_ZEROES_DATA_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
        xcopy_lun->lun_tpg = &xcopy_pt_tpg;

        /* Preload the default INQUIRY const values */
        strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
        strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
                sizeof(dev->t10_wwn.model));
        strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
                sizeof(dev->t10_wwn.revision));

        return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                       struct request_queue *q)
{
        int block_size = queue_logical_block_size(q);

        if (!blk_queue_discard(q))
                return false;

        attrib->max_unmap_lba_count =
                q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
        /*
         * Currently hardcoded to 1 in Linux/SCSI code..
         */
        attrib->max_unmap_block_desc_count = 1;
        attrib->unmap_granularity = q->limits.discard_granularity / block_size;
        attrib->unmap_granularity_alignment = q->limits.discard_alignment /
                                                                block_size;
        attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
        return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
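
/*
 * Example (editor's note): the block layer reports discard limits in
 * 512-byte sectors, while the attributes above are in logical blocks.
 * For a 4096-byte block device with max_discard_sectors = 8192,
 * max_unmap_lba_count = 8192 >> (ilog2(4096) - 9) = 8192 >> 3 = 1024
 * logical blocks, and a discard_granularity of 4096 bytes yields
 * unmap_granularity = 1.
 */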

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
        switch (dev->dev_attrib.block_size) {
        case 4096:
                return lb << 3;
        case 2048:
                return lb << 2;
        case 1024:
                return lb << 1;
        default:
                return lb;
        }
}
EXPORT_SYMBOL(target_to_linux_sector);
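
/*
 * Example (editor's note): a device advertising 4096-byte blocks maps
 * LBA 100 to 512-byte sector 800 (100 << 3); with a 512-byte block size
 * the LBA is already in block-layer units and is returned unchanged.
 */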

struct devices_idr_iter {
        struct config_item *prev_item;
        int (*fn)(struct se_device *dev, void *data);
        void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
        __must_hold(&device_mutex)
{
        struct devices_idr_iter *iter = data;
        struct se_device *dev = p;
        int ret;

        config_item_put(iter->prev_item);
        iter->prev_item = NULL;

        /*
         * We add the device early to the idr, so it can be used
         * by backend modules during configuration. We do not want
         * to allow other callers to access partially setup devices,
         * so we skip them here.
         */
        if (!target_dev_configured(dev))
                return 0;

        iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
        if (!iter->prev_item)
                return 0;
        mutex_unlock(&device_mutex);

        ret = iter->fn(dev, iter->data);

        mutex_lock(&device_mutex);
        return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                           void *data)
{
        struct devices_idr_iter iter = { .fn = fn, .data = data };
        int ret;

        mutex_lock(&device_mutex);
        ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
        mutex_unlock(&device_mutex);
        config_item_put(iter.prev_item);
        return ret;
}
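
/*
 * Example (editor's sketch): a caller counting configured devices could
 * pass a small callback; count_dev() and ndevs are hypothetical names.
 *
 *	static int count_dev(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	(zero means keep iterating)
 *	}
 *
 *	int ndevs = 0;
 *	target_for_each_device(count_dev, &ndevs);
 */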

int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret, id;

        if (target_dev_configured(dev)) {
                pr_err("se_dev->se_dev_ptr already set for storage"
                                " object\n");
                return -EEXIST;
        }

        /*
         * Add early so modules like tcmu can use during its
         * configuration.
         */
        mutex_lock(&device_mutex);
        /*
         * Use cyclic to try and avoid collisions with devices
         * that were recently removed.
         */
        id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
        mutex_unlock(&device_mutex);
        if (id < 0) {
                ret = -ENOMEM;
                goto out;
        }
        dev->dev_index = id;

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out_free_index;
        /*
         * XXX: there is not much point to have two different values here..
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
        dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out_destroy_device;

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);

        dev->dev_flags |= DF_CONFIGURED;

        return 0;

out_destroy_device:
        dev->transport->destroy_device(dev);
out_free_index:
        mutex_lock(&device_mutex);
        idr_remove(&devices_idr, dev->dev_index);
        mutex_unlock(&device_mutex);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}

void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (target_dev_configured(dev)) {
                dev->transport->destroy_device(dev);

                mutex_lock(&device_mutex);
                idr_remove(&devices_idr, dev->dev_index);
                mutex_unlock(&device_mutex);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_alua_set_lba_map(dev, NULL, 0, 0);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        if (dev->transport->free_prot)
                dev->transport->free_prot(dev);

        dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[] = "rd_pages=8,rd_nullio=1";
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}

void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_device *dev = cmd->se_dev;
        unsigned int size;

        /*
         * Clear a LUN set in the CDB if the initiator talking to us spoke
         * an old standards version, as we can't assume the underlying device
         * won't choke on it.
         */
        switch (cdb[0]) {
        case READ_10: /* SBC - RDProtect */
        case READ_12: /* SBC - RDProtect */
        case READ_16: /* SBC - RDProtect */
        case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
        case VERIFY: /* SBC - VRProtect */
        case VERIFY_16: /* SBC - VRProtect */
        case WRITE_VERIFY: /* SBC - VRProtect */
        case WRITE_VERIFY_12: /* SBC - VRProtect */
        case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
                break;
        default:
                cdb[1] &= 0x1f; /* clear logical unit number */
                break;
        }

        /*
         * For REPORT LUNS we always need to emulate the response; for
         * everything else, pass the CDB through to the backend device.
         */
        if (cdb[0] == REPORT_LUNS) {
                cmd->execute_cmd = spc_emulate_report_luns;
                return TCM_NO_SENSE;
        }

        /*
         * With emulate_pr disabled, all reservation requests should fail,
         * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
         */
        if (!dev->dev_attrib.emulate_pr &&
            ((cdb[0] == PERSISTENT_RESERVE_IN) ||
             (cdb[0] == PERSISTENT_RESERVE_OUT) ||
             (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
             (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        /*
         * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
         * emulate the response, since tcmu does not have the information
         * required to process these commands.
         */
        if (!(dev->transport->transport_flags &
              TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                if (cdb[0] == PERSISTENT_RESERVE_IN) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_in;
                        size = get_unaligned_be16(&cdb[7]);
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == PERSISTENT_RESERVE_OUT) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_out;
                        size = get_unaligned_be32(&cdb[5]);
                        return target_cmd_size_check(cmd, size);
                }

                if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_release;
                        if (cdb[0] == RELEASE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_reserve;
                        if (cdb[0] == RESERVE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
        }

        /* Set DATA_CDB flag for ops that should have it */
        switch (cdb[0]) {
        case READ_6:
        case READ_10:
        case READ_12:
        case READ_16:
        case WRITE_6:
        case WRITE_10:
        case WRITE_12:
        case WRITE_16:
        case WRITE_VERIFY:
        case WRITE_VERIFY_12:
        case WRITE_VERIFY_16:
        case COMPARE_AND_WRITE:
        case XDWRITEREAD_10:
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                break;
        case VARIABLE_LENGTH_CMD:
                switch (get_unaligned_be16(&cdb[8])) {
                case READ_32:
                case WRITE_32:
                case WRITE_VERIFY_32:
                case XDWRITEREAD_32:
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                        break;
                }
        }

        cmd->execute_cmd = exec_cmd;

        return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
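
/*
 * Example (editor's sketch): a passthrough backend wires this up from
 * its own parse_cdb hook; the pscsi backend does roughly the following,
 * with pscsi_execute_cmd() as the backend's exec_cmd callback:
 *
 *	static sense_reason_t pscsi_parse_cdb(struct se_cmd *cmd)
 *	{
 *		if (cmd->se_cmd_flags & SCF_BIDI)
 *			return TCM_UNSUPPORTED_SCSI_OPCODE;
 *		return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 *	}
 */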