target: Add se_node_acl->acl_kref for ->acl_free_comp usage
[linux-2.6-block.git] / drivers / target / target_core_tpg.c
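This change initializes se_node_acl->acl_kref alongside the existing ->acl_free_comp setup in the node ACL creation paths below. A minimal sketch of the intended pairing, assuming the put helper and release callback (e.g. target_put_nacl() / target_complete_nacl(), expected to live in target_core_transport.c rather than in this file) complete ->acl_free_comp on the final reference drop so ACL teardown can wait for outstanding sessions:

        /* Sketch only: assumed helpers outside target_core_tpg.c */
        static void target_complete_nacl(struct kref *kref)
        {
                struct se_node_acl *nacl = container_of(kref,
                                struct se_node_acl, acl_kref);

                /* Last reference dropped: let ACL teardown proceed. */
                complete(&nacl->acl_free_comp);
        }

        void target_put_nacl(struct se_node_acl *nacl)
        {
                kref_put(&nacl->acl_kref, target_complete_nacl);
        }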
1 /*******************************************************************************
2  * Filename:  target_core_tpg.c
3  *
4  * This file contains generic Target Portal Group related functions.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28
29 #include <linux/net.h>
30 #include <linux/string.h>
31 #include <linux/timer.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34 #include <linux/in.h>
35 #include <linux/export.h>
36 #include <net/sock.h>
37 #include <net/tcp.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40
41 #include <target/target_core_base.h>
42 #include <target/target_core_backend.h>
43 #include <target/target_core_fabric.h>
44
45 #include "target_core_internal.h"
46
47 extern struct se_device *g_lun0_dev;
48
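/*
 * List of every registered struct se_portal_group, linked via
 * se_tpg->se_tpg_node and protected by tpg_lock.
 */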
49 static DEFINE_SPINLOCK(tpg_lock);
50 static LIST_HEAD(tpg_list);
51
52 /*      core_clear_initiator_node_from_tpg():
53  *
54  *
55  */
56 static void core_clear_initiator_node_from_tpg(
57         struct se_node_acl *nacl,
58         struct se_portal_group *tpg)
59 {
60         int i;
61         struct se_dev_entry *deve;
62         struct se_lun *lun;
63         struct se_lun_acl *acl, *acl_tmp;
64
65         spin_lock_irq(&nacl->device_list_lock);
66         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
67                 deve = &nacl->device_list[i];
68
69                 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
70                         continue;
71
72                 if (!deve->se_lun) {
73                         pr_err("%s device entry's device pointer is"
74                                 " NULL, but Initiator has access.\n",
75                                 tpg->se_tpg_tfo->get_fabric_name());
76                         continue;
77                 }
78
79                 lun = deve->se_lun;
80                 spin_unlock_irq(&nacl->device_list_lock);
81                 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
82                         TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
83
84                 spin_lock(&lun->lun_acl_lock);
85                 list_for_each_entry_safe(acl, acl_tmp,
86                                         &lun->lun_acl_list, lacl_list) {
87                         if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
88                             (acl->mapped_lun == deve->mapped_lun))
89                                 break;
90                 }
91
92                 if (!acl) {
93                         pr_err("Unable to locate struct se_lun_acl for %s,"
94                                 " mapped_lun: %u\n", nacl->initiatorname,
95                                 deve->mapped_lun);
96                         spin_unlock(&lun->lun_acl_lock);
97                         spin_lock_irq(&nacl->device_list_lock);
98                         continue;
99                 }
100
101                 list_del(&acl->lacl_list);
102                 spin_unlock(&lun->lun_acl_lock);
103
104                 spin_lock_irq(&nacl->device_list_lock);
105                 kfree(acl);
106         }
107         spin_unlock_irq(&nacl->device_list_lock);
108 }
109
110 /*      __core_tpg_get_initiator_node_acl():
111  *
112  *      spin_lock_bh(&tpg->acl_node_lock); must be held when calling
113  */
114 struct se_node_acl *__core_tpg_get_initiator_node_acl(
115         struct se_portal_group *tpg,
116         const char *initiatorname)
117 {
118         struct se_node_acl *acl;
119
120         list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
121                 if (!strcmp(acl->initiatorname, initiatorname))
122                         return acl;
123         }
124
125         return NULL;
126 }
127
128 /*      core_tpg_get_initiator_node_acl():
129  *
130  *
131  */
132 struct se_node_acl *core_tpg_get_initiator_node_acl(
133         struct se_portal_group *tpg,
134         unsigned char *initiatorname)
135 {
136         struct se_node_acl *acl;
137
138         spin_lock_irq(&tpg->acl_node_lock);
139         list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
140                 if (!strcmp(acl->initiatorname, initiatorname) &&
141                     !acl->dynamic_node_acl) {
142                         spin_unlock_irq(&tpg->acl_node_lock);
143                         return acl;
144                 }
145         }
146         spin_unlock_irq(&tpg->acl_node_lock);
147
148         return NULL;
149 }
150
151 /*      core_tpg_add_node_to_devs():
152  *
153  *
154  */
155 void core_tpg_add_node_to_devs(
156         struct se_node_acl *acl,
157         struct se_portal_group *tpg)
158 {
159         int i = 0;
160         u32 lun_access = 0;
161         struct se_lun *lun;
162         struct se_device *dev;
163
164         spin_lock(&tpg->tpg_lun_lock);
165         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
166                 lun = &tpg->tpg_lun_list[i];
167                 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
168                         continue;
169
170                 spin_unlock(&tpg->tpg_lun_lock);
171
172                 dev = lun->lun_se_dev;
173                 /*
174                  * By default in LIO-Target $FABRIC_MOD,
175                  * demo_mode_write_protect is ON, i.e. LUNs are READ_ONLY;
176                  */
177                 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
178                         if (dev->dev_flags & DF_READ_ONLY)
179                                 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
180                         else
181                                 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
182                 } else {
183                         /*
184                          * Allow only optical drives to issue R/W in default RO
185                          * demo mode.
186                          */
187                         if (dev->transport->get_device_type(dev) == TYPE_DISK)
188                                 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
189                         else
190                                 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
191                 }
192
193                 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
194                         " access for LUN in Demo Mode\n",
195                         tpg->se_tpg_tfo->get_fabric_name(),
196                         tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
197                         (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
198                         "READ-WRITE" : "READ-ONLY");
199
200                 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
201                                 lun_access, acl, tpg, 1);
202                 spin_lock(&tpg->tpg_lun_lock);
203         }
204         spin_unlock(&tpg->tpg_lun_lock);
205 }
206
207 /*      core_set_queue_depth_for_node():
208  *
209  *
210  */
211 static int core_set_queue_depth_for_node(
212         struct se_portal_group *tpg,
213         struct se_node_acl *acl)
214 {
215         if (!acl->queue_depth) {
216                 pr_err("Queue depth for %s Initiator Node: %s is 0,"
217                         " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
218                         acl->initiatorname);
219                 acl->queue_depth = 1;
220         }
221
222         return 0;
223 }
224
225 /*      core_create_device_list_for_node():
226  *
227  *
228  */
229 static int core_create_device_list_for_node(struct se_node_acl *nacl)
230 {
231         struct se_dev_entry *deve;
232         int i;
233
234         nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
235                                 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
236         if (!nacl->device_list) {
237                 pr_err("Unable to allocate memory for"
238                         " struct se_node_acl->device_list\n");
239                 return -ENOMEM;
240         }
241         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
242                 deve = &nacl->device_list[i];
243
244                 atomic_set(&deve->ua_count, 0);
245                 atomic_set(&deve->pr_ref_count, 0);
246                 spin_lock_init(&deve->ua_lock);
247                 INIT_LIST_HEAD(&deve->alua_port_list);
248                 INIT_LIST_HEAD(&deve->ua_list);
249         }
250
251         return 0;
252 }
253
254 /*      core_tpg_check_initiator_node_acl()
255  *
256  *
257  */
258 struct se_node_acl *core_tpg_check_initiator_node_acl(
259         struct se_portal_group *tpg,
260         unsigned char *initiatorname)
261 {
262         struct se_node_acl *acl;
263
264         acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
265         if (acl)
266                 return acl;
267
268         if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
269                 return NULL;
270
271         acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
272         if (!acl)
273                 return NULL;
274
275         INIT_LIST_HEAD(&acl->acl_list);
276         INIT_LIST_HEAD(&acl->acl_sess_list);
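        /*
         * Each session bound to this ACL is expected to hold acl_kref, with
         * the final kref_put() completing ->acl_free_comp (handled in the
         * session release path outside this file) so ACL teardown can wait
         * for outstanding sessions.
         */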
277         kref_init(&acl->acl_kref);
278         init_completion(&acl->acl_free_comp);
279         spin_lock_init(&acl->device_list_lock);
280         spin_lock_init(&acl->nacl_sess_lock);
281         atomic_set(&acl->acl_pr_ref_count, 0);
282         acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
283         snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
284         acl->se_tpg = tpg;
285         acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
286         spin_lock_init(&acl->stats_lock);
287         acl->dynamic_node_acl = 1;
288
289         tpg->se_tpg_tfo->set_default_node_attributes(acl);
290
291         if (core_create_device_list_for_node(acl) < 0) {
292                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
293                 return NULL;
294         }
295
296         if (core_set_queue_depth_for_node(tpg, acl) < 0) {
297                 core_free_device_list_for_node(acl, tpg);
298                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299                 return NULL;
300         }
301         /*
302          * Here we only create demo-mode MappedLUNs from the active
303          * TPG LUNs if the fabric is not explicitly asking for
304          * tpg_check_demo_mode_login_only() == 1.
305          */
306         if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307             (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308                 do { ; } while (0);
309         else
310                 core_tpg_add_node_to_devs(acl, tpg);
311
312         spin_lock_irq(&tpg->acl_node_lock);
313         list_add_tail(&acl->acl_list, &tpg->acl_node_list);
314         tpg->num_node_acls++;
315         spin_unlock_irq(&tpg->acl_node_lock);
316
317         pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
318                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
319                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
320                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
321
322         return acl;
323 }
324 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
325
326 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
327 {
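        /*
         * acl_pr_ref_count is held transiently by the PERSISTENT RESERVATIONS
         * code while it dereferences this ACL; spin until every such
         * reference has been dropped before teardown continues.
         */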
328         while (atomic_read(&nacl->acl_pr_ref_count) != 0)
329                 cpu_relax();
330 }
331
332 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
333 {
334         int i, ret;
335         struct se_lun *lun;
336
337         spin_lock(&tpg->tpg_lun_lock);
338         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
339                 lun = &tpg->tpg_lun_list[i];
340
341                 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
342                     (lun->lun_se_dev == NULL))
343                         continue;
344
345                 spin_unlock(&tpg->tpg_lun_lock);
346                 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
347                 spin_lock(&tpg->tpg_lun_lock);
348         }
349         spin_unlock(&tpg->tpg_lun_lock);
350 }
351 EXPORT_SYMBOL(core_tpg_clear_object_luns);
352
353 /*      core_tpg_add_initiator_node_acl():
354  *
355  *
356  */
357 struct se_node_acl *core_tpg_add_initiator_node_acl(
358         struct se_portal_group *tpg,
359         struct se_node_acl *se_nacl,
360         const char *initiatorname,
361         u32 queue_depth)
362 {
363         struct se_node_acl *acl = NULL;
364
365         spin_lock_irq(&tpg->acl_node_lock);
366         acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
367         if (acl) {
368                 if (acl->dynamic_node_acl) {
369                         acl->dynamic_node_acl = 0;
370                         pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
371                                 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
372                                 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
373                         spin_unlock_irq(&tpg->acl_node_lock);
374                         /*
375                          * Release the locally allocated struct se_node_acl
376                          * because core_tpg_add_initiator_node_acl() returned
377                          * a pointer to an existing demo mode node ACL.
378                          */
379                         if (se_nacl)
380                                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
381                                                         se_nacl);
382                         goto done;
383                 }
384
385                 pr_err("ACL entry for %s Initiator"
386                         " Node %s already exists for TPG %u, ignoring"
387                         " request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
388                         initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
389                 spin_unlock_irq(&tpg->acl_node_lock);
390                 return ERR_PTR(-EEXIST);
391         }
392         spin_unlock_irq(&tpg->acl_node_lock);
393
394         if (!se_nacl) {
395                 pr_err("struct se_node_acl pointer is NULL\n");
396                 return ERR_PTR(-EINVAL);
397         }
398         /*
399          * For v4.x logic the se_node_acl_s is hanging off a fabric
400          * dependent structure allocated via
401          * struct target_core_fabric_ops->fabric_make_nodeacl()
402          */
403         acl = se_nacl;
404
405         INIT_LIST_HEAD(&acl->acl_list);
406         INIT_LIST_HEAD(&acl->acl_sess_list);
407         kref_init(&acl->acl_kref);
408         init_completion(&acl->acl_free_comp);
409         spin_lock_init(&acl->device_list_lock);
410         spin_lock_init(&acl->nacl_sess_lock);
411         atomic_set(&acl->acl_pr_ref_count, 0);
412         acl->queue_depth = queue_depth;
413         snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
414         acl->se_tpg = tpg;
415         acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
416         spin_lock_init(&acl->stats_lock);
417
418         tpg->se_tpg_tfo->set_default_node_attributes(acl);
419
420         if (core_create_device_list_for_node(acl) < 0) {
421                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
422                 return ERR_PTR(-ENOMEM);
423         }
424
425         if (core_set_queue_depth_for_node(tpg, acl) < 0) {
426                 core_free_device_list_for_node(acl, tpg);
427                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
428                 return ERR_PTR(-EINVAL);
429         }
430
431         spin_lock_irq(&tpg->acl_node_lock);
432         list_add_tail(&acl->acl_list, &tpg->acl_node_list);
433         tpg->num_node_acls++;
434         spin_unlock_irq(&tpg->acl_node_lock);
435
436 done:
437         pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
438                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
439                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
440                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
441
442         return acl;
443 }
444 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
445
446 /*      core_tpg_del_initiator_node_acl():
447  *
448  *
449  */
450 int core_tpg_del_initiator_node_acl(
451         struct se_portal_group *tpg,
452         struct se_node_acl *acl,
453         int force)
454 {
455         struct se_session *sess, *sess_tmp;
456         unsigned long flags;
457         int dynamic_acl = 0;
458
459         spin_lock_irq(&tpg->acl_node_lock);
460         if (acl->dynamic_node_acl) {
461                 acl->dynamic_node_acl = 0;
462                 dynamic_acl = 1;
463         }
464         list_del(&acl->acl_list);
465         tpg->num_node_acls--;
466         spin_unlock_irq(&tpg->acl_node_lock);
467
468         spin_lock_irqsave(&tpg->session_lock, flags);
469         list_for_each_entry_safe(sess, sess_tmp,
470                                 &tpg->tpg_sess_list, sess_list) {
471                 if (sess->se_node_acl != acl)
472                         continue;
473                 /*
474                  * Determine if the session needs to be closed by our context.
475                  */
476                 if (!tpg->se_tpg_tfo->shutdown_session(sess))
477                         continue;
478
479                 spin_unlock_irqrestore(&tpg->session_lock, flags);
480                 /*
481                  * If the $FABRIC_MOD session for the Initiator Node ACL exists,
482                  * forcefully shutdown the $FABRIC_MOD session/nexus.
483                  */
484                 tpg->se_tpg_tfo->close_session(sess);
485
486                 spin_lock_irqsave(&tpg->session_lock, flags);
487         }
488         spin_unlock_irqrestore(&tpg->session_lock, flags);
489
490         core_tpg_wait_for_nacl_pr_ref(acl);
491         core_clear_initiator_node_from_tpg(acl, tpg);
492         core_free_device_list_for_node(acl, tpg);
493
494         pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
495                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
496                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
497                 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
498
499         return 0;
500 }
501 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
502
503 /*      core_tpg_set_initiator_node_queue_depth():
504  *
505  *
506  */
507 int core_tpg_set_initiator_node_queue_depth(
508         struct se_portal_group *tpg,
509         unsigned char *initiatorname,
510         u32 queue_depth,
511         int force)
512 {
513         struct se_session *sess, *init_sess = NULL;
514         struct se_node_acl *acl;
515         unsigned long flags;
516         int dynamic_acl = 0;
517
518         spin_lock_irq(&tpg->acl_node_lock);
519         acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
520         if (!acl) {
521                 pr_err("Access Control List entry for %s Initiator"
522                         " Node %s does not exist for TPG %hu, ignoring"
523                         " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
524                         initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
525                 spin_unlock_irq(&tpg->acl_node_lock);
526                 return -ENODEV;
527         }
528         if (acl->dynamic_node_acl) {
529                 acl->dynamic_node_acl = 0;
530                 dynamic_acl = 1;
531         }
532         spin_unlock_irq(&tpg->acl_node_lock);
533
534         spin_lock_irqsave(&tpg->session_lock, flags);
535         list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
536                 if (sess->se_node_acl != acl)
537                         continue;
538
539                 if (!force) {
540                         pr_err("Unable to change queue depth for %s"
541                                 " Initiator Node: %s while session is"
542                                 " operational.  To forcefully change the queue"
543                                 " depth and force session reinstatement"
544                                 " use the \"force=1\" parameter.\n",
545                                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
546                         spin_unlock_irqrestore(&tpg->session_lock, flags);
547
548                         spin_lock_irq(&tpg->acl_node_lock);
549                         if (dynamic_acl)
550                                 acl->dynamic_node_acl = 1;
551                         spin_unlock_irq(&tpg->acl_node_lock);
552                         return -EEXIST;
553                 }
554                 /*
555                  * Determine if the session needs to be closed by our context.
556                  */
557                 if (!tpg->se_tpg_tfo->shutdown_session(sess))
558                         continue;
559
560                 init_sess = sess;
561                 break;
562         }
563
564         /*
565          * User has requested to change the queue depth for an Initiator Node.
566          * Change the value in the Node's struct se_node_acl, and call
567          * core_set_queue_depth_for_node() to add the requested queue depth.
568          *
569          * Finally call  tpg->se_tpg_tfo->close_session() to force session
570          * reinstatement to occur if there is an active session for the
571          * $FABRIC_MOD Initiator Node in question.
572          */
573         acl->queue_depth = queue_depth;
574
575         if (core_set_queue_depth_for_node(tpg, acl) < 0) {
576                 spin_unlock_irqrestore(&tpg->session_lock, flags);
577                 /*
578                  * Force session reinstatement if
579                  * core_set_queue_depth_for_node() failed, because we assume
580          * the $FABRIC_MOD has already set the session reinstatement bit
581                  * bit from tpg->se_tpg_tfo->shutdown_session() called above.
582                  */
583                 if (init_sess)
584                         tpg->se_tpg_tfo->close_session(init_sess);
585
586                 spin_lock_irq(&tpg->acl_node_lock);
587                 if (dynamic_acl)
588                         acl->dynamic_node_acl = 1;
589                 spin_unlock_irq(&tpg->acl_node_lock);
590                 return -EINVAL;
591         }
592         spin_unlock_irqrestore(&tpg->session_lock, flags);
593         /*
594          * If the $FABRIC_MOD session for the Initiator Node ACL exists,
595          * forcefully shutdown the $FABRIC_MOD session/nexus.
596          */
597         if (init_sess)
598                 tpg->se_tpg_tfo->close_session(init_sess);
599
600         pr_debug("Successfully changed queue depth to: %d for Initiator"
601                 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
602                 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
603                 tpg->se_tpg_tfo->tpg_get_tag(tpg));
604
605         spin_lock_irq(&tpg->acl_node_lock);
606         if (dynamic_acl)
607                 acl->dynamic_node_acl = 1;
608         spin_unlock_irq(&tpg->acl_node_lock);
609
610         return 0;
611 }
612 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
613
614 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
615 {
616         /* Set in core_dev_setup_virtual_lun0() */
617         struct se_device *dev = g_lun0_dev;
618         struct se_lun *lun = &se_tpg->tpg_virt_lun0;
619         u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
620         int ret;
621
622         lun->unpacked_lun = 0;
623         lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
624         atomic_set(&lun->lun_acl_count, 0);
625         init_completion(&lun->lun_shutdown_comp);
626         INIT_LIST_HEAD(&lun->lun_acl_list);
627         INIT_LIST_HEAD(&lun->lun_cmd_list);
628         spin_lock_init(&lun->lun_acl_lock);
629         spin_lock_init(&lun->lun_cmd_lock);
630         spin_lock_init(&lun->lun_sep_lock);
631
632         ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
633         if (ret < 0)
634                 return ret;
635
636         return 0;
637 }
638
639 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
640 {
641         struct se_lun *lun = &se_tpg->tpg_virt_lun0;
642
643         core_tpg_post_dellun(se_tpg, lun);
644 }
645
646 int core_tpg_register(
647         struct target_core_fabric_ops *tfo,
648         struct se_wwn *se_wwn,
649         struct se_portal_group *se_tpg,
650         void *tpg_fabric_ptr,
651         int se_tpg_type)
652 {
653         struct se_lun *lun;
654         u32 i;
655
656         se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
657                                 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
658         if (!se_tpg->tpg_lun_list) {
659                 pr_err("Unable to allocate struct se_portal_group->"
660                                 "tpg_lun_list\n");
661                 return -ENOMEM;
662         }
663
664         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
665                 lun = &se_tpg->tpg_lun_list[i];
666                 lun->unpacked_lun = i;
667                 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
668                 atomic_set(&lun->lun_acl_count, 0);
669                 init_completion(&lun->lun_shutdown_comp);
670                 INIT_LIST_HEAD(&lun->lun_acl_list);
671                 INIT_LIST_HEAD(&lun->lun_cmd_list);
672                 spin_lock_init(&lun->lun_acl_lock);
673                 spin_lock_init(&lun->lun_cmd_lock);
674                 spin_lock_init(&lun->lun_sep_lock);
675         }
676
677         se_tpg->se_tpg_type = se_tpg_type;
678         se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
679         se_tpg->se_tpg_tfo = tfo;
680         se_tpg->se_tpg_wwn = se_wwn;
681         atomic_set(&se_tpg->tpg_pr_ref_count, 0);
682         INIT_LIST_HEAD(&se_tpg->acl_node_list);
683         INIT_LIST_HEAD(&se_tpg->se_tpg_node);
684         INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
685         spin_lock_init(&se_tpg->acl_node_lock);
686         spin_lock_init(&se_tpg->session_lock);
687         spin_lock_init(&se_tpg->tpg_lun_lock);
688
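        /*
         * Discovery TPGs never export LUNs, so the internal virtual LUN 0
         * (backed by g_lun0_dev) is only set up for normal TPGs.
         */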
689         if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
690                 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
691                         kfree(se_tpg->tpg_lun_list);
692                         return -ENOMEM;
693                 }
694         }
695
696         spin_lock_bh(&tpg_lock);
697         list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
698         spin_unlock_bh(&tpg_lock);
699
700         pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
701                 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
702                 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
703                 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
704                 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
705
706         return 0;
707 }
708 EXPORT_SYMBOL(core_tpg_register);
709
710 int core_tpg_deregister(struct se_portal_group *se_tpg)
711 {
712         struct se_node_acl *nacl, *nacl_tmp;
713
714         pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
715                 " for endpoint: %s Portal Tag %u\n",
716                 se_tpg->se_tpg_tfo->get_fabric_name(),
717                 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? "Normal" : "Discovery",
718                 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
719                 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
720
721         spin_lock_bh(&tpg_lock);
722         list_del(&se_tpg->se_tpg_node);
723         spin_unlock_bh(&tpg_lock);
724
725         while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
726                 cpu_relax();
727         /*
728          * Release any remaining demo-mode generated se_node_acl that have
729          * not been released because of TFO->tpg_check_demo_mode_cache() == 1
730          * in transport_deregister_session().
731          */
732         spin_lock_irq(&se_tpg->acl_node_lock);
733         list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
734                         acl_list) {
735                 list_del(&nacl->acl_list);
736                 se_tpg->num_node_acls--;
737                 spin_unlock_irq(&se_tpg->acl_node_lock);
738
739                 core_tpg_wait_for_nacl_pr_ref(nacl);
740                 core_free_device_list_for_node(nacl, se_tpg);
741                 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
742
743                 spin_lock_irq(&se_tpg->acl_node_lock);
744         }
745         spin_unlock_irq(&se_tpg->acl_node_lock);
746
747         if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
748                 core_tpg_release_virtual_lun0(se_tpg);
749
750         se_tpg->se_tpg_fabric_ptr = NULL;
751         kfree(se_tpg->tpg_lun_list);
752         return 0;
753 }
754 EXPORT_SYMBOL(core_tpg_deregister);
755
756 struct se_lun *core_tpg_pre_addlun(
757         struct se_portal_group *tpg,
758         u32 unpacked_lun)
759 {
760         struct se_lun *lun;
761
762         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
763                 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
764                         "-1: %u for Target Portal Group: %u\n",
765                         tpg->se_tpg_tfo->get_fabric_name(),
766                         unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
767                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
768                 return ERR_PTR(-EOVERFLOW);
769         }
770
771         spin_lock(&tpg->tpg_lun_lock);
772         lun = &tpg->tpg_lun_list[unpacked_lun];
773         if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
774                 pr_err("TPG Logical Unit Number: %u is already active"
775                         " on %s Target Portal Group: %u, ignoring request.\n",
776                         unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
777                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
778                 spin_unlock(&tpg->tpg_lun_lock);
779                 return ERR_PTR(-EINVAL);
780         }
781         spin_unlock(&tpg->tpg_lun_lock);
782
783         return lun;
784 }
785
786 int core_tpg_post_addlun(
787         struct se_portal_group *tpg,
788         struct se_lun *lun,
789         u32 lun_access,
790         void *lun_ptr)
791 {
792         int ret;
793
794         ret = core_dev_export(lun_ptr, tpg, lun);
795         if (ret < 0)
796                 return ret;
797
798         spin_lock(&tpg->tpg_lun_lock);
799         lun->lun_access = lun_access;
800         lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
801         spin_unlock(&tpg->tpg_lun_lock);
802
803         return 0;
804 }
805
806 static void core_tpg_shutdown_lun(
807         struct se_portal_group *tpg,
808         struct se_lun *lun)
809 {
810         core_clear_lun_from_tpg(lun, tpg);
811         transport_clear_lun_from_sessions(lun);
812 }
813
814 struct se_lun *core_tpg_pre_dellun(
815         struct se_portal_group *tpg,
816         u32 unpacked_lun)
817 {
818         struct se_lun *lun;
819
820         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
821                 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
822                         "-1: %u for Target Portal Group: %u\n",
823                         tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
824                         TRANSPORT_MAX_LUNS_PER_TPG-1,
825                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
826                 return ERR_PTR(-EOVERFLOW);
827         }
828
829         spin_lock(&tpg->tpg_lun_lock);
830         lun = &tpg->tpg_lun_list[unpacked_lun];
831         if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
832                 pr_err("%s Logical Unit Number: %u is not active on"
833                         " Target Portal Group: %u, ignoring request.\n",
834                         tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
835                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
836                 spin_unlock(&tpg->tpg_lun_lock);
837                 return ERR_PTR(-ENODEV);
838         }
839         spin_unlock(&tpg->tpg_lun_lock);
840
841         return lun;
842 }
843
844 int core_tpg_post_dellun(
845         struct se_portal_group *tpg,
846         struct se_lun *lun)
847 {
848         core_tpg_shutdown_lun(tpg, lun);
849
850         core_dev_unexport(lun->lun_se_dev, tpg, lun);
851
852         spin_lock(&tpg->tpg_lun_lock);
853         lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
854         spin_unlock(&tpg->tpg_lun_lock);
855
856         return 0;
857 }