target: move node ACL allocation to core code
drivers/target/target_core_tpg.c
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      core_clear_initiator_node_from_tpg():
 *
 *      Release all MappedLUN access this node ACL still holds on the TPG
 *      by disabling each active device list entry.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entry's se_lun pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*      __core_tpg_get_initiator_node_acl():
 *
 *      tpg->acl_node_lock must be held when calling; callers in this file
 *      acquire it with spin_lock_irq().
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Locking wrapper around __core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        spin_unlock_irq(&tpg->acl_node_lock);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
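
/*
 * Example usage: a fabric module can resolve an explicit node ACL from
 * its own configfs or login code. A minimal sketch, assuming a
 * hypothetical fabric "foo" that embeds an se_portal_group in its TPG
 * structure (the foo_* names are illustrative, not part of this file):
 *
 *      struct se_node_acl *nacl;
 *
 *      nacl = core_tpg_get_initiator_node_acl(&foo_tpg->se_tpg,
 *                                             initiatorname);
 *      if (!nacl)
 *              return -ENOENT;
 *
 * This wrapper takes tpg->acl_node_lock internally, so callers must not
 * already hold it; use __core_tpg_get_initiator_node_acl() when the
 * lock is held.
 */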

/*      core_tpg_add_node_to_devs():
 *
 *      Create demo-mode MappedLUNs for every active LUN in the TPG on
 *      behalf of a dynamically generated node ACL.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in a LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is enabled, i.e. demo-mode
                 * LUNs default to READ-ONLY access.
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * In the default read-only demo mode, only non-disk
                         * devices (e.g. optical drives) may issue R/W;
                         * TYPE_DISK remains READ-ONLY.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic LUN ACL now.
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
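
/*
 * Example: the demo-mode access assigned above is driven by the
 * fabric's tpg_check_demo_mode_write_protect() callback. A minimal
 * sketch of such a callback, assuming a hypothetical fabric "foo" with
 * a per-TPG attribute (the foo_* names are illustrative only):
 *
 *      static int foo_check_demo_mode_write_protect(
 *              struct se_portal_group *se_tpg)
 *      {
 *              struct foo_tpg *tpg = container_of(se_tpg,
 *                                      struct foo_tpg, se_tpg);
 *
 *              return tpg->demo_mode_write_protect;
 *      }
 *
 * Returning 0 exports demo-mode LUNs READ-WRITE; returning 1 keeps
 * TYPE_DISK LUNs READ-ONLY, as implemented above.
 */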

/*      core_set_queue_depth_for_node():
 *
 *      Sanity-check a node ACL's queue depth, falling back to a depth of 1
 *      if the fabric reported zero.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

void array_free(void *array, int n)
{
        void **a = array;
        int i;

        for (i = 0; i < n; i++)
                kfree(a[i]);
        kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
        void **a;
        int i;

        a = kzalloc(n * sizeof(void *), flags);
        if (!a)
                return NULL;
        for (i = 0; i < n; i++) {
                a[i] = kzalloc(size, flags);
                if (!a[i]) {
                        array_free(a, n);
                        return NULL;
                }
        }
        return a;
}

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize the per-node device list that tracks one
 *      struct se_dev_entry for each possible LUN in the TPG.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                acl->queue_depth = 1;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0)
                goto out_free_acl;
        if (core_set_queue_depth_for_node(tpg, acl) < 0)
                goto out_free_device_list;

        return acl;

out_free_device_list:
        core_free_device_list_for_node(acl, tpg);
out_free_acl:
        kfree(acl);
        return NULL;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(),
                acl->initiatorname);
}

struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        acl->dynamic_node_acl = 1;

        /*
         * Only create demo-mode MappedLUNs from the active TPG LUNs when
         * the fabric is not explicitly asking for demo-mode login only,
         * i.e. tpg_check_demo_mode_login_only() != 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
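
/*
 * Example: fabric modules typically call this from their login path to
 * look up, or dynamically generate, the node ACL for an incoming
 * initiator. A minimal sketch with hypothetical names:
 *
 *      struct se_node_acl *se_nacl;
 *
 *      se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg,
 *                              (unsigned char *)initiator_name);
 *      if (!se_nacl)
 *              return -EACCES;
 *
 * A fabric that wants dynamic ACLs without demo-mode MappedLUNs can
 * implement tpg_check_demo_mode_login_only() to return 1, in which
 * case core_tpg_add_node_to_devs() is skipped above.
 */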

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                core_dev_del_lun(tpg, lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                /* Drop the reference taken via target_get_session() above */
                target_put_session(sess);
                if (!rc)
                        continue;
                /*
                 * shutdown_session() returned non-zero, so the shutdown
                 * happened in this context; drop the extra session
                 * reference as well.
                 */
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        kfree(acl);
}

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for an initiator node ACL, optionally forcing
 *      session reinstatement if a session is currently active.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to sanity-check the new depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
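
/*
 * Example: this is normally driven from a fabric's per-ACL configfs
 * attribute store handler. A condensed sketch (the surrounding handler
 * and variable names are illustrative, not part of this file):
 *
 *      u32 depth;
 *      int ret;
 *
 *      ret = kstrtou32(page, 0, &depth);
 *      if (ret < 0)
 *              return ret;
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *                      acl->initiatorname, depth, 1);
 *      if (ret < 0)
 *              return ret;
 *      return count;
 *
 * Passing force = 1 permits the change while a session is active, at
 * the cost of forced session reinstatement.
 */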

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
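
/*
 * Example: fabrics expose this through a per-ACL configfs attribute so
 * userspace can label initiators. A condensed store-handler sketch
 * (variable names illustrative):
 *
 *      ret = core_tpg_set_initiator_node_tag(se_tpg, se_nacl, page);
 *      if (ret < 0)
 *              return ret;
 *      return count;
 *
 * Writing the literal string "NULL" clears the tag, per the check
 * above.
 */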

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

int core_tpg_register(
        const struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_link_magic = SE_LUN_LINK_MAGIC;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_sep_lock);
                init_completion(&lun->lun_ref_comp);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        array_free(se_tpg->tpg_lun_list,
                                   TRANSPORT_MAX_LUNS_PER_TPG);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
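
/*
 * Example: a fabric module calls core_tpg_register() when userspace
 * creates a TPG through configfs. A minimal sketch, assuming a
 * hypothetical fabric "foo" (the foo_* names are illustrative only):
 *
 *      ret = core_tpg_register(&foo_fabric_ops, wwn, &tpg->se_tpg,
 *                              tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *      if (ret < 0) {
 *              kfree(tpg);
 *              return ERR_PTR(ret);
 *      }
 *
 * The matching core_tpg_deregister() below must be called from the
 * fabric's TPG drop path before the containing structure is freed.
 */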

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);

        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                return ret;

        ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
                percpu_ref_exit(&lun->lun_ref);
                return ret;
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
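
/*
 * Example: core_tpg_alloc_lun() and core_tpg_add_lun() are used as a
 * pair by the core LUN setup path (see core_dev_add_lun() in
 * target_core_device.c); a condensed sketch:
 *
 *      lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return lun;
 *
 *      ret = core_tpg_add_lun(tpg, lun,
 *                      TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 */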

void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_ref(lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        percpu_ref_exit(&lun->lun_ref);
}