/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

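/*
 * Module-level state: backend_list holds every registered backend
 * (subsystem plugin) and is protected by backend_mutex.  hba_list holds
 * every allocated struct se_hba, protected by hba_lock, with hba_id_counter
 * handing out monotonically increasing HBA IDs.
 */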
static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);


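/*
 * Register a storage backend (subsystem plugin) with the target core.
 * Backend names must be unique; a duplicate name is rejected with -EEXIST.
 * On success the backend's configfs item types are set up and the entry is
 * added to backend_list.
 */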
int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
			ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);

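/*
 * Remove a previously registered backend from backend_list and free its
 * tracking structure.  Typically called from a backend module's exit path.
 */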
void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			kfree(tb);
			break;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);

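/*
 * Example usage (illustrative sketch only, not part of this file): a backend
 * module typically fills out a struct target_backend_ops and registers it
 * from its module init/exit hooks.  The "foo" names below are hypothetical.
 *
 *	static const struct target_backend_ops foo_ops = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.attach_hba	= foo_attach_hba,
 *		.detach_hba	= foo_detach_hba,
 *		...
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return transport_backend_register(&foo_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		target_backend_unregister(&foo_ops);
 *	}
 */

/*
 * Look up a registered backend by name and take a reference on its owning
 * module.  Returns NULL if no backend with that name is registered or if
 * try_module_get() fails.
 */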
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}

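/*
 * Allocate a new struct se_hba, resolve the requested backend by name, and
 * invoke the backend's attach_hba() callback.  On success the HBA is assigned
 * an ID and added to hba_list; on failure an ERR_PTR() is returned and any
 * backend module reference taken by core_get_backend() is dropped.
 */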
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}

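/*
 * Tear down an HBA previously set up by core_alloc_hba(): detach it from its
 * backend, remove it from hba_list, drop the backend module reference, and
 * free the structure.  The HBA must no longer have any devices attached
 * (WARN_ON fires on a non-zero dev_count).
 */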
int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}