Commit | Line | Data |
---|---|---|
ec16227e AG |
1 | /* |
2 | * Copyright (c) 2006 Oracle. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | */ | |
33 | #include <linux/kernel.h> | |
34 | #include <linux/in.h> | |
35 | #include <linux/if.h> | |
36 | #include <linux/netdevice.h> | |
37 | #include <linux/inetdevice.h> | |
38 | #include <linux/if_arp.h> | |
39 | #include <linux/delay.h> | |
5a0e3ad6 | 40 | #include <linux/slab.h> |
ec16227e AG |
41 | |
42 | #include "rds.h" | |
43 | #include "ib.h" | |
44 | ||
/* FMR tunables, exposed read-only (0444) as module parameters. */
unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;

module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");

/* All rds_ib_devices created by rds_ib_add_one(), linked via ->list. */
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
/* Connections not yet associated with an rds_ib_device; protected by
 * ib_nodev_conns_lock. */
LIST_HEAD(ib_nodev_conns);
3e0249f9 ZB |
/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 *
 * This is the final-teardown work function queued by rds_ib_dev_put() when
 * the device refcount hits zero.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	/* Release in reverse order of rds_ib_add_one(): the MR pool sits on
	 * top of the DMA MR, which sits on top of the PD.  Each may be NULL
	 * if rds_ib_add_one() bailed out partway through setup. */
	if (rds_ibdev->mr_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
	if (rds_ibdev->mr)
		ib_dereg_mr(rds_ibdev->mr);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);

	/* Free any IP address entries still attached to this device. */
	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev);
}
86 | ||
87 | void rds_ib_dev_put(struct rds_ib_device *rds_ibdev) | |
88 | { | |
89 | BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0); | |
90 | if (atomic_dec_and_test(&rds_ibdev->refcount)) | |
91 | queue_work(rds_wq, &rds_ibdev->free_work); | |
92 | } | |
93 | ||
ec16227e AG |
/*
 * IB client "add" callback: a new HCA has appeared.  Allocate and set up a
 * struct rds_ib_device for it (PD, DMA MR, MR pool), link it onto
 * rds_ib_devices, and publish it via ib_set_client_data().
 *
 * Refcounting: the device starts with one reference (dropped at put_dev on
 * every exit path, success or failure); on success one extra reference is
 * taken for membership on rds_ib_devices and one for the client_data
 * pointer.  A failed setup therefore drops to zero here and frees via
 * rds_ib_dev_free().
 */
void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;
	struct ib_device_attr *dev_attr;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		rdsdebug("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	/* Allocate on the HCA's NUMA node to keep hot state local. */
	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		goto free_attr;

	spin_lock_init(&rds_ibdev->spinlock);
	atomic_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	/* Clamp our limits to what the HCA reports it supports. */
	rds_ibdev->max_wrs = dev_attr->max_qp_wr;
	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);

	/* 0 from the HCA means "unreported"; fall back to 32 remaps. */
	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
			fmr_pool_size;

	rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device);
	if (IS_ERR(rds_ibdev->pd)) {
		/* NULL the field so rds_ib_dev_free() skips it. */
		rds_ibdev->pd = NULL;
		goto put_dev;
	}

	rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(rds_ibdev->mr)) {
		rds_ibdev->mr = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
	if (IS_ERR(rds_ibdev->mr_pool)) {
		rds_ibdev->mr_pool = NULL;
		goto put_dev;
	}

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);
	/* The rds_ib_devices list holds its own reference... */
	list_add_tail(&rds_ibdev->list, &rds_ib_devices);
	atomic_inc(&rds_ibdev->refcount);

	/* ...and so does the client_data pointer handed to the IB core. */
	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	atomic_inc(&rds_ibdev->refcount);

put_dev:
	/* Drop the initial reference; frees the device on the error paths. */
	rds_ib_dev_put(rds_ibdev);
free_attr:
	kfree(dev_attr);
}
164 | ||
3e0249f9 ZB |
/*
 * New connections use this to find the device to associate with the
 * connection. It's not in the fast path so we're not concerned about the
 * performance of the IB call. (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period. The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if its arriving on a
 * device that is in the process of being removed.
 *
 * On success the returned device carries a reference; release it with
 * rds_ib_dev_put().
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	/* The read-side critical section pairs with the synchronize_rcu()
	 * in rds_ib_remove_one(): the ref must be taken before we leave it. */
	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (rds_ibdev)
		atomic_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}
192 | ||
/*
 * The IB stack is letting us know that a device is going away. This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
void rds_ib_remove_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (!rds_ibdev)
		return;

	/* Tear down every connection currently using this device. */
	rds_ib_destroy_conns(rds_ibdev);

	/*
	 * prevent future connection attempts from getting a reference to this
	 * device and wait for currently racing connection attempts to finish
	 * getting their reference
	 */
	ib_set_client_data(device, &rds_ib_client, NULL);
	synchronize_rcu();
	/* Drop the reference that the client_data pointer held. */
	rds_ib_dev_put(rds_ibdev);

	/* Unlink from rds_ib_devices and drop that list's reference; this is
	 * the last put on a quiesced device and triggers rds_ib_dev_free(). */
	list_del(&rds_ibdev->list);
	rds_ib_dev_put(rds_ibdev);
}
222 | ||
/* Registration with the IB core: add/remove callbacks fire per HCA. */
struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};
228 | ||
229 | static int rds_ib_conn_info_visitor(struct rds_connection *conn, | |
230 | void *buffer) | |
231 | { | |
232 | struct rds_info_rdma_connection *iinfo = buffer; | |
233 | struct rds_ib_connection *ic; | |
234 | ||
235 | /* We will only ever look at IB transports */ | |
236 | if (conn->c_trans != &rds_ib_transport) | |
237 | return 0; | |
238 | ||
239 | iinfo->src_addr = conn->c_laddr; | |
240 | iinfo->dst_addr = conn->c_faddr; | |
241 | ||
242 | memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); | |
243 | memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); | |
244 | if (rds_conn_state(conn) == RDS_CONN_UP) { | |
245 | struct rds_ib_device *rds_ibdev; | |
246 | struct rdma_dev_addr *dev_addr; | |
247 | ||
248 | ic = conn->c_transport_data; | |
249 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | |
250 | ||
6f8372b6 SH |
251 | rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); |
252 | rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); | |
ec16227e | 253 | |
3e0249f9 | 254 | rds_ibdev = ic->rds_ibdev; |
ec16227e AG |
255 | iinfo->max_send_wr = ic->i_send_ring.w_nr; |
256 | iinfo->max_recv_wr = ic->i_recv_ring.w_nr; | |
257 | iinfo->max_send_sge = rds_ibdev->max_sge; | |
258 | rds_ib_get_mr_info(rds_ibdev, iinfo); | |
259 | } | |
260 | return 1; | |
261 | } | |
262 | ||
/*
 * rds-info hook for RDS_INFO_IB_CONNECTIONS: walk all RDS connections and
 * emit one rds_info_rdma_connection record per IB connection, using
 * rds_ib_conn_info_visitor() to fill each entry.
 */
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_ib_conn_info_visitor,
				sizeof(struct rds_info_rdma_connection));
}
271 | ||
272 | ||
273 | /* | |
274 | * Early RDS/IB was built to only bind to an address if there is an IPoIB | |
275 | * device with that address set. | |
276 | * | |
277 | * If it were me, I'd advocate for something more flexible. Sending and | |
278 | * receiving should be device-agnostic. Transports would try and maintain | |
279 | * connections between peers who have messages queued. Userspace would be | |
280 | * allowed to influence which paths have priority. We could call userspace | |
281 | * asserting this policy "routing". | |
282 | */ | |
283 | static int rds_ib_laddr_check(__be32 addr) | |
284 | { | |
285 | int ret; | |
286 | struct rdma_cm_id *cm_id; | |
287 | struct sockaddr_in sin; | |
288 | ||
289 | /* Create a CMA ID and try to bind it. This catches both | |
290 | * IB and iWARP capable NICs. | |
291 | */ | |
292 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | |
94713bab DC |
293 | if (IS_ERR(cm_id)) |
294 | return PTR_ERR(cm_id); | |
ec16227e AG |
295 | |
296 | memset(&sin, 0, sizeof(sin)); | |
297 | sin.sin_family = AF_INET; | |
298 | sin.sin_addr.s_addr = addr; | |
299 | ||
300 | /* rdma_bind_addr will only succeed for IB & iWARP devices */ | |
301 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); | |
302 | /* due to this, we will claim to support iWARP devices unless we | |
303 | check node_type. */ | |
304 | if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) | |
305 | ret = -EADDRNOTAVAIL; | |
306 | ||
307 | rdsdebug("addr %pI4 ret %d node type %d\n", | |
308 | &addr, ret, | |
309 | cm_id->device ? cm_id->device->node_type : -1); | |
310 | ||
311 | rdma_destroy_id(cm_id); | |
312 | ||
313 | return ret; | |
314 | } | |
315 | ||
/*
 * Module teardown for the IB transport; unwinds rds_ib_init() and drains
 * connections that never bound to a device.
 */
void rds_ib_exit(void)
{
	/* Stop new info requests before tearing anything else down. */
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
	rds_ib_destroy_nodev_conns();
	ib_unregister_client(&rds_ib_client);
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
}
325 | ||
/* The RDS transport ops table for InfiniBand. */
struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	/* send/receive paths */
	.xmit_complete		= rds_ib_xmit_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv			= rds_ib_recv,
	/* connection lifecycle */
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_connect		= rds_ib_conn_connect,
	.conn_shutdown		= rds_ib_conn_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	/* RDMA connection management */
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	/* memory registration */
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_type			= RDS_TRANS_IB
};
352 | ||
353 | int __init rds_ib_init(void) | |
354 | { | |
355 | int ret; | |
356 | ||
357 | INIT_LIST_HEAD(&rds_ib_devices); | |
358 | ||
359 | ret = ib_register_client(&rds_ib_client); | |
360 | if (ret) | |
361 | goto out; | |
362 | ||
363 | ret = rds_ib_sysctl_init(); | |
364 | if (ret) | |
365 | goto out_ibreg; | |
366 | ||
367 | ret = rds_ib_recv_init(); | |
368 | if (ret) | |
369 | goto out_sysctl; | |
370 | ||
371 | ret = rds_trans_register(&rds_ib_transport); | |
372 | if (ret) | |
373 | goto out_recv; | |
374 | ||
375 | rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); | |
376 | ||
377 | goto out; | |
378 | ||
379 | out_recv: | |
380 | rds_ib_recv_exit(); | |
381 | out_sysctl: | |
382 | rds_ib_sysctl_exit(); | |
383 | out_ibreg: | |
384 | ib_unregister_client(&rds_ib_client); | |
385 | out: | |
386 | return ret; | |
387 | } | |
388 | ||
389 | MODULE_LICENSE("GPL"); | |
390 |