/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"
#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)
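/*
 * CM MAD paravirtualization for SR-IOV.
 *
 * Slave (VF) CM stacks allocate their own communication IDs, so two slaves
 * behind the same PF may pick colliding IDs.  Before a slave's CM MAD goes
 * out on the wire, the multiplex handler below rewrites its slave-local
 * comm ID (sl_cm_id) with a paravirtual one (pv_cm_id) that is unique
 * across the whole device; the demux handler performs the reverse
 * translation on incoming MADs.  Each sl_cm_id <-> pv_cm_id pair lives in
 * an id_map_entry, indexed both by an rb-tree keyed on (sl_cm_id, slave_id)
 * and by an xarray keyed on pv_cm_id.  Entries are garbage-collected
 * CM_CLEANUP_CACHE_TIMEOUT after connection teardown begins.
 */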
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};
struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};
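/*
 * SIDR REQ/REP MADs carry a single request_id where ordinary CM MADs carry
 * a local_comm_id/remote_comm_id pair, so each accessor below must branch
 * on attr_id.  A SIDR REP has no local comm ID and a SIDR REQ has no remote
 * one, hence the error paths.
 */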
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}
/* Caller must hold the id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}

	return NULL;
}
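/*
 * Delayed-work handler: drops an id_map_entry once its cleanup timeout
 * expires.  The xarray erase doubles as an "is this still the live
 * mapping?" check -- if xa_erase() returns NULL the entry was already
 * unlinked from the maps, and only the list removal and kfree() remain.
 */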
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}
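/*
 * Insert an entry into the rb-tree, keyed lexicographically on
 * (sl_cm_id, slave_id).  If that key is already present, the old node is
 * replaced in place rather than duplicated.
 */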
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}
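/*
 * Allocate a new sl_cm_id <-> pv_cm_id mapping.  xa_alloc_cyclic() hands
 * out the paravirtual ID: a free 32-bit slot, searched cyclically starting
 * from pv_id_next so recently freed IDs are not reused immediately.
 */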
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}
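/*
 * Look up a mapping either by paravirtual ID (*pv_cm_id != -1) or by the
 * (slave_id, sl_cm_id) pair; in the latter case *pv_cm_id is filled in on
 * success, so the caller learns the translation.
 */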
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int)ent->pv_cm_id;
	} else {
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	}
	spin_unlock(&sriov->id_map_lock);

	return ent;
}
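/*
 * Arm the cleanup timer for an entry.  Both locks are held so the check
 * races neither with a concurrent re-arm nor with device teardown
 * (is_going_down).
 */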
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* don't re-arm if a delete is already scheduled or we are going down */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}
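/*
 * Outgoing (slave -> wire) translation.  Illustrative flow, with made-up
 * example values:
 *
 *   slave 2 sends a CM_REQ with local comm ID (sl_cm_id) = 0x10
 *     -> no mapping yet: id_map_alloc() assigns, say, pv_cm_id = 0x5
 *     -> set_local_comm_id() rewrites the MAD, which leaves the wire
 *        carrying 0x5 instead of 0x10
 *   the peer's replies then reference 0x5 as the remote comm ID, which
 *   mlx4_ib_demux_cm_handler() below maps back to (slave 2, 0x10).
 *
 * REJ and SIDR_REP carry no local comm ID to translate, so they pass
 * through untouched.  A DREQ arms the cleanup timer on its mapping.
 */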
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}
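/*
 * Incoming (wire -> slave) translation.  A REQ/SIDR_REQ has no mapping
 * yet, so the target slave is resolved from the primary path SGID instead;
 * for all other CM MADs the remote comm ID *is* the pv_cm_id and selects
 * the mapping directly.  DREQ/REJ mark the start of teardown and arm the
 * cleanup timer.
 */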
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}
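/*
 * Lifecycle: mlx4_ib_cm_paravirt_init() sets up the empty maps at device
 * bring-up; mlx4_ib_cm_paravirt_clean() below tears mappings down again,
 * either for a single slave or (slave == -1) for all of them.
 */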
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
}
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	INIT_LIST_HEAD(&lh);

	/* cancel all delayed work queue entries */
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from the databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes for this slave from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}