Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | |
2a1d9b7f RD |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $ | |
35 | */ | |
36 | ||
37 | #include <linux/skbuff.h> | |
38 | #include <linux/rtnetlink.h> | |
39 | #include <linux/ip.h> | |
40 | #include <linux/in.h> | |
41 | #include <linux/igmp.h> | |
42 | #include <linux/inetdevice.h> | |
43 | #include <linux/delay.h> | |
44 | #include <linux/completion.h> | |
45 | ||
46 | #include "ipoib.h" | |
47 | ||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Module parameter: > 0 enables the ipoib_dbg_mcast() trace output */
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

/* Serializes starting, stopping and queueing of the multicast join task */
static DECLARE_MUTEX(mcast_mutex);
57 | ||
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;	/* SA record from the last join */
	struct ipoib_ah *ah;			/* address handle; non-NULL once joined */

	struct rb_node rb_node;		/* keyed by MGID in priv->multicast_tree */
	struct list_head list;		/* entry on priv->multicast_list */
	struct completion done;		/* signalled when an SA query completes */

	int query_id;			/* id used to cancel the outstanding query */
	struct ib_sa_query *query;	/* in-flight SA join, or NULL */

	unsigned long created;		/* jiffies timestamp of allocation */
	unsigned long backoff;		/* retry delay, capped at IPOIB_MAX_BACKOFF_SECONDS */

	unsigned long flags;		/* IPOIB_MCAST_FLAG_* bits */
	unsigned char logcount;		/* rate-limits join-failure messages */

	struct list_head neigh_list;	/* ipoib_neigh entries using this group's AH */

	struct sk_buff_head pkt_queue;	/* packets waiting for the join to finish */

	struct net_device *dev;
};
82 | ||
/* Snapshot of one group, filled in by ipoib_mcast_iter_next() */
struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;	/* MGID of the group last visited; iteration resumes after it */
	unsigned long created;
	unsigned int queuelen;	/* packets still queued awaiting the join */
	unsigned int complete;	/* non-zero once the group has an AH */
	unsigned int send_only;
};
91 | ||
/*
 * Tear down a multicast group entry: unlink all neighbour structures
 * that reference it, drop its address handles, free any packets still
 * queued for it, and finally free the entry itself.
 *
 * Caller must have already removed the group from the rb-tree and the
 * multicast list.  AH references are only collected under priv->lock
 * and released after it is dropped.
 */
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	LIST_HEAD(ah_list);
	struct ipoib_ah *ah, *tah;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	/* Unlink every neighbour bound to this group; park its AH on
	 * ah_list so the reference can be dropped outside the lock. */
	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		if (neigh->ah)
			list_add_tail(&neigh->ah->list, &ah_list);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		/* Disarm the destructor so the core won't call back into us */
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Now safe to release the collected AH references */
	list_for_each_entry_safe(ah, tah, &ah_list, list)
		ipoib_put_ah(ah);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	/* Free any packets that never made it onto the wire */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		skb->dev = dev;
		dev_kfree_skb_any(skb);
	}

	kfree(mcast);
}
132 | ||
133 | static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, | |
134 | int can_sleep) | |
135 | { | |
136 | struct ipoib_mcast *mcast; | |
137 | ||
138 | mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC); | |
139 | if (!mcast) | |
140 | return NULL; | |
141 | ||
142 | memset(mcast, 0, sizeof (*mcast)); | |
143 | ||
144 | init_completion(&mcast->done); | |
145 | ||
146 | mcast->dev = dev; | |
147 | mcast->created = jiffies; | |
148 | mcast->backoff = HZ; | |
149 | mcast->logcount = 0; | |
150 | ||
151 | INIT_LIST_HEAD(&mcast->list); | |
152 | INIT_LIST_HEAD(&mcast->neigh_list); | |
153 | skb_queue_head_init(&mcast->pkt_queue); | |
154 | ||
155 | mcast->ah = NULL; | |
156 | mcast->query = NULL; | |
157 | ||
158 | return mcast; | |
159 | } | |
160 | ||
161 | static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid) | |
162 | { | |
163 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
164 | struct rb_node *n = priv->multicast_tree.rb_node; | |
165 | ||
166 | while (n) { | |
167 | struct ipoib_mcast *mcast; | |
168 | int ret; | |
169 | ||
170 | mcast = rb_entry(n, struct ipoib_mcast, rb_node); | |
171 | ||
172 | ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw, | |
173 | sizeof (union ib_gid)); | |
174 | if (ret < 0) | |
175 | n = n->rb_left; | |
176 | else if (ret > 0) | |
177 | n = n->rb_right; | |
178 | else | |
179 | return mcast; | |
180 | } | |
181 | ||
182 | return NULL; | |
183 | } | |
184 | ||
185 | static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) | |
186 | { | |
187 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
188 | struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; | |
189 | ||
190 | while (*n) { | |
191 | struct ipoib_mcast *tmcast; | |
192 | int ret; | |
193 | ||
194 | pn = *n; | |
195 | tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); | |
196 | ||
197 | ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, | |
198 | sizeof (union ib_gid)); | |
199 | if (ret < 0) | |
200 | n = &pn->rb_left; | |
201 | else if (ret > 0) | |
202 | n = &pn->rb_right; | |
203 | else | |
204 | return -EEXIST; | |
205 | } | |
206 | ||
207 | rb_link_node(&mcast->rb_node, pn, n); | |
208 | rb_insert_color(&mcast->rb_node, &priv->multicast_tree); | |
209 | ||
210 | return 0; | |
211 | } | |
212 | ||
/*
 * Complete a successful SA join: record the member record, attach our
 * QP to the group (unless it is send-only), build the multicast
 * address handle, and transmit any packets queued while the join was
 * pending.
 *
 * Returns 0 on success or a negative errno if the QP attach fails.
 */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		/* ATTACHED doubles as a "QP already attached" guard here */
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		/* Build the address vector from the SA member record */
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);

		av.grh.dgid = mcast->mcmember.mgid;

		/* Throttle our sends if the group's rate is below the
		 * local port rate (static_rate stays 0 otherwise). */
		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
				av.static_rate, priv->local_rate,
				ib_sa_rate_enum_to_int(mcast->mcmember.rate));

		mcast->ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!mcast->ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		/* Re-run the packet through the stack now that the AH exists */
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
	}

	return 0;
}
304 | ||
305 | static void | |
306 | ipoib_mcast_sendonly_join_complete(int status, | |
307 | struct ib_sa_mcmember_rec *mcmember, | |
308 | void *mcast_ptr) | |
309 | { | |
310 | struct ipoib_mcast *mcast = mcast_ptr; | |
311 | struct net_device *dev = mcast->dev; | |
312 | ||
313 | if (!status) | |
314 | ipoib_mcast_join_finish(mcast, mcmember); | |
315 | else { | |
316 | if (mcast->logcount++ < 20) | |
317 | ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for " | |
318 | IPOIB_GID_FMT ", status %d\n", | |
319 | IPOIB_GID_ARG(mcast->mcmember.mgid), status); | |
320 | ||
321 | /* Flush out any queued packets */ | |
322 | while (!skb_queue_empty(&mcast->pkt_queue)) { | |
323 | struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); | |
324 | ||
325 | skb->dev = dev; | |
326 | ||
327 | dev_kfree_skb_any(skb); | |
328 | } | |
329 | ||
330 | /* Clear the busy flag so we try again */ | |
331 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | |
332 | } | |
333 | ||
334 | complete(&mcast->done); | |
335 | } | |
336 | ||
/*
 * Start an SA join for a send-only group.  Full membership
 * (join_state 1) is requested for now because some SMs reject
 * send-only (join_state 4) joins.
 *
 * Returns the SA query id (>= 0) on success, -ENODEV if the device is
 * going down, -EBUSY if a join is already outstanding, or a negative
 * errno from the SA layer.
 */
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	/* BUSY is cleared again by the completion handler on failure */
	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
				     IB_SA_MCMEMBER_REC_MGID		|
				     IB_SA_MCMEMBER_REC_PORT_GID	|
				     IB_SA_MCMEMBER_REC_PKEY		|
				     IB_SA_MCMEMBER_REC_JOIN_STATE,
				     1000, GFP_ATOMIC,
				     ipoib_mcast_sendonly_join_complete,
				     mcast, &mcast->query);
	if (ret < 0) {
		ipoib_warn(priv, "ib_sa_mcmember_rec_set failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
				", starting join\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		mcast->query_id = ret;
	}

	return ret;
}
385 | ||
386 | static void ipoib_mcast_join_complete(int status, | |
387 | struct ib_sa_mcmember_rec *mcmember, | |
388 | void *mcast_ptr) | |
389 | { | |
390 | struct ipoib_mcast *mcast = mcast_ptr; | |
391 | struct net_device *dev = mcast->dev; | |
392 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
393 | ||
394 | ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT | |
395 | " (status %d)\n", | |
396 | IPOIB_GID_ARG(mcast->mcmember.mgid), status); | |
397 | ||
398 | if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { | |
399 | mcast->backoff = HZ; | |
400 | down(&mcast_mutex); | |
401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | |
402 | queue_work(ipoib_workqueue, &priv->mcast_task); | |
403 | up(&mcast_mutex); | |
404 | complete(&mcast->done); | |
405 | return; | |
406 | } | |
407 | ||
408 | if (status == -EINTR) { | |
409 | complete(&mcast->done); | |
410 | return; | |
411 | } | |
412 | ||
413 | if (status && mcast->logcount++ < 20) { | |
414 | if (status == -ETIMEDOUT || status == -EINTR) { | |
415 | ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT | |
416 | ", status %d\n", | |
417 | IPOIB_GID_ARG(mcast->mcmember.mgid), | |
418 | status); | |
419 | } else { | |
420 | ipoib_warn(priv, "multicast join failed for " | |
421 | IPOIB_GID_FMT ", status %d\n", | |
422 | IPOIB_GID_ARG(mcast->mcmember.mgid), | |
423 | status); | |
424 | } | |
425 | } | |
426 | ||
427 | mcast->backoff *= 2; | |
428 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) | |
429 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; | |
430 | ||
431 | mcast->query = NULL; | |
432 | ||
433 | down(&mcast_mutex); | |
434 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { | |
435 | if (status == -ETIMEDOUT) | |
436 | queue_work(ipoib_workqueue, &priv->mcast_task); | |
437 | else | |
438 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, | |
439 | mcast->backoff * HZ); | |
440 | } else | |
441 | complete(&mcast->done); | |
442 | up(&mcast_mutex); | |
443 | ||
444 | return; | |
445 | } | |
446 | ||
447 | static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |
448 | int create) | |
449 | { | |
450 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
451 | struct ib_sa_mcmember_rec rec = { | |
452 | .join_state = 1 | |
453 | }; | |
454 | ib_sa_comp_mask comp_mask; | |
455 | int ret = 0; | |
456 | ||
457 | ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n", | |
458 | IPOIB_GID_ARG(mcast->mcmember.mgid)); | |
459 | ||
460 | rec.mgid = mcast->mcmember.mgid; | |
461 | rec.port_gid = priv->local_gid; | |
97f52eb4 | 462 | rec.pkey = cpu_to_be16(priv->pkey); |
1da177e4 LT |
463 | |
464 | comp_mask = | |
465 | IB_SA_MCMEMBER_REC_MGID | | |
466 | IB_SA_MCMEMBER_REC_PORT_GID | | |
467 | IB_SA_MCMEMBER_REC_PKEY | | |
468 | IB_SA_MCMEMBER_REC_JOIN_STATE; | |
469 | ||
470 | if (create) { | |
471 | comp_mask |= | |
472 | IB_SA_MCMEMBER_REC_QKEY | | |
473 | IB_SA_MCMEMBER_REC_SL | | |
474 | IB_SA_MCMEMBER_REC_FLOW_LABEL | | |
475 | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | |
476 | ||
477 | rec.qkey = priv->broadcast->mcmember.qkey; | |
478 | rec.sl = priv->broadcast->mcmember.sl; | |
479 | rec.flow_label = priv->broadcast->mcmember.flow_label; | |
480 | rec.traffic_class = priv->broadcast->mcmember.traffic_class; | |
481 | } | |
482 | ||
483 | ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask, | |
484 | mcast->backoff * 1000, GFP_ATOMIC, | |
485 | ipoib_mcast_join_complete, | |
486 | mcast, &mcast->query); | |
487 | ||
488 | if (ret < 0) { | |
489 | ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret); | |
490 | ||
491 | mcast->backoff *= 2; | |
492 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) | |
493 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; | |
494 | ||
495 | down(&mcast_mutex); | |
496 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | |
497 | queue_delayed_work(ipoib_workqueue, | |
498 | &priv->mcast_task, | |
499 | mcast->backoff); | |
500 | up(&mcast_mutex); | |
501 | } else | |
502 | mcast->query_id = ret; | |
503 | } | |
504 | ||
/*
 * Work-queue handler that (re)joins all multicast groups.  It first
 * refreshes the local port GID/LID/rate, creates and joins the
 * broadcast group if needed, then starts a join for one pending group
 * per invocation (the join completion requeues the task), and once
 * every group is attached it sets the MTU and turns the carrier on.
 */
void ipoib_mcast_join_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_gid_entry_get() failed\n");
	else
		/* Keep the hardware address in sync with the port GID */
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr)) {
			priv->local_lid = attr.lid;
			priv->local_rate = attr.active_speed *
				ib_width_enum_to_int(attr.active_width);
		} else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		priv->broadcast = ipoib_mcast_alloc(dev, 1);
		if (!priv->broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/* Retry in a second if we are still supposed to run */
			down(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			up(&mcast_mutex);
			return;
		}

		/* Broadcast MGID lives after the 4-byte header of dev->broadcast */
		memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));

		spin_lock_irq(&priv->lock);
		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		/* Join broadcast first; completion will requeue this task */
		ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	/* The broadcast group's MTU bounds the device MTU */
	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;
	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	netif_carrier_on(dev);
}
586 | ||
/*
 * Arm the multicast join task for @dev.  The RUN bit guards against
 * queueing the work twice; mcast_mutex serializes this against
 * ipoib_mcast_stop_thread().  Always returns 0.
 */
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	down(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_work(ipoib_workqueue, &priv->mcast_task);
	up(&mcast_mutex);

	return 0;
}
600 | ||
/*
 * Stop the multicast join task and cancel every outstanding SA query,
 * waiting for each cancellation to complete.  Must be called from a
 * context that may sleep.  Always returns 0.
 */
int ipoib_mcast_stop_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	down(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	up(&mcast_mutex);

	/* Make sure a join task that was already running has finished */
	flush_workqueue(ipoib_workqueue);

	if (priv->broadcast && priv->broadcast->query) {
		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
		priv->broadcast->query = NULL;
		ipoib_dbg_mcast(priv, "waiting for bcast\n");
		/* Completion is signalled by the join completion handler */
		wait_for_completion(&priv->broadcast->done);
	}

	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (mcast->query) {
			ib_sa_cancel_query(mcast->query_id, mcast->query);
			mcast->query = NULL;
			ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));
			wait_for_completion(&mcast->done);
		}
	}

	return 0;
}
634 | ||
/*
 * Detach our QP from @mcast and fire a single best-effort SA leave
 * request.  Does nothing if the group was never attached.  Always
 * returns 0; failures are only logged since nothing useful can be
 * done about them.
 */
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	int ret = 0;

	if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
		return 0;

	ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	/* Remove ourselves from the multicast group */
	ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
				 &mcast->mcmember.mgid);
	if (ret)
		ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);

	/*
	 * Just make one shot at leaving and don't wait for a reply;
	 * if we fail, too bad.
	 */
	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
					IB_SA_MCMEMBER_REC_MGID		|
					IB_SA_MCMEMBER_REC_PORT_GID	|
					IB_SA_MCMEMBER_REC_PKEY		|
					IB_SA_MCMEMBER_REC_JOIN_STATE,
					0, GFP_ATOMIC, NULL,
					mcast, &mcast->query);
	if (ret < 0)
		ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
			   "for leave (result = %d)\n", ret);

	return 0;
}
676 | ||
/*
 * Transmit @skb to the group @mgid, creating a send-only group entry
 * and starting its join on first use.  While the group has no AH yet,
 * packets are queued (up to IPOIB_MAX_MCAST_QUEUE) and flushed by
 * ipoib_mcast_join_finish() when the join completes.
 */
void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
		      struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		mcast->mcmember.mgid = *mgid;
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		/* No AH yet: queue the packet (bounded) and kick the join */
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else
			dev_kfree_skb_any(skb);

		if (mcast->query)
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		/* Bind the neighbour to this group's AH on first contact */
		if (skb->dst            &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				neigh->neighbour = skb->dst->neighbour;
				*to_ipoib_neigh(skb->dst->neighbour) = neigh;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

	spin_unlock(&priv->lock);
}
749 | ||
/*
 * Replace every multicast group entry (including broadcast) with a
 * fresh, unjoined one carrying the same MGID and send-only flag, and
 * leave/free the old entries.  Used when the device state must be
 * rebuilt; the leave/free happens outside priv->lock since both may
 * sleep.
 */
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast, *nmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			/* Preserve only the send-only flag on the replacement */
			nmcast->flags =
				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);

			nmcast->mcmember.mgid = mcast->mcmember.mgid;

			/* Add the new group in before the to-be-destroyed group */
			list_add_tail(&nmcast->list, &mcast->list);
			list_del_init(&mcast->list);

			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&mcast->list, &remove_list);
		} else {
			ipoib_warn(priv, "could not reallocate multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));
		}
	}

	if (priv->broadcast) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;

			rb_replace_node(&priv->broadcast->rb_node,
					&nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&priv->broadcast->list, &remove_list);
		}

		/* NULL if the reallocation failed; rejoin will recreate it */
		priv->broadcast = nmcast;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Leave/free outside the lock — both may sleep */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}
805 | ||
806 | void ipoib_mcast_dev_down(struct net_device *dev) | |
807 | { | |
808 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
809 | unsigned long flags; | |
810 | ||
811 | /* Delete broadcast since it will be recreated */ | |
812 | if (priv->broadcast) { | |
813 | ipoib_dbg_mcast(priv, "deleting broadcast group\n"); | |
814 | ||
815 | spin_lock_irqsave(&priv->lock, flags); | |
816 | rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); | |
817 | spin_unlock_irqrestore(&priv->lock, flags); | |
818 | ipoib_mcast_leave(dev, priv->broadcast); | |
819 | ipoib_mcast_free(priv->broadcast); | |
820 | priv->broadcast = NULL; | |
821 | } | |
822 | } | |
823 | ||
/*
 * Work-queue handler that reconciles our multicast group list with
 * the hardware multicast addresses the networking core now reports:
 * new addresses get fresh entries, send-only entries promoted by a
 * real join are replaced, and groups no longer present are left and
 * freed.  Restarts the join task afterwards if the interface is
 * administratively up.
 */
void ipoib_mcast_restart_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev);

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		/* MGID lives after the 4-byte header of the hardware address */
		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_del(&mcast->list);
				list_add_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_del(&mcast->list);
			list_add_tail(&mcast->list, &remove_list);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}
920 | ||
921 | struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev) | |
922 | { | |
923 | struct ipoib_mcast_iter *iter; | |
924 | ||
925 | iter = kmalloc(sizeof *iter, GFP_KERNEL); | |
926 | if (!iter) | |
927 | return NULL; | |
928 | ||
929 | iter->dev = dev; | |
930 | memset(iter->mgid.raw, 0, sizeof iter->mgid); | |
931 | ||
932 | if (ipoib_mcast_iter_next(iter)) { | |
933 | ipoib_mcast_iter_free(iter); | |
934 | return NULL; | |
935 | } | |
936 | ||
937 | return iter; | |
938 | } | |
939 | ||
/* Release an iterator obtained from ipoib_mcast_iter_init() */
void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter)
{
	kfree(iter);
}
944 | ||
/*
 * Advance @iter to the first group whose MGID sorts strictly after
 * iter->mgid and snapshot its state into the iterator fields.
 * Returns 0 on success, 1 when iteration is exhausted.
 */
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	/* Linear scan from the smallest GID; groups may have been
	 * removed since the last call, so we can't keep a node pointer. */
	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid      = mcast->mcmember.mgid;
			iter->created   = mcast->created;
			iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
			iter->complete  = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}
979 | ||
/*
 * Copy the snapshot captured by the last ipoib_mcast_iter_next() into
 * the caller-provided variables.
 */
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}