Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | |
51a379d0 | 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
4 | * |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
225c7b1f | 34 | #include <linux/string.h> |
0345584e | 35 | #include <linux/etherdevice.h> |
225c7b1f RD |
36 | |
37 | #include <linux/mlx4/cmd.h> | |
ee40fa06 | 38 | #include <linux/export.h> |
225c7b1f RD |
39 | |
40 | #include "mlx4.h" | |
41 | ||
521e575b RL |
42 | #define MGM_QPN_MASK 0x00FFFFFF |
43 | #define MGM_BLCK_LB_BIT 30 | |
0ff1fb65 | 44 | #define MLX4_MAC_MASK 0xffffffffffffULL |
521e575b | 45 | |
225c7b1f RD |
46 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ |
47 | ||
0ec2c0f8 EE |
48 | struct mlx4_mgm { |
49 | __be32 next_gid_index; | |
50 | __be32 members_count; | |
51 | u32 reserved[2]; | |
52 | u8 gid[16]; | |
53 | __be32 qp[MLX4_MAX_QP_PER_MGM]; | |
54 | }; | |
55 | ||
56 | int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) | |
57 | { | |
0ff1fb65 HHZ |
58 | if (dev->caps.steering_mode == |
59 | MLX4_STEERING_MODE_DEVICE_MANAGED) | |
60 | return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE; | |
61 | else | |
62 | return min((1 << mlx4_log_num_mgm_entry_size), | |
63 | MLX4_MAX_MGM_ENTRY_SIZE); | |
0ec2c0f8 EE |
64 | } |
65 | ||
/* Number of QPs that fit in one MGM entry: 4 QPNs per 16-byte line,
 * minus the two leading header lines. */
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	int entry_size = mlx4_get_mgm_entry_size(dev);

	return 4 * (entry_size / 16 - 2);
}
70 | ||
8fcfb4db HHZ |
71 | static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, |
72 | struct mlx4_cmd_mailbox *mailbox, | |
73 | u32 size, | |
74 | u64 *reg_id) | |
75 | { | |
76 | u64 imm; | |
77 | int err = 0; | |
78 | ||
79 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, | |
80 | MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, | |
81 | MLX4_CMD_NATIVE); | |
82 | if (err) | |
83 | return err; | |
84 | *reg_id = imm; | |
85 | ||
86 | return err; | |
87 | } | |
88 | ||
89 | static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) | |
90 | { | |
91 | int err = 0; | |
92 | ||
93 | err = mlx4_cmd(dev, regid, 0, 0, | |
94 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, | |
95 | MLX4_CMD_NATIVE); | |
96 | ||
97 | return err; | |
98 | } | |
99 | ||
0345584e YP |
100 | static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, |
101 | struct mlx4_cmd_mailbox *mailbox) | |
225c7b1f RD |
102 | { |
103 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, | |
f9baff50 | 104 | MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); |
225c7b1f RD |
105 | } |
106 | ||
0345584e YP |
107 | static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, |
108 | struct mlx4_cmd_mailbox *mailbox) | |
225c7b1f RD |
109 | { |
110 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, | |
f9baff50 | 111 | MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); |
225c7b1f RD |
112 | } |
113 | ||
0ec2c0f8 | 114 | static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, |
b12d93d6 YP |
115 | struct mlx4_cmd_mailbox *mailbox) |
116 | { | |
117 | u32 in_mod; | |
118 | ||
0ec2c0f8 | 119 | in_mod = (u32) port << 16 | steer << 1; |
b12d93d6 | 120 | return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, |
f9baff50 JM |
121 | MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, |
122 | MLX4_CMD_NATIVE); | |
b12d93d6 YP |
123 | } |
124 | ||
0345584e YP |
125 | static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, |
126 | u16 *hash, u8 op_mod) | |
225c7b1f RD |
127 | { |
128 | u64 imm; | |
129 | int err; | |
130 | ||
0345584e | 131 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, |
f9baff50 JM |
132 | MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, |
133 | MLX4_CMD_NATIVE); | |
225c7b1f RD |
134 | |
135 | if (!err) | |
136 | *hash = imm; | |
137 | ||
138 | return err; | |
139 | } | |
140 | ||
b12d93d6 YP |
141 | static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, |
142 | enum mlx4_steer_type steer, | |
143 | u32 qpn) | |
144 | { | |
145 | struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; | |
146 | struct mlx4_promisc_qp *pqp; | |
147 | ||
148 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | |
149 | if (pqp->qpn == qpn) | |
150 | return pqp; | |
151 | } | |
152 | /* not found */ | |
153 | return NULL; | |
154 | } | |
155 | ||
156 | /* | |
157 | * Add new entry to steering data structure. | |
158 | * All promisc QPs should be added as well | |
159 | */ | |
0ec2c0f8 | 160 | static int new_steering_entry(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
161 | enum mlx4_steer_type steer, |
162 | unsigned int index, u32 qpn) | |
163 | { | |
164 | struct mlx4_steer *s_steer; | |
165 | struct mlx4_cmd_mailbox *mailbox; | |
166 | struct mlx4_mgm *mgm; | |
167 | u32 members_count; | |
168 | struct mlx4_steer_index *new_entry; | |
169 | struct mlx4_promisc_qp *pqp; | |
a14b289d | 170 | struct mlx4_promisc_qp *dqp = NULL; |
b12d93d6 YP |
171 | u32 prot; |
172 | int err; | |
b12d93d6 | 173 | |
4c41b367 | 174 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 YP |
175 | new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); |
176 | if (!new_entry) | |
177 | return -ENOMEM; | |
178 | ||
179 | INIT_LIST_HEAD(&new_entry->duplicates); | |
180 | new_entry->index = index; | |
181 | list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); | |
182 | ||
183 | /* If the given qpn is also a promisc qp, | |
184 | * it should be inserted to duplicates list | |
185 | */ | |
0ec2c0f8 | 186 | pqp = get_promisc_qp(dev, 0, steer, qpn); |
b12d93d6 YP |
187 | if (pqp) { |
188 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | |
189 | if (!dqp) { | |
190 | err = -ENOMEM; | |
191 | goto out_alloc; | |
192 | } | |
193 | dqp->qpn = qpn; | |
194 | list_add_tail(&dqp->list, &new_entry->duplicates); | |
195 | } | |
196 | ||
197 | /* if no promisc qps for this vep, we are done */ | |
198 | if (list_empty(&s_steer->promisc_qps[steer])) | |
199 | return 0; | |
200 | ||
201 | /* now need to add all the promisc qps to the new | |
202 | * steering entry, as they should also receive the packets | |
203 | * destined to this address */ | |
204 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
205 | if (IS_ERR(mailbox)) { | |
206 | err = -ENOMEM; | |
207 | goto out_alloc; | |
208 | } | |
209 | mgm = mailbox->buf; | |
210 | ||
211 | err = mlx4_READ_ENTRY(dev, index, mailbox); | |
212 | if (err) | |
213 | goto out_mailbox; | |
214 | ||
215 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | |
216 | prot = be32_to_cpu(mgm->members_count) >> 30; | |
217 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | |
218 | /* don't add already existing qpn */ | |
219 | if (pqp->qpn == qpn) | |
220 | continue; | |
0ec2c0f8 | 221 | if (members_count == dev->caps.num_qp_per_mgm) { |
b12d93d6 YP |
222 | /* out of space */ |
223 | err = -ENOMEM; | |
224 | goto out_mailbox; | |
225 | } | |
226 | ||
227 | /* add the qpn */ | |
228 | mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); | |
229 | } | |
230 | /* update the qps count and update the entry with all the promisc qps*/ | |
231 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | |
232 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | |
233 | ||
234 | out_mailbox: | |
235 | mlx4_free_cmd_mailbox(dev, mailbox); | |
236 | if (!err) | |
237 | return 0; | |
238 | out_alloc: | |
239 | if (dqp) { | |
240 | list_del(&dqp->list); | |
a14b289d | 241 | kfree(dqp); |
b12d93d6 YP |
242 | } |
243 | list_del(&new_entry->list); | |
244 | kfree(new_entry); | |
245 | return err; | |
246 | } | |
247 | ||
248 | /* update the data structures with existing steering entry */ | |
0ec2c0f8 | 249 | static int existing_steering_entry(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
250 | enum mlx4_steer_type steer, |
251 | unsigned int index, u32 qpn) | |
252 | { | |
253 | struct mlx4_steer *s_steer; | |
254 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | |
255 | struct mlx4_promisc_qp *pqp; | |
256 | struct mlx4_promisc_qp *dqp; | |
b12d93d6 | 257 | |
4c41b367 | 258 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 | 259 | |
0ec2c0f8 | 260 | pqp = get_promisc_qp(dev, 0, steer, qpn); |
b12d93d6 YP |
261 | if (!pqp) |
262 | return 0; /* nothing to do */ | |
263 | ||
264 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | |
265 | if (tmp_entry->index == index) { | |
266 | entry = tmp_entry; | |
267 | break; | |
268 | } | |
269 | } | |
270 | if (unlikely(!entry)) { | |
271 | mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); | |
272 | return -EINVAL; | |
273 | } | |
274 | ||
275 | /* the given qpn is listed as a promisc qpn | |
276 | * we need to add it as a duplicate to this entry | |
25985edc | 277 | * for future references */ |
b12d93d6 | 278 | list_for_each_entry(dqp, &entry->duplicates, list) { |
0ec2c0f8 | 279 | if (qpn == pqp->qpn) |
b12d93d6 YP |
280 | return 0; /* qp is already duplicated */ |
281 | } | |
282 | ||
283 | /* add the qp as a duplicate on this index */ | |
284 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | |
285 | if (!dqp) | |
286 | return -ENOMEM; | |
287 | dqp->qpn = qpn; | |
288 | list_add_tail(&dqp->list, &entry->duplicates); | |
289 | ||
290 | return 0; | |
291 | } | |
292 | ||
293 | /* Check whether a qpn is a duplicate on steering entry | |
294 | * If so, it should not be removed from mgm */ | |
0ec2c0f8 | 295 | static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
296 | enum mlx4_steer_type steer, |
297 | unsigned int index, u32 qpn) | |
298 | { | |
299 | struct mlx4_steer *s_steer; | |
300 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | |
301 | struct mlx4_promisc_qp *dqp, *tmp_dqp; | |
b12d93d6 | 302 | |
4c41b367 | 303 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 YP |
304 | |
305 | /* if qp is not promisc, it cannot be duplicated */ | |
0ec2c0f8 | 306 | if (!get_promisc_qp(dev, 0, steer, qpn)) |
b12d93d6 YP |
307 | return false; |
308 | ||
309 | /* The qp is promisc qp so it is a duplicate on this index | |
310 | * Find the index entry, and remove the duplicate */ | |
311 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | |
312 | if (tmp_entry->index == index) { | |
313 | entry = tmp_entry; | |
314 | break; | |
315 | } | |
316 | } | |
317 | if (unlikely(!entry)) { | |
318 | mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); | |
319 | return false; | |
320 | } | |
321 | list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { | |
322 | if (dqp->qpn == qpn) { | |
323 | list_del(&dqp->list); | |
324 | kfree(dqp); | |
325 | } | |
326 | } | |
327 | return true; | |
328 | } | |
329 | ||
330 | /* I a steering entry contains only promisc QPs, it can be removed. */ | |
0ec2c0f8 | 331 | static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
332 | enum mlx4_steer_type steer, |
333 | unsigned int index, u32 tqpn) | |
334 | { | |
335 | struct mlx4_steer *s_steer; | |
336 | struct mlx4_cmd_mailbox *mailbox; | |
337 | struct mlx4_mgm *mgm; | |
338 | struct mlx4_steer_index *entry = NULL, *tmp_entry; | |
339 | u32 qpn; | |
340 | u32 members_count; | |
341 | bool ret = false; | |
342 | int i; | |
b12d93d6 | 343 | |
4c41b367 | 344 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 YP |
345 | |
346 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
347 | if (IS_ERR(mailbox)) | |
348 | return false; | |
349 | mgm = mailbox->buf; | |
350 | ||
351 | if (mlx4_READ_ENTRY(dev, index, mailbox)) | |
352 | goto out; | |
353 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | |
354 | for (i = 0; i < members_count; i++) { | |
355 | qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; | |
0ec2c0f8 | 356 | if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { |
b12d93d6 YP |
357 | /* the qp is not promisc, the entry can't be removed */ |
358 | goto out; | |
359 | } | |
360 | } | |
361 | /* All the qps currently registered for this entry are promiscuous, | |
362 | * Checking for duplicates */ | |
363 | ret = true; | |
364 | list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { | |
365 | if (entry->index == index) { | |
366 | if (list_empty(&entry->duplicates)) { | |
367 | list_del(&entry->list); | |
368 | kfree(entry); | |
369 | } else { | |
370 | /* This entry contains duplicates so it shouldn't be removed */ | |
371 | ret = false; | |
372 | goto out; | |
373 | } | |
374 | } | |
375 | } | |
376 | ||
377 | out: | |
378 | mlx4_free_cmd_mailbox(dev, mailbox); | |
379 | return ret; | |
380 | } | |
381 | ||
0ec2c0f8 | 382 | static int add_promisc_qp(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
383 | enum mlx4_steer_type steer, u32 qpn) |
384 | { | |
385 | struct mlx4_steer *s_steer; | |
386 | struct mlx4_cmd_mailbox *mailbox; | |
387 | struct mlx4_mgm *mgm; | |
388 | struct mlx4_steer_index *entry; | |
389 | struct mlx4_promisc_qp *pqp; | |
390 | struct mlx4_promisc_qp *dqp; | |
391 | u32 members_count; | |
392 | u32 prot; | |
393 | int i; | |
394 | bool found; | |
b12d93d6 | 395 | int err; |
b12d93d6 | 396 | struct mlx4_priv *priv = mlx4_priv(dev); |
0ec2c0f8 | 397 | |
4c41b367 | 398 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 YP |
399 | |
400 | mutex_lock(&priv->mcg_table.mutex); | |
401 | ||
0ec2c0f8 | 402 | if (get_promisc_qp(dev, 0, steer, qpn)) { |
b12d93d6 YP |
403 | err = 0; /* Noting to do, already exists */ |
404 | goto out_mutex; | |
405 | } | |
406 | ||
407 | pqp = kmalloc(sizeof *pqp, GFP_KERNEL); | |
408 | if (!pqp) { | |
409 | err = -ENOMEM; | |
410 | goto out_mutex; | |
411 | } | |
412 | pqp->qpn = qpn; | |
413 | ||
414 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
415 | if (IS_ERR(mailbox)) { | |
416 | err = -ENOMEM; | |
417 | goto out_alloc; | |
418 | } | |
419 | mgm = mailbox->buf; | |
420 | ||
421 | /* the promisc qp needs to be added for each one of the steering | |
422 | * entries, if it already exists, needs to be added as a duplicate | |
423 | * for this entry */ | |
424 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | |
425 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | |
426 | if (err) | |
427 | goto out_mailbox; | |
428 | ||
429 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | |
430 | prot = be32_to_cpu(mgm->members_count) >> 30; | |
431 | found = false; | |
432 | for (i = 0; i < members_count; i++) { | |
433 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { | |
434 | /* Entry already exists, add to duplicates */ | |
435 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | |
436 | if (!dqp) | |
437 | goto out_mailbox; | |
438 | dqp->qpn = qpn; | |
439 | list_add_tail(&dqp->list, &entry->duplicates); | |
440 | found = true; | |
441 | } | |
442 | } | |
443 | if (!found) { | |
444 | /* Need to add the qpn to mgm */ | |
0ec2c0f8 | 445 | if (members_count == dev->caps.num_qp_per_mgm) { |
b12d93d6 YP |
446 | /* entry is full */ |
447 | err = -ENOMEM; | |
448 | goto out_mailbox; | |
449 | } | |
450 | mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); | |
451 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | |
452 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | |
453 | if (err) | |
454 | goto out_mailbox; | |
455 | } | |
b12d93d6 YP |
456 | } |
457 | ||
458 | /* add the new qpn to list of promisc qps */ | |
459 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | |
460 | /* now need to add all the promisc qps to default entry */ | |
461 | memset(mgm, 0, sizeof *mgm); | |
462 | members_count = 0; | |
463 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | |
464 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | |
465 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | |
466 | ||
0ec2c0f8 | 467 | err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); |
b12d93d6 YP |
468 | if (err) |
469 | goto out_list; | |
470 | ||
471 | mlx4_free_cmd_mailbox(dev, mailbox); | |
472 | mutex_unlock(&priv->mcg_table.mutex); | |
473 | return 0; | |
474 | ||
475 | out_list: | |
476 | list_del(&pqp->list); | |
477 | out_mailbox: | |
478 | mlx4_free_cmd_mailbox(dev, mailbox); | |
479 | out_alloc: | |
480 | kfree(pqp); | |
481 | out_mutex: | |
482 | mutex_unlock(&priv->mcg_table.mutex); | |
483 | return err; | |
484 | } | |
485 | ||
0ec2c0f8 | 486 | static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, |
b12d93d6 YP |
487 | enum mlx4_steer_type steer, u32 qpn) |
488 | { | |
489 | struct mlx4_priv *priv = mlx4_priv(dev); | |
490 | struct mlx4_steer *s_steer; | |
491 | struct mlx4_cmd_mailbox *mailbox; | |
492 | struct mlx4_mgm *mgm; | |
493 | struct mlx4_steer_index *entry; | |
494 | struct mlx4_promisc_qp *pqp; | |
495 | struct mlx4_promisc_qp *dqp; | |
496 | u32 members_count; | |
497 | bool found; | |
498 | bool back_to_list = false; | |
499 | int loc, i; | |
500 | int err; | |
b12d93d6 | 501 | |
4c41b367 | 502 | s_steer = &mlx4_priv(dev)->steer[port - 1]; |
b12d93d6 YP |
503 | mutex_lock(&priv->mcg_table.mutex); |
504 | ||
0ec2c0f8 | 505 | pqp = get_promisc_qp(dev, 0, steer, qpn); |
b12d93d6 YP |
506 | if (unlikely(!pqp)) { |
507 | mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); | |
508 | /* nothing to do */ | |
509 | err = 0; | |
510 | goto out_mutex; | |
511 | } | |
512 | ||
513 | /*remove from list of promisc qps */ | |
514 | list_del(&pqp->list); | |
b12d93d6 YP |
515 | |
516 | /* set the default entry not to include the removed one */ | |
517 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
518 | if (IS_ERR(mailbox)) { | |
519 | err = -ENOMEM; | |
520 | back_to_list = true; | |
521 | goto out_list; | |
522 | } | |
523 | mgm = mailbox->buf; | |
0ec2c0f8 | 524 | memset(mgm, 0, sizeof *mgm); |
b12d93d6 YP |
525 | members_count = 0; |
526 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | |
527 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | |
528 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | |
529 | ||
0ec2c0f8 | 530 | err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); |
b12d93d6 YP |
531 | if (err) |
532 | goto out_mailbox; | |
533 | ||
534 | /* remove the qp from all the steering entries*/ | |
535 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | |
536 | found = false; | |
537 | list_for_each_entry(dqp, &entry->duplicates, list) { | |
538 | if (dqp->qpn == qpn) { | |
539 | found = true; | |
540 | break; | |
541 | } | |
542 | } | |
543 | if (found) { | |
544 | /* a duplicate, no need to change the mgm, | |
545 | * only update the duplicates list */ | |
546 | list_del(&dqp->list); | |
547 | kfree(dqp); | |
548 | } else { | |
549 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | |
550 | if (err) | |
551 | goto out_mailbox; | |
552 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | |
553 | for (loc = -1, i = 0; i < members_count; ++i) | |
554 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) | |
555 | loc = i; | |
556 | ||
557 | mgm->members_count = cpu_to_be32(--members_count | | |
558 | (MLX4_PROT_ETH << 30)); | |
559 | mgm->qp[loc] = mgm->qp[i - 1]; | |
560 | mgm->qp[i - 1] = 0; | |
561 | ||
562 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | |
563 | if (err) | |
564 | goto out_mailbox; | |
565 | } | |
566 | ||
567 | } | |
568 | ||
569 | out_mailbox: | |
570 | mlx4_free_cmd_mailbox(dev, mailbox); | |
571 | out_list: | |
572 | if (back_to_list) | |
573 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | |
53020092 YP |
574 | else |
575 | kfree(pqp); | |
b12d93d6 YP |
576 | out_mutex: |
577 | mutex_unlock(&priv->mcg_table.mutex); | |
578 | return err; | |
579 | } | |
580 | ||
225c7b1f RD |
581 | /* |
582 | * Caller must hold MCG table semaphore. gid and mgm parameters must | |
583 | * be properly aligned for command interface. | |
584 | * | |
585 | * Returns 0 unless a firmware command error occurs. | |
586 | * | |
587 | * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 | |
588 | * and *mgm holds MGM entry. | |
589 | * | |
590 | * if GID is found in AMGM, *index = index in AMGM, *prev = index of | |
591 | * previous entry in hash chain and *mgm holds AMGM entry. | |
592 | * | |
593 | * If no AMGM exists for given gid, *index = -1, *prev = index of last | |
594 | * entry in hash chain and *mgm holds end of hash chain. | |
595 | */ | |
0345584e YP |
596 | static int find_entry(struct mlx4_dev *dev, u8 port, |
597 | u8 *gid, enum mlx4_protocol prot, | |
0345584e | 598 | struct mlx4_cmd_mailbox *mgm_mailbox, |
deb8b3e8 | 599 | int *prev, int *index) |
225c7b1f RD |
600 | { |
601 | struct mlx4_cmd_mailbox *mailbox; | |
602 | struct mlx4_mgm *mgm = mgm_mailbox->buf; | |
603 | u8 *mgid; | |
604 | int err; | |
deb8b3e8 | 605 | u16 hash; |
ccf86321 OG |
606 | u8 op_mod = (prot == MLX4_PROT_ETH) ? |
607 | !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; | |
225c7b1f RD |
608 | |
609 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
610 | if (IS_ERR(mailbox)) | |
611 | return -ENOMEM; | |
612 | mgid = mailbox->buf; | |
613 | ||
614 | memcpy(mgid, gid, 16); | |
615 | ||
deb8b3e8 | 616 | err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); |
225c7b1f RD |
617 | mlx4_free_cmd_mailbox(dev, mailbox); |
618 | if (err) | |
619 | return err; | |
620 | ||
621 | if (0) | |
deb8b3e8 | 622 | mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); |
225c7b1f | 623 | |
deb8b3e8 | 624 | *index = hash; |
225c7b1f RD |
625 | *prev = -1; |
626 | ||
627 | do { | |
0345584e | 628 | err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); |
225c7b1f RD |
629 | if (err) |
630 | return err; | |
631 | ||
0345584e | 632 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { |
deb8b3e8 | 633 | if (*index != hash) { |
225c7b1f RD |
634 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); |
635 | err = -EINVAL; | |
636 | } | |
637 | return err; | |
638 | } | |
639 | ||
da995a8a | 640 | if (!memcmp(mgm->gid, gid, 16) && |
0345584e | 641 | be32_to_cpu(mgm->members_count) >> 30 == prot) |
225c7b1f RD |
642 | return err; |
643 | ||
644 | *prev = *index; | |
645 | *index = be32_to_cpu(mgm->next_gid_index) >> 6; | |
646 | } while (*index); | |
647 | ||
648 | *index = -1; | |
649 | return err; | |
650 | } | |
651 | ||
0ff1fb65 HHZ |
652 | struct mlx4_net_trans_rule_hw_ctrl { |
653 | __be32 ctrl; | |
654 | __be32 vf_vep_port; | |
655 | __be32 qpn; | |
656 | __be32 reserved; | |
657 | }; | |
658 | ||
659 | static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, | |
660 | struct mlx4_net_trans_rule_hw_ctrl *hw) | |
661 | { | |
662 | static const u8 __promisc_mode[] = { | |
663 | [MLX4_FS_PROMISC_NONE] = 0x0, | |
664 | [MLX4_FS_PROMISC_UPLINK] = 0x1, | |
665 | [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, | |
666 | [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, | |
667 | }; | |
668 | ||
669 | u32 dw = 0; | |
670 | ||
671 | dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; | |
672 | dw |= ctrl->exclusive ? (1 << 2) : 0; | |
673 | dw |= ctrl->allow_loopback ? (1 << 3) : 0; | |
674 | dw |= __promisc_mode[ctrl->promisc_mode] << 8; | |
675 | dw |= ctrl->priority << 16; | |
676 | ||
677 | hw->ctrl = cpu_to_be32(dw); | |
678 | hw->vf_vep_port = cpu_to_be32(ctrl->port); | |
679 | hw->qpn = cpu_to_be32(ctrl->qpn); | |
680 | } | |
681 | ||
682 | struct mlx4_net_trans_rule_hw_ib { | |
683 | u8 size; | |
684 | u8 rsvd1; | |
685 | __be16 id; | |
686 | u32 rsvd2; | |
687 | __be32 qpn; | |
688 | __be32 qpn_mask; | |
689 | u8 dst_gid[16]; | |
690 | u8 dst_gid_msk[16]; | |
691 | } __packed; | |
692 | ||
693 | struct mlx4_net_trans_rule_hw_eth { | |
694 | u8 size; | |
695 | u8 rsvd; | |
696 | __be16 id; | |
697 | u8 rsvd1[6]; | |
698 | u8 dst_mac[6]; | |
699 | u16 rsvd2; | |
700 | u8 dst_mac_msk[6]; | |
701 | u16 rsvd3; | |
702 | u8 src_mac[6]; | |
703 | u16 rsvd4; | |
704 | u8 src_mac_msk[6]; | |
705 | u8 rsvd5; | |
706 | u8 ether_type_enable; | |
707 | __be16 ether_type; | |
708 | __be16 vlan_id_msk; | |
709 | __be16 vlan_id; | |
710 | } __packed; | |
711 | ||
712 | struct mlx4_net_trans_rule_hw_tcp_udp { | |
713 | u8 size; | |
714 | u8 rsvd; | |
715 | __be16 id; | |
716 | __be16 rsvd1[3]; | |
717 | __be16 dst_port; | |
718 | __be16 rsvd2; | |
719 | __be16 dst_port_msk; | |
720 | __be16 rsvd3; | |
721 | __be16 src_port; | |
722 | __be16 rsvd4; | |
723 | __be16 src_port_msk; | |
724 | } __packed; | |
725 | ||
726 | struct mlx4_net_trans_rule_hw_ipv4 { | |
727 | u8 size; | |
728 | u8 rsvd; | |
729 | __be16 id; | |
730 | __be32 rsvd1; | |
731 | __be32 dst_ip; | |
732 | __be32 dst_ip_msk; | |
733 | __be32 src_ip; | |
734 | __be32 src_ip_msk; | |
735 | } __packed; | |
736 | ||
737 | struct _rule_hw { | |
738 | union { | |
739 | struct { | |
740 | u8 size; | |
741 | u8 rsvd; | |
742 | __be16 id; | |
743 | }; | |
744 | struct mlx4_net_trans_rule_hw_eth eth; | |
745 | struct mlx4_net_trans_rule_hw_ib ib; | |
746 | struct mlx4_net_trans_rule_hw_ipv4 ipv4; | |
747 | struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; | |
748 | }; | |
749 | }; | |
750 | ||
751 | static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, | |
752 | struct _rule_hw *rule_hw) | |
753 | { | |
754 | static const u16 __sw_id_hw[] = { | |
755 | [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, | |
756 | [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, | |
757 | [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, | |
758 | [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, | |
759 | [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, | |
760 | [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 | |
761 | }; | |
762 | ||
763 | static const size_t __rule_hw_sz[] = { | |
764 | [MLX4_NET_TRANS_RULE_ID_ETH] = | |
765 | sizeof(struct mlx4_net_trans_rule_hw_eth), | |
766 | [MLX4_NET_TRANS_RULE_ID_IB] = | |
767 | sizeof(struct mlx4_net_trans_rule_hw_ib), | |
768 | [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, | |
769 | [MLX4_NET_TRANS_RULE_ID_IPV4] = | |
770 | sizeof(struct mlx4_net_trans_rule_hw_ipv4), | |
771 | [MLX4_NET_TRANS_RULE_ID_TCP] = | |
772 | sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), | |
773 | [MLX4_NET_TRANS_RULE_ID_UDP] = | |
774 | sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) | |
775 | }; | |
447458c0 | 776 | if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { |
0ff1fb65 HHZ |
777 | mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id); |
778 | return -EINVAL; | |
779 | } | |
780 | memset(rule_hw, 0, __rule_hw_sz[spec->id]); | |
781 | rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); | |
782 | rule_hw->size = __rule_hw_sz[spec->id] >> 2; | |
783 | ||
784 | switch (spec->id) { | |
785 | case MLX4_NET_TRANS_RULE_ID_ETH: | |
786 | memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); | |
787 | memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, | |
788 | ETH_ALEN); | |
789 | memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); | |
790 | memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, | |
791 | ETH_ALEN); | |
792 | if (spec->eth.ether_type_enable) { | |
793 | rule_hw->eth.ether_type_enable = 1; | |
794 | rule_hw->eth.ether_type = spec->eth.ether_type; | |
795 | } | |
796 | rule_hw->eth.vlan_id = spec->eth.vlan_id; | |
797 | rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; | |
798 | break; | |
799 | ||
800 | case MLX4_NET_TRANS_RULE_ID_IB: | |
801 | rule_hw->ib.qpn = spec->ib.r_qpn; | |
802 | rule_hw->ib.qpn_mask = spec->ib.qpn_msk; | |
803 | memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); | |
804 | memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); | |
805 | break; | |
806 | ||
807 | case MLX4_NET_TRANS_RULE_ID_IPV6: | |
808 | return -EOPNOTSUPP; | |
809 | ||
810 | case MLX4_NET_TRANS_RULE_ID_IPV4: | |
811 | rule_hw->ipv4.src_ip = spec->ipv4.src_ip; | |
812 | rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; | |
813 | rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; | |
814 | rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; | |
815 | break; | |
816 | ||
817 | case MLX4_NET_TRANS_RULE_ID_TCP: | |
818 | case MLX4_NET_TRANS_RULE_ID_UDP: | |
819 | rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; | |
820 | rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; | |
821 | rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; | |
822 | rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; | |
823 | break; | |
824 | ||
825 | default: | |
826 | return -EINVAL; | |
827 | } | |
828 | ||
829 | return __rule_hw_sz[spec->id]; | |
830 | } | |
831 | ||
832 | static void mlx4_err_rule(struct mlx4_dev *dev, char *str, | |
833 | struct mlx4_net_trans_rule *rule) | |
834 | { | |
835 | #define BUF_SIZE 256 | |
836 | struct mlx4_spec_list *cur; | |
837 | char buf[BUF_SIZE]; | |
838 | int len = 0; | |
839 | ||
840 | mlx4_err(dev, "%s", str); | |
841 | len += snprintf(buf + len, BUF_SIZE - len, | |
842 | "port = %d prio = 0x%x qp = 0x%x ", | |
843 | rule->port, rule->priority, rule->qpn); | |
844 | ||
845 | list_for_each_entry(cur, &rule->list, list) { | |
846 | switch (cur->id) { | |
847 | case MLX4_NET_TRANS_RULE_ID_ETH: | |
848 | len += snprintf(buf + len, BUF_SIZE - len, | |
849 | "dmac = %pM ", &cur->eth.dst_mac); | |
850 | if (cur->eth.ether_type) | |
851 | len += snprintf(buf + len, BUF_SIZE - len, | |
852 | "ethertype = 0x%x ", | |
853 | be16_to_cpu(cur->eth.ether_type)); | |
854 | if (cur->eth.vlan_id) | |
855 | len += snprintf(buf + len, BUF_SIZE - len, | |
856 | "vlan-id = %d ", | |
857 | be16_to_cpu(cur->eth.vlan_id)); | |
858 | break; | |
859 | ||
860 | case MLX4_NET_TRANS_RULE_ID_IPV4: | |
861 | if (cur->ipv4.src_ip) | |
862 | len += snprintf(buf + len, BUF_SIZE - len, | |
863 | "src-ip = %pI4 ", | |
864 | &cur->ipv4.src_ip); | |
865 | if (cur->ipv4.dst_ip) | |
866 | len += snprintf(buf + len, BUF_SIZE - len, | |
867 | "dst-ip = %pI4 ", | |
868 | &cur->ipv4.dst_ip); | |
869 | break; | |
870 | ||
871 | case MLX4_NET_TRANS_RULE_ID_TCP: | |
872 | case MLX4_NET_TRANS_RULE_ID_UDP: | |
873 | if (cur->tcp_udp.src_port) | |
874 | len += snprintf(buf + len, BUF_SIZE - len, | |
875 | "src-port = %d ", | |
876 | be16_to_cpu(cur->tcp_udp.src_port)); | |
877 | if (cur->tcp_udp.dst_port) | |
878 | len += snprintf(buf + len, BUF_SIZE - len, | |
879 | "dst-port = %d ", | |
880 | be16_to_cpu(cur->tcp_udp.dst_port)); | |
881 | break; | |
882 | ||
883 | case MLX4_NET_TRANS_RULE_ID_IB: | |
884 | len += snprintf(buf + len, BUF_SIZE - len, | |
885 | "dst-gid = %pI6\n", cur->ib.dst_gid); | |
886 | len += snprintf(buf + len, BUF_SIZE - len, | |
887 | "dst-gid-mask = %pI6\n", | |
888 | cur->ib.dst_gid_msk); | |
889 | break; | |
890 | ||
891 | case MLX4_NET_TRANS_RULE_ID_IPV6: | |
892 | break; | |
893 | ||
894 | default: | |
895 | break; | |
896 | } | |
897 | } | |
898 | len += snprintf(buf + len, BUF_SIZE - len, "\n"); | |
899 | mlx4_err(dev, "%s", buf); | |
900 | ||
901 | if (len >= BUF_SIZE) | |
902 | mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); | |
903 | } | |
904 | ||
905 | int mlx4_flow_attach(struct mlx4_dev *dev, | |
906 | struct mlx4_net_trans_rule *rule, u64 *reg_id) | |
907 | { | |
908 | struct mlx4_cmd_mailbox *mailbox; | |
909 | struct mlx4_spec_list *cur; | |
910 | u32 size = 0; | |
911 | int ret; | |
912 | ||
913 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
914 | if (IS_ERR(mailbox)) | |
915 | return PTR_ERR(mailbox); | |
916 | ||
917 | memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); | |
918 | trans_rule_ctrl_to_hw(rule, mailbox->buf); | |
919 | ||
920 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); | |
921 | ||
922 | list_for_each_entry(cur, &rule->list, list) { | |
923 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); | |
924 | if (ret < 0) { | |
925 | mlx4_free_cmd_mailbox(dev, mailbox); | |
926 | return -EINVAL; | |
927 | } | |
928 | size += ret; | |
929 | } | |
930 | ||
931 | ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); | |
932 | if (ret == -ENOMEM) | |
933 | mlx4_err_rule(dev, | |
934 | "mcg table is full. Fail to register network rule.\n", | |
935 | rule); | |
936 | else if (ret) | |
937 | mlx4_err_rule(dev, "Fail to register network rule.\n", rule); | |
938 | ||
939 | mlx4_free_cmd_mailbox(dev, mailbox); | |
940 | ||
941 | return ret; | |
942 | } | |
943 | EXPORT_SYMBOL_GPL(mlx4_flow_attach); | |
944 | ||
945 | int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) | |
946 | { | |
947 | int err; | |
948 | ||
949 | err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); | |
950 | if (err) | |
951 | mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", | |
952 | reg_id); | |
953 | return err; | |
954 | } | |
955 | EXPORT_SYMBOL_GPL(mlx4_flow_detach); | |
956 | ||
0345584e YP |
/*
 * Attach a QP to the steering entry identified by @gid (B0 steering mode).
 *
 * Looks the GID up in the MGM hash table; if found, appends qp->qpn to the
 * entry's member list, otherwise allocates a new AMGM entry, fills it, and
 * links it onto the hash chain.  For Ethernet protocol the promisc-mode
 * steering bookkeeping (new_steering_entry/existing_steering_entry) is
 * updated as well.
 *
 * @gid[5] encodes the port; the whole table is serialized by
 * priv->mcg_table.mutex.  Returns 0 on success or a negative errno.
 */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;		/* set when a fresh AMGM entry must be chained */
	int i;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	u8 new_entry = 0;	/* entry was (re)initialized for this GID */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	/* On success, mgm holds the entry at *index (or the chain tail). */
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	/*
	 * NOTE(review): if find_entry() fails, or bails out below before
	 * index/new_entry are meaningful, the ETH branch at "out:" still runs
	 * the steering-entry bookkeeping — verify those paths are benign.
	 */
	if (index != -1) {
		/* Entry exists but has 0 members: it is stale; reclaim it. */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		/* AMGM entries live after the num_mgms hash entries. */
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	/* Low 24 bits hold the member count; top 2 bits hold the protocol. */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* Duplicate attach is not an error — report success. */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* Chain the new AMGM entry after the tail found by find_entry(). */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	/* Roll back the AMGM allocation if linking the new entry failed. */
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
225c7b1f | 1065 | |
0345584e YP |
/*
 * Detach a QP from the steering entry identified by @gid (B0 steering mode).
 *
 * Removes qp->qpn from the entry's member list; if the list becomes empty
 * (and the promisc bookkeeping allows it) the entry itself is unlinked from
 * the MGM hash chain and, for AMGM entries, its index is returned to the
 * bitmap.  Serialized by priv->mcg_table.mutex.
 *
 * Returns 0 on success, -EINVAL if the GID or QP is not found, or a
 * negative errno from a firmware command.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;		/* loc = position of qp->qpn in the member list */
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	/* Low 24 bits hold the member count; top 2 bits hold the protocol. */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}


	/* Swap the last member (qp[i - 1], i == old count) into the hole. */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* Members remain (or promisc bookkeeping forbids removal):
	 * just rewrite the shrunken entry and we are done. */
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* Pull the chain successor up into the hash slot. */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* Bypass the deleted entry: prev now points at our successor. */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
0345584e | 1178 | |
0ec2c0f8 EE |
1179 | static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, |
1180 | u8 gid[16], u8 attach, u8 block_loopback, | |
1181 | enum mlx4_protocol prot) | |
1182 | { | |
1183 | struct mlx4_cmd_mailbox *mailbox; | |
1184 | int err = 0; | |
1185 | int qpn; | |
1186 | ||
1187 | if (!mlx4_is_mfunc(dev)) | |
1188 | return -EBADF; | |
1189 | ||
1190 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
1191 | if (IS_ERR(mailbox)) | |
1192 | return PTR_ERR(mailbox); | |
1193 | ||
1194 | memcpy(mailbox->buf, gid, 16); | |
1195 | qpn = qp->qpn; | |
1196 | qpn |= (prot << 28); | |
1197 | if (attach && block_loopback) | |
1198 | qpn |= (1 << 31); | |
1199 | ||
1200 | err = mlx4_cmd(dev, mailbox->dma, qpn, attach, | |
1201 | MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, | |
1202 | MLX4_CMD_WRAPPED); | |
1203 | ||
1204 | mlx4_free_cmd_mailbox(dev, mailbox); | |
1205 | return err; | |
1206 | } | |
0345584e YP |
1207 | |
1208 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |
0ff1fb65 HHZ |
1209 | u8 port, int block_mcast_loopback, |
1210 | enum mlx4_protocol prot, u64 *reg_id) | |
0345584e | 1211 | { |
0345584e | 1212 | |
c96d97f4 HHZ |
1213 | switch (dev->caps.steering_mode) { |
1214 | case MLX4_STEERING_MODE_A0: | |
1215 | if (prot == MLX4_PROT_ETH) | |
1216 | return 0; | |
0345584e | 1217 | |
c96d97f4 HHZ |
1218 | case MLX4_STEERING_MODE_B0: |
1219 | if (prot == MLX4_PROT_ETH) | |
1220 | gid[7] |= (MLX4_MC_STEER << 1); | |
0ec2c0f8 | 1221 | |
c96d97f4 HHZ |
1222 | if (mlx4_is_mfunc(dev)) |
1223 | return mlx4_QP_ATTACH(dev, qp, gid, 1, | |
1224 | block_mcast_loopback, prot); | |
1225 | return mlx4_qp_attach_common(dev, qp, gid, | |
1226 | block_mcast_loopback, prot, | |
1227 | MLX4_MC_STEER); | |
1228 | ||
0ff1fb65 HHZ |
1229 | case MLX4_STEERING_MODE_DEVICE_MANAGED: { |
1230 | struct mlx4_spec_list spec = { {NULL} }; | |
1231 | __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); | |
1232 | ||
1233 | struct mlx4_net_trans_rule rule = { | |
1234 | .queue_mode = MLX4_NET_TRANS_Q_FIFO, | |
1235 | .exclusive = 0, | |
1236 | .promisc_mode = MLX4_FS_PROMISC_NONE, | |
1237 | .priority = MLX4_DOMAIN_NIC, | |
1238 | }; | |
1239 | ||
1240 | rule.allow_loopback = ~block_mcast_loopback; | |
1241 | rule.port = port; | |
1242 | rule.qpn = qp->qpn; | |
1243 | INIT_LIST_HEAD(&rule.list); | |
1244 | ||
1245 | switch (prot) { | |
1246 | case MLX4_PROT_ETH: | |
1247 | spec.id = MLX4_NET_TRANS_RULE_ID_ETH; | |
1248 | memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); | |
1249 | memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); | |
1250 | break; | |
1251 | ||
1252 | case MLX4_PROT_IB_IPV6: | |
1253 | spec.id = MLX4_NET_TRANS_RULE_ID_IB; | |
1254 | memcpy(spec.ib.dst_gid, gid, 16); | |
1255 | memset(&spec.ib.dst_gid_msk, 0xff, 16); | |
1256 | break; | |
1257 | default: | |
1258 | return -EINVAL; | |
1259 | } | |
1260 | list_add_tail(&spec.list, &rule.list); | |
1261 | ||
1262 | return mlx4_flow_attach(dev, &rule, reg_id); | |
1263 | } | |
1264 | ||
c96d97f4 HHZ |
1265 | default: |
1266 | return -EINVAL; | |
1267 | } | |
0345584e YP |
1268 | } |
1269 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | |
1270 | ||
/*
 * Detach a QP from a multicast group — the inverse of
 * mlx4_multicast_attach(), dispatching on the device's steering mode.
 *
 * @reg_id: registration id returned by the attach (device-managed mode).
 * Returns 0 on success or a negative errno.
 */
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through: non-ETH protocols are steered as in B0 */

	case MLX4_STEERING_MODE_B0:
		/* Same steer-type tag the attach path OR'ed into the GID. */
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
1297 | ||
592e49dd HHZ |
1298 | int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, |
1299 | u32 qpn, enum mlx4_net_trans_promisc_mode mode) | |
1300 | { | |
1301 | struct mlx4_net_trans_rule rule; | |
1302 | u64 *regid_p; | |
1303 | ||
1304 | switch (mode) { | |
1305 | case MLX4_FS_PROMISC_UPLINK: | |
1306 | case MLX4_FS_PROMISC_FUNCTION_PORT: | |
1307 | regid_p = &dev->regid_promisc_array[port]; | |
1308 | break; | |
1309 | case MLX4_FS_PROMISC_ALL_MULTI: | |
1310 | regid_p = &dev->regid_allmulti_array[port]; | |
1311 | break; | |
1312 | default: | |
1313 | return -1; | |
1314 | } | |
1315 | ||
1316 | if (*regid_p != 0) | |
1317 | return -1; | |
1318 | ||
1319 | rule.promisc_mode = mode; | |
1320 | rule.port = port; | |
1321 | rule.qpn = qpn; | |
1322 | INIT_LIST_HEAD(&rule.list); | |
1323 | mlx4_err(dev, "going promisc on %x\n", port); | |
1324 | ||
1325 | return mlx4_flow_attach(dev, &rule, regid_p); | |
1326 | } | |
1327 | EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); | |
1328 | ||
1329 | int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, | |
1330 | enum mlx4_net_trans_promisc_mode mode) | |
1331 | { | |
1332 | int ret; | |
1333 | u64 *regid_p; | |
1334 | ||
1335 | switch (mode) { | |
1336 | case MLX4_FS_PROMISC_UPLINK: | |
1337 | case MLX4_FS_PROMISC_FUNCTION_PORT: | |
1338 | regid_p = &dev->regid_promisc_array[port]; | |
1339 | break; | |
1340 | case MLX4_FS_PROMISC_ALL_MULTI: | |
1341 | regid_p = &dev->regid_allmulti_array[port]; | |
1342 | break; | |
1343 | default: | |
1344 | return -1; | |
1345 | } | |
1346 | ||
1347 | if (*regid_p == 0) | |
1348 | return -1; | |
1349 | ||
1350 | ret = mlx4_flow_detach(dev, *regid_p); | |
1351 | if (ret == 0) | |
1352 | *regid_p = 0; | |
1353 | ||
1354 | return ret; | |
1355 | } | |
1356 | EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); | |
1357 | ||
ffe455ad | 1358 | int mlx4_unicast_attach(struct mlx4_dev *dev, |
0ec2c0f8 EE |
1359 | struct mlx4_qp *qp, u8 gid[16], |
1360 | int block_mcast_loopback, enum mlx4_protocol prot) | |
1361 | { | |
0ec2c0f8 EE |
1362 | if (prot == MLX4_PROT_ETH) |
1363 | gid[7] |= (MLX4_UC_STEER << 1); | |
1364 | ||
1365 | if (mlx4_is_mfunc(dev)) | |
1366 | return mlx4_QP_ATTACH(dev, qp, gid, 1, | |
1367 | block_mcast_loopback, prot); | |
1368 | ||
1369 | return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, | |
1370 | prot, MLX4_UC_STEER); | |
1371 | } | |
1372 | EXPORT_SYMBOL_GPL(mlx4_unicast_attach); | |
1373 | ||
ffe455ad | 1374 | int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, |
0ec2c0f8 EE |
1375 | u8 gid[16], enum mlx4_protocol prot) |
1376 | { | |
0ec2c0f8 EE |
1377 | if (prot == MLX4_PROT_ETH) |
1378 | gid[7] |= (MLX4_UC_STEER << 1); | |
1379 | ||
1380 | if (mlx4_is_mfunc(dev)) | |
1381 | return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); | |
1382 | ||
1383 | return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); | |
1384 | } | |
1385 | EXPORT_SYMBOL_GPL(mlx4_unicast_detach); | |
1386 | ||
1387 | int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, | |
1388 | struct mlx4_vhcr *vhcr, | |
1389 | struct mlx4_cmd_mailbox *inbox, | |
1390 | struct mlx4_cmd_mailbox *outbox, | |
1391 | struct mlx4_cmd_info *cmd) | |
1392 | { | |
1393 | u32 qpn = (u32) vhcr->in_param & 0xffffffff; | |
1394 | u8 port = vhcr->in_param >> 62; | |
1395 | enum mlx4_steer_type steer = vhcr->in_modifier; | |
1396 | ||
1397 | /* Promiscuous unicast is not allowed in mfunc */ | |
1398 | if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) | |
1399 | return 0; | |
1400 | ||
1401 | if (vhcr->op_modifier) | |
1402 | return add_promisc_qp(dev, port, steer, qpn); | |
1403 | else | |
1404 | return remove_promisc_qp(dev, port, steer, qpn); | |
1405 | } | |
1406 | ||
1407 | static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, | |
1408 | enum mlx4_steer_type steer, u8 add, u8 port) | |
1409 | { | |
1410 | return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, | |
1411 | MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, | |
1412 | MLX4_CMD_WRAPPED); | |
1413 | } | |
b12d93d6 YP |
1414 | |
1415 | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | |
1416 | { | |
0ec2c0f8 EE |
1417 | if (mlx4_is_mfunc(dev)) |
1418 | return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); | |
b12d93d6 | 1419 | |
0ec2c0f8 | 1420 | return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); |
b12d93d6 YP |
1421 | } |
1422 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); | |
1423 | ||
1424 | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | |
1425 | { | |
0ec2c0f8 EE |
1426 | if (mlx4_is_mfunc(dev)) |
1427 | return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); | |
b12d93d6 | 1428 | |
0ec2c0f8 | 1429 | return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); |
b12d93d6 YP |
1430 | } |
1431 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); | |
1432 | ||
1433 | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | |
1434 | { | |
0ec2c0f8 EE |
1435 | if (mlx4_is_mfunc(dev)) |
1436 | return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); | |
b12d93d6 | 1437 | |
0ec2c0f8 | 1438 | return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); |
b12d93d6 YP |
1439 | } |
1440 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); | |
1441 | ||
1442 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | |
1443 | { | |
0ec2c0f8 EE |
1444 | if (mlx4_is_mfunc(dev)) |
1445 | return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); | |
1446 | ||
1447 | return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); | |
b12d93d6 YP |
1448 | } |
1449 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); | |
1450 | ||
3d73c288 | 1451 | int mlx4_init_mcg_table(struct mlx4_dev *dev) |
225c7b1f RD |
1452 | { |
1453 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1454 | int err; | |
1455 | ||
0ff1fb65 HHZ |
1456 | /* No need for mcg_table when fw managed the mcg table*/ |
1457 | if (dev->caps.steering_mode == | |
1458 | MLX4_STEERING_MODE_DEVICE_MANAGED) | |
1459 | return 0; | |
93fc9e1b YP |
1460 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, |
1461 | dev->caps.num_amgms - 1, 0, 0); | |
225c7b1f RD |
1462 | if (err) |
1463 | return err; | |
1464 | ||
1465 | mutex_init(&priv->mcg_table.mutex); | |
1466 | ||
1467 | return 0; | |
1468 | } | |
1469 | ||
1470 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) | |
1471 | { | |
0ff1fb65 HHZ |
1472 | if (dev->caps.steering_mode != |
1473 | MLX4_STEERING_MODE_DEVICE_MANAGED) | |
1474 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); | |
225c7b1f | 1475 | } |