1da177e4
LT
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
2a1d9b7f
RD
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
1da177e4
LT
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
1da177e4
LT
34 */
35
1da177e4
LT
36#include <linux/module.h>
37#include <linux/errno.h>
38#include <linux/slab.h>
e8edc6e0 39#include <linux/workqueue.h>
03db3a2d
MB
40#include <linux/netdevice.h>
41#include <net/addrconf.h>
1da177e4 42
a4d61e84 43#include <rdma/ib_cache.h>
1da177e4
LT
44
45#include "core_priv.h"
46
47struct ib_pkey_cache {
48 int table_len;
49 u16 table[0];
50};
51
1da177e4
LT
52struct ib_update_work {
53 struct work_struct work;
54 struct ib_device *device;
55 u8 port_num;
d291f1a6 56 bool enforce_security;
1da177e4
LT
57};
58
e26be1bf
MS
59union ib_gid zgid;
60EXPORT_SYMBOL(zgid);
03db3a2d 61
03db3a2d
MB
62enum gid_attr_find_mask {
63 GID_ATTR_FIND_MASK_GID = 1UL << 0,
64 GID_ATTR_FIND_MASK_NETDEV = 1UL << 1,
65 GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2,
b39ffa1d 66 GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
03db3a2d
MB
67};
68
b150c386
PP
69enum gid_table_entry_state {
70 GID_TABLE_ENTRY_INVALID = 1,
71 GID_TABLE_ENTRY_VALID = 2,
72 /*
73 * Indicates that the entry is pending removal; there may
74 * still be active users of this GID entry.
75 * When the last user releases its reference to the entry,
76 * the GID entry is detached from the table.
77 */
78 GID_TABLE_ENTRY_PENDING_DEL = 3,
03db3a2d
MB
79};
80
943bd984
PP
81struct roce_gid_ndev_storage {
82 struct rcu_head rcu_head;
83 struct net_device *ndev;
84};
85
03db3a2d 86struct ib_gid_table_entry {
b150c386
PP
87 struct kref kref;
88 struct work_struct del_work;
89 struct ib_gid_attr attr;
90 void *context;
943bd984
PP
91 /* Store the ndev pointer so its reference can be released
92 * later from call_rcu context, because by that time the
93 * gid_table_entry and attr might already have been freed.
94 * ndev_storage itself is freed by the rcu callback.
95 */
96 struct roce_gid_ndev_storage *ndev_storage;
b150c386 97 enum gid_table_entry_state state;
03db3a2d
MB
98};
99
100struct ib_gid_table {
1c36cf91 101 int sz;
03db3a2d
MB
102 /* In RoCE, adding a GID to the table requires:
103 * (a) Find whether this GID already exists.
104 * (b) Find a free slot.
105 * (c) Write the new GID.
106 *
107 * Deleting requires a different set of operations:
108 * (a) Find the GID.
109 * (b) Delete it.
110 *
03db3a2d 111 */
598ff6ba 112 /* Any writer to data_vec must hold this lock and the write side of
b150c386 113 * rwlock. Readers must hold only rwlock. All writers must be in a
598ff6ba 114 * sleepable context.
9c584f04 115 */
1c36cf91 116 struct mutex lock;
b150c386
PP
117 /* rwlock protects data_vec[ix]->state and entry pointer.
118 */
1c36cf91 119 rwlock_t rwlock;
b150c386 120 struct ib_gid_table_entry **data_vec;
1c36cf91
PP
121 /* bit field, each bit indicates the index of default GID */
122 u32 default_gid_indices;
03db3a2d
MB
123};
124
f3906bd3
MB
125static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
126{
3401857e 127 struct ib_event event;
f3906bd3 128
3401857e
PP
129 event.device = ib_dev;
130 event.element.port_num = port;
131 event.event = IB_EVENT_GID_CHANGE;
f3906bd3 132
3401857e 133 ib_dispatch_event(&event);
f3906bd3
MB
134}
135
b39ffa1d
MB
136static const char * const gid_type_str[] = {
137 [IB_GID_TYPE_IB] = "IB/RoCE v1",
7766a99f 138 [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
b39ffa1d
MB
139};
140
141const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
142{
143 if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
144 return gid_type_str[gid_type];
145
146 return "Invalid GID type";
147}
148EXPORT_SYMBOL(ib_cache_gid_type_str);
149
25e62655
PP
150/** rdma_is_zero_gid - Check if given GID is zero or not.
151 * @gid: GID to check
152 * Returns true if given GID is zero, returns false otherwise.
153 */
154bool rdma_is_zero_gid(const union ib_gid *gid)
155{
156 return !memcmp(gid, &zgid, sizeof(*gid));
157}
158EXPORT_SYMBOL(rdma_is_zero_gid);
159
1c36cf91
PP
160/** is_gid_index_default - Check if a given index belongs to
161 * reserved default GIDs or not.
162 * @table: GID table pointer
163 * @index: Index to check in GID table
164 * Returns true if index is one of the reserved default GID index otherwise
165 * returns false.
166 */
167static bool is_gid_index_default(const struct ib_gid_table *table,
168 unsigned int index)
169{
170 return index < 32 && (BIT(index) & table->default_gid_indices);
171}
172
045959db
MB
173int ib_cache_gid_parse_type_str(const char *buf)
174{
175 unsigned int i;
176 size_t len;
177 int err = -EINVAL;
178
179 len = strlen(buf);
180 if (len == 0)
181 return -EINVAL;
182
183 if (buf[len - 1] == '\n')
184 len--;
185
186 for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
187 if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
188 len == strlen(gid_type_str[i])) {
189 err = i;
190 break;
191 }
192
193 return err;
194}
195EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
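/*
 * Illustrative sketch (not part of this file's logic): a configuration
 * handler could map a user supplied string such as "RoCE v2" to a GID type
 * with ib_cache_gid_parse_type_str() and turn it back into a name with
 * ib_cache_gid_type_str(). Names other than these two helpers (user_buf)
 * are hypothetical.
 *
 *	int gid_type = ib_cache_gid_parse_type_str(user_buf);
 *
 *	if (gid_type < 0)
 *		return gid_type;	// -EINVAL for unknown strings
 *	pr_debug("selected GID type: %s\n", ib_cache_gid_type_str(gid_type));
 */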
196
724631a9
PP
197static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
198{
8faea9fd 199 return device->port_data[port].cache.gid;
724631a9
PP
200}
201
b150c386
PP
202static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
203{
204 return !entry;
205}
206
207static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
208{
209 return entry && entry->state == GID_TABLE_ENTRY_VALID;
210}
211
212static void schedule_free_gid(struct kref *kref)
213{
214 struct ib_gid_table_entry *entry =
215 container_of(kref, struct ib_gid_table_entry, kref);
216
217 queue_work(ib_wq, &entry->del_work);
218}
219
943bd984
PP
220static void put_gid_ndev(struct rcu_head *head)
221{
222 struct roce_gid_ndev_storage *storage =
223 container_of(head, struct roce_gid_ndev_storage, rcu_head);
224
225 WARN_ON(!storage->ndev);
226 /* At this point it's safe to release the netdev reference,
227 * as all callers working on gid_attr->ndev are done
228 * using this netdev.
229 */
230 dev_put(storage->ndev);
231 kfree(storage);
232}
233
59d40813 234static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
1da177e4 235{
b150c386
PP
236 struct ib_device *device = entry->attr.device;
237 u8 port_num = entry->attr.port_num;
238 struct ib_gid_table *table = rdma_gid_table(device, port_num);
239
43c7c851
JG
240 dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
241 port_num, entry->attr.index, entry->attr.gid.raw);
b150c386 242
b150c386 243 write_lock_irq(&table->rwlock);
598ff6ba 244
b150c386
PP
245 /*
246 * The only way to avoid overwriting NULL in the table is
247 * to check whether this is still the same entry in the table.
248 * If a new entry was added at this index by the time we free here,
249 * don't overwrite the table entry.
250 */
251 if (entry == table->data_vec[entry->attr.index])
252 table->data_vec[entry->attr.index] = NULL;
253 /* Now this index is ready to be allocated */
254 write_unlock_irq(&table->rwlock);
b150c386 255
943bd984
PP
256 if (entry->ndev_storage)
257 call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
b150c386
PP
258 kfree(entry);
259}
260
59d40813
PP
261static void free_gid_entry(struct kref *kref)
262{
263 struct ib_gid_table_entry *entry =
264 container_of(kref, struct ib_gid_table_entry, kref);
265
266 free_gid_entry_locked(entry);
267}
268
b150c386
PP
269/**
270 * free_gid_work - Release reference to the GID entry
271 * @work: Work structure referring to the GID entry which needs to be
272 * deleted.
273 *
274 * free_gid_work() frees the entry from the HCA's hardware table
275 * if the provider supports it. It releases the reference to the netdevice.
276 */
277static void free_gid_work(struct work_struct *work)
278{
279 struct ib_gid_table_entry *entry =
280 container_of(work, struct ib_gid_table_entry, del_work);
59d40813
PP
281 struct ib_device *device = entry->attr.device;
282 u8 port_num = entry->attr.port_num;
283 struct ib_gid_table *table = rdma_gid_table(device, port_num);
284
285 mutex_lock(&table->lock);
286 free_gid_entry_locked(entry);
287 mutex_unlock(&table->lock);
598ff6ba 288}
03db3a2d 289
b150c386
PP
290static struct ib_gid_table_entry *
291alloc_gid_entry(const struct ib_gid_attr *attr)
598ff6ba
PP
292{
293 struct ib_gid_table_entry *entry;
943bd984 294 struct net_device *ndev;
b150c386
PP
295
296 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
297 if (!entry)
298 return NULL;
943bd984
PP
299
300 ndev = rcu_dereference_protected(attr->ndev, 1);
301 if (ndev) {
302 entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
303 GFP_KERNEL);
304 if (!entry->ndev_storage) {
305 kfree(entry);
306 return NULL;
307 }
308 dev_hold(ndev);
309 entry->ndev_storage->ndev = ndev;
310 }
b150c386
PP
311 kref_init(&entry->kref);
312 memcpy(&entry->attr, attr, sizeof(*attr));
b150c386
PP
313 INIT_WORK(&entry->del_work, free_gid_work);
314 entry->state = GID_TABLE_ENTRY_INVALID;
315 return entry;
316}
317
318static void store_gid_entry(struct ib_gid_table *table,
319 struct ib_gid_table_entry *entry)
320{
321 entry->state = GID_TABLE_ENTRY_VALID;
322
43c7c851
JG
323 dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
324 __func__, entry->attr.port_num, entry->attr.index,
325 entry->attr.gid.raw);
b150c386
PP
326
327 lockdep_assert_held(&table->lock);
328 write_lock_irq(&table->rwlock);
329 table->data_vec[entry->attr.index] = entry;
330 write_unlock_irq(&table->rwlock);
331}
332
bf399c2c
PP
333static void get_gid_entry(struct ib_gid_table_entry *entry)
334{
335 kref_get(&entry->kref);
336}
337
b150c386
PP
338static void put_gid_entry(struct ib_gid_table_entry *entry)
339{
340 kref_put(&entry->kref, schedule_free_gid);
341}
342
59d40813
PP
343static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
344{
345 kref_put(&entry->kref, free_gid_entry);
346}
347
b150c386
PP
348static int add_roce_gid(struct ib_gid_table_entry *entry)
349{
350 const struct ib_gid_attr *attr = &entry->attr;
351 int ret;
03db3a2d 352
598ff6ba 353 if (!attr->ndev) {
43c7c851
JG
354 dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
355 __func__, attr->port_num, attr->index);
598ff6ba 356 return -EINVAL;
03db3a2d 357 }
598ff6ba 358 if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
3023a1e9 359 ret = attr->device->ops.add_gid(attr, &entry->context);
598ff6ba 360 if (ret) {
43c7c851
JG
361 dev_err(&attr->device->dev,
362 "%s GID add failed port=%d index=%d\n",
363 __func__, attr->port_num, attr->index);
b150c386 364 return ret;
598ff6ba 365 }
8e787646 366 }
b150c386 367 return 0;
03db3a2d
MB
368}
369
5c5702e2
PP
370/**
371 * del_gid - Delete GID table entry
372 *
373 * @ib_dev: IB device whose GID entry is to be deleted
374 * @port: Port number of the IB device
375 * @table: GID table of the IB device for a port
376 * @ix: GID entry index to delete
377 *
378 */
379static void del_gid(struct ib_device *ib_dev, u8 port,
380 struct ib_gid_table *table, int ix)
381{
943bd984 382 struct roce_gid_ndev_storage *ndev_storage;
5c5702e2
PP
383 struct ib_gid_table_entry *entry;
384
385 lockdep_assert_held(&table->lock);
386
59bfc59a
JG
387 dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
388 ix, table->data_vec[ix]->attr.gid.raw);
5c5702e2
PP
389
390 write_lock_irq(&table->rwlock);
391 entry = table->data_vec[ix];
392 entry->state = GID_TABLE_ENTRY_PENDING_DEL;
393 /*
394 * For non-RoCE protocols, the GID entry slot is ready to use again.
395 */
396 if (!rdma_protocol_roce(ib_dev, port))
397 table->data_vec[ix] = NULL;
398 write_unlock_irq(&table->rwlock);
399
943bd984
PP
400 ndev_storage = entry->ndev_storage;
401 if (ndev_storage) {
402 entry->ndev_storage = NULL;
403 rcu_assign_pointer(entry->attr.ndev, NULL);
404 call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
405 }
406
be5914c1
PP
407 if (rdma_cap_roce_gid_table(ib_dev, port))
408 ib_dev->ops.del_gid(&entry->attr, &entry->context);
409
5c5702e2
PP
410 put_gid_entry_locked(entry);
411}
412
598ff6ba
PP
413/**
414 * add_modify_gid - Add or modify GID table entry
415 *
416 * @table: GID table in which the GID is to be added or modified
598ff6ba
PP
417 * @attr: Attributes of the GID
418 *
419 * Returns 0 on success or an appropriate error code. It accepts zero
420 * GID addition for non-RoCE ports from HCAs that report them as valid
421 * GIDs. However, such zero GIDs are not added to the cache.
422 */
423static int add_modify_gid(struct ib_gid_table *table,
598ff6ba
PP
424 const struct ib_gid_attr *attr)
425{
b150c386
PP
426 struct ib_gid_table_entry *entry;
427 int ret = 0;
428
429 /*
430 * Invalidate any old entry in the table to make it safe to write to
431 * this index.
432 */
433 if (is_gid_entry_valid(table->data_vec[attr->index]))
5c5702e2 434 del_gid(attr->device, attr->port_num, table, attr->index);
b150c386
PP
435
436 /*
437 * Some HCA's report multiple GID entries with only one valid GID, and
438 * leave other unused entries as the zero GID. Convert zero GIDs to
439 * empty table entries instead of storing them.
440 */
441 if (rdma_is_zero_gid(&attr->gid))
442 return 0;
443
444 entry = alloc_gid_entry(attr);
445 if (!entry)
446 return -ENOMEM;
598ff6ba
PP
447
448 if (rdma_protocol_roce(attr->device, attr->port_num)) {
b150c386 449 ret = add_roce_gid(entry);
598ff6ba 450 if (ret)
b150c386 451 goto done;
598ff6ba
PP
452 }
453
b150c386 454 store_gid_entry(table, entry);
598ff6ba 455 return 0;
b150c386
PP
456
457done:
458 put_gid_entry(entry);
459 return ret;
03db3a2d
MB
460}
461
598ff6ba 462/* rwlock should be read locked, or lock should be held */
03db3a2d
MB
463static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
464 const struct ib_gid_attr *val, bool default_gid,
cee3c4d0 465 unsigned long mask, int *pempty)
03db3a2d 466{
cee3c4d0
MB
467 int i = 0;
468 int found = -1;
469 int empty = pempty ? -1 : 0;
03db3a2d 470
cee3c4d0 471 while (i < table->sz && (found < 0 || empty < 0)) {
b150c386
PP
472 struct ib_gid_table_entry *data = table->data_vec[i];
473 struct ib_gid_attr *attr;
cee3c4d0 474 int curr_index = i;
03db3a2d 475
cee3c4d0 476 i++;
03db3a2d 477
598ff6ba
PP
478 /* find_gid() is used during GID addition, where it is expected
479 * to return a free entry slot which is not a duplicate.
480 * A free entry slot is requested and returned only if pempty is set,
481 * so look up a free slot only when requested.
482 */
483 if (pempty && empty < 0) {
b150c386
PP
484 if (is_gid_entry_free(data) &&
485 default_gid ==
486 is_gid_index_default(table, curr_index)) {
a66ed149
PP
487 /*
488 * Found an invalid (free) entry; allocate it.
489 * If default GID is requested, then our
490 * found slot must be one of the DEFAULT
491 * reserved slots or we fail.
492 * This ensures that only DEFAULT reserved
493 * slots are used for default property GIDs.
494 */
495 empty = curr_index;
598ff6ba
PP
496 }
497 }
498
499 /*
500 * Additionally find_gid() is used to find valid entry during
b150c386
PP
501 * lookup operation; so ignore the entries which are marked as
502 * pending for removal and the entries which are marked as
503 * invalid.
598ff6ba 504 */
b150c386 505 if (!is_gid_entry_valid(data))
cee3c4d0
MB
506 continue;
507
cee3c4d0 508 if (found >= 0)
9c584f04 509 continue;
03db3a2d 510
b150c386 511 attr = &data->attr;
b39ffa1d
MB
512 if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
513 attr->gid_type != val->gid_type)
514 continue;
515
03db3a2d 516 if (mask & GID_ATTR_FIND_MASK_GID &&
b150c386 517 memcmp(gid, &data->attr.gid, sizeof(*gid)))
9c584f04 518 continue;
03db3a2d
MB
519
520 if (mask & GID_ATTR_FIND_MASK_NETDEV &&
521 attr->ndev != val->ndev)
9c584f04 522 continue;
03db3a2d
MB
523
524 if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
1c36cf91 525 is_gid_index_default(table, curr_index) != default_gid)
9c584f04 526 continue;
03db3a2d 527
cee3c4d0 528 found = curr_index;
03db3a2d
MB
529 }
530
cee3c4d0
MB
531 if (pempty)
532 *pempty = empty;
533
534 return found;
03db3a2d
MB
535}
536
537static void make_default_gid(struct net_device *dev, union ib_gid *gid)
538{
539 gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
540 addrconf_ifid_eui48(&gid->raw[8], dev);
541}
542
598ff6ba
PP
543static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
544 union ib_gid *gid, struct ib_gid_attr *attr,
545 unsigned long mask, bool default_gid)
03db3a2d 546{
03db3a2d 547 struct ib_gid_table *table;
1da177e4 548 int ret = 0;
cee3c4d0 549 int empty;
598ff6ba 550 int ix;
1da177e4 551
598ff6ba
PP
552 /* Do not allow adding zero GID in support of
553 * IB spec version 1.3 section 4.1.1 point (6) and
554 * section 12.7.10 and section 12.7.20
555 */
25e62655 556 if (rdma_is_zero_gid(gid))
1da177e4
LT
557 return -EINVAL;
558
724631a9 559 table = rdma_gid_table(ib_dev, port);
598ff6ba
PP
560
561 mutex_lock(&table->lock);
562
563 ix = find_gid(table, gid, attr, default_gid, mask, &empty);
564 if (ix >= 0)
565 goto out_unlock;
566
567 if (empty < 0) {
568 ret = -ENOSPC;
569 goto out_unlock;
570 }
571 attr->device = ib_dev;
572 attr->index = empty;
573 attr->port_num = port;
b150c386
PP
574 attr->gid = *gid;
575 ret = add_modify_gid(table, attr);
598ff6ba
PP
576 if (!ret)
577 dispatch_gid_change_event(ib_dev, port);
578
579out_unlock:
580 mutex_unlock(&table->lock);
581 if (ret)
582 pr_warn("%s: unable to add gid %pI6 error=%d\n",
583 __func__, gid->raw, ret);
584 return ret;
585}
586
587int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
588 union ib_gid *gid, struct ib_gid_attr *attr)
589{
823b23da
PP
590 unsigned long mask = GID_ATTR_FIND_MASK_GID |
591 GID_ATTR_FIND_MASK_GID_TYPE |
592 GID_ATTR_FIND_MASK_NETDEV;
c2261dd7 593
823b23da 594 return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
1da177e4 595}
1da177e4 596
22c01ee4
PP
597static int
598_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
599 union ib_gid *gid, struct ib_gid_attr *attr,
dc5640f2 600 unsigned long mask, bool default_gid)
1da177e4 601{
03db3a2d 602 struct ib_gid_table *table;
598ff6ba 603 int ret = 0;
03db3a2d
MB
604 int ix;
605
724631a9 606 table = rdma_gid_table(ib_dev, port);
03db3a2d
MB
607
608 mutex_lock(&table->lock);
609
dc5640f2 610 ix = find_gid(table, gid, attr, default_gid, mask, NULL);
598ff6ba
PP
611 if (ix < 0) {
612 ret = -EINVAL;
03db3a2d 613 goto out_unlock;
598ff6ba 614 }
03db3a2d 615
598ff6ba
PP
616 del_gid(ib_dev, port, table, ix);
617 dispatch_gid_change_event(ib_dev, port);
03db3a2d
MB
618
619out_unlock:
620 mutex_unlock(&table->lock);
598ff6ba
PP
621 if (ret)
622 pr_debug("%s: can't delete gid %pI6 error=%d\n",
623 __func__, gid->raw, ret);
624 return ret;
03db3a2d
MB
625}
626
22c01ee4
PP
627int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
628 union ib_gid *gid, struct ib_gid_attr *attr)
629{
dc5640f2
PP
630 unsigned long mask = GID_ATTR_FIND_MASK_GID |
631 GID_ATTR_FIND_MASK_GID_TYPE |
632 GID_ATTR_FIND_MASK_DEFAULT |
633 GID_ATTR_FIND_MASK_NETDEV;
634
635 return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
22c01ee4
PP
636}
637
03db3a2d
MB
638int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
639 struct net_device *ndev)
640{
03db3a2d
MB
641 struct ib_gid_table *table;
642 int ix;
9c584f04 643 bool deleted = false;
03db3a2d 644
724631a9 645 table = rdma_gid_table(ib_dev, port);
03db3a2d
MB
646
647 mutex_lock(&table->lock);
648
598ff6ba 649 for (ix = 0; ix < table->sz; ix++) {
b150c386
PP
650 if (is_gid_entry_valid(table->data_vec[ix]) &&
651 table->data_vec[ix]->attr.ndev == ndev) {
598ff6ba
PP
652 del_gid(ib_dev, port, table, ix);
653 deleted = true;
654 }
655 }
03db3a2d
MB
656
657 mutex_unlock(&table->lock);
9c584f04
MB
658
659 if (deleted)
660 dispatch_gid_change_event(ib_dev, port);
661
03db3a2d
MB
662 return 0;
663}
664
6612b498 665/**
c3d71b69
JG
666 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
667 * a valid GID entry for given search parameters. It searches for the specified
668 * GID value in the local software cache.
6612b498
PP
669 * @device: The device to query.
670 * @gid: The GID value to search for.
671 * @gid_type: The GID type to search for.
672 * @port_num: The port number of the device where the GID value should be
673 * searched.
c3d71b69
JG
674 * @ndev: In RoCE, the net device of the device. NULL means ignore.
675 *
676 * Returns the sgid attributes, with a reference held, if the GID is
677 * found, or returns an ERR_PTR on error.
678 * The caller must invoke rdma_put_gid_attr() to release the reference.
6612b498 679 */
c3d71b69
JG
680const struct ib_gid_attr *
681rdma_find_gid_by_port(struct ib_device *ib_dev,
682 const union ib_gid *gid,
683 enum ib_gid_type gid_type,
684 u8 port, struct net_device *ndev)
03db3a2d
MB
685{
686 int local_index;
03db3a2d 687 struct ib_gid_table *table;
b39ffa1d
MB
688 unsigned long mask = GID_ATTR_FIND_MASK_GID |
689 GID_ATTR_FIND_MASK_GID_TYPE;
690 struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
c3d71b69 691 const struct ib_gid_attr *attr;
9c584f04 692 unsigned long flags;
03db3a2d 693
24dc831b 694 if (!rdma_is_port_valid(ib_dev, port))
c3d71b69 695 return ERR_PTR(-ENOENT);
03db3a2d 696
724631a9 697 table = rdma_gid_table(ib_dev, port);
03db3a2d
MB
698
699 if (ndev)
700 mask |= GID_ATTR_FIND_MASK_NETDEV;
701
9c584f04 702 read_lock_irqsave(&table->rwlock, flags);
cee3c4d0 703 local_index = find_gid(table, gid, &val, false, mask, NULL);
03db3a2d 704 if (local_index >= 0) {
c3d71b69
JG
705 get_gid_entry(table->data_vec[local_index]);
706 attr = &table->data_vec[local_index]->attr;
9c584f04 707 read_unlock_irqrestore(&table->rwlock, flags);
c3d71b69 708 return attr;
03db3a2d
MB
709 }
710
9c584f04 711 read_unlock_irqrestore(&table->rwlock, flags);
c3d71b69 712 return ERR_PTR(-ENOENT);
03db3a2d 713}
c3d71b69 714EXPORT_SYMBOL(rdma_find_gid_by_port);
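/*
 * Usage sketch (illustrative only): a caller holding a valid ib_device
 * could look up a GID on a specific port and must drop the reference it
 * gets back. "dev", "port" and "dgid" are hypothetical variables.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(dev, &dgid, IB_GID_TYPE_ROCE_UDP_ENCAP,
 *				     port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... use attr->index, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */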
03db3a2d 715
99b27e3b 716/**
c3d71b69
JG
717 * rdma_find_gid_by_filter - Returns the GID table attribute where a
718 * specified GID value occurs
99b27e3b
MB
719 * @device: The device to query.
720 * @gid: The GID value to search for.
c3d71b69 721 * @port: The port number of the device where the GID value could be
99b27e3b
MB
722 * searched.
723 * @filter: The filter function is executed on any matching GID in the table.
724 * If the filter function returns true, the corresponding GID attribute is returned;
725 * otherwise, we continue searching the GID table. It's guaranteed that
726 * while filter is executed, ndev field is valid and the structure won't
727 * change. filter is executed in an atomic context. filter must not be NULL.
99b27e3b 728 *
c3d71b69 729 * rdma_find_gid_by_filter() searches for the specified GID value
99b27e3b 730 * of which the filter function returns true in the port's GID table.
99b27e3b
MB
731 *
732 */
c3d71b69
JG
733const struct ib_gid_attr *rdma_find_gid_by_filter(
734 struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
735 bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
736 void *),
737 void *context)
99b27e3b 738{
c3d71b69 739 const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
99b27e3b 740 struct ib_gid_table *table;
9c584f04 741 unsigned long flags;
c3d71b69 742 unsigned int i;
99b27e3b 743
83f6f8d2
JG
744 if (!rdma_is_port_valid(ib_dev, port))
745 return ERR_PTR(-EINVAL);
99b27e3b 746
724631a9 747 table = rdma_gid_table(ib_dev, port);
99b27e3b 748
9c584f04 749 read_lock_irqsave(&table->rwlock, flags);
99b27e3b 750 for (i = 0; i < table->sz; i++) {
83f6f8d2 751 struct ib_gid_table_entry *entry = table->data_vec[i];
99b27e3b 752
83f6f8d2 753 if (!is_gid_entry_valid(entry))
151ed9d7 754 continue;
99b27e3b 755
83f6f8d2 756 if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
151ed9d7 757 continue;
99b27e3b 758
83f6f8d2
JG
759 if (filter(gid, &entry->attr, context)) {
760 get_gid_entry(entry);
761 res = &entry->attr;
99b27e3b 762 break;
151ed9d7 763 }
99b27e3b 764 }
9c584f04 765 read_unlock_irqrestore(&table->rwlock, flags);
c3d71b69
JG
766 return res;
767}
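/*
 * Illustrative sketch (not part of this file): a filter that matches only
 * entries of a given GID type. The filter runs under the table rwlock in
 * atomic context, so it must not sleep. All names below are hypothetical.
 *
 *	static bool match_gid_type(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr, void *ctx)
 *	{
 *		return attr->gid_type == *(enum ib_gid_type *)ctx;
 *	}
 *
 *	enum ib_gid_type want = IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	const struct ib_gid_attr *attr =
 *		rdma_find_gid_by_filter(dev, &dgid, port, match_gid_type, &want);
 *
 *	if (!IS_ERR(attr))
 *		rdma_put_gid_attr(attr);
 */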
768
03db3a2d
MB
769static struct ib_gid_table *alloc_gid_table(int sz)
770{
b150c386 771 struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
9c584f04 772
03db3a2d
MB
773 if (!table)
774 return NULL;
775
776 table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
777 if (!table->data_vec)
778 goto err_free_table;
779
780 mutex_init(&table->lock);
781
782 table->sz = sz;
9c584f04 783 rwlock_init(&table->rwlock);
03db3a2d
MB
784 return table;
785
786err_free_table:
787 kfree(table);
788 return NULL;
789}
790
8faea9fd 791static void release_gid_table(struct ib_device *device,
b150c386 792 struct ib_gid_table *table)
03db3a2d 793{
b150c386
PP
794 bool leak = false;
795 int i;
796
797 if (!table)
798 return;
799
800 for (i = 0; i < table->sz; i++) {
801 if (is_gid_entry_free(table->data_vec[i]))
802 continue;
803 if (kref_read(&table->data_vec[i]->kref) > 1) {
43c7c851
JG
804 dev_err(&device->dev,
805 "GID entry ref leak for index %d ref=%d\n", i,
806 kref_read(&table->data_vec[i]->kref));
b150c386
PP
807 leak = true;
808 }
03db3a2d 809 }
b150c386
PP
810 if (leak)
811 return;
812
813 kfree(table->data_vec);
814 kfree(table);
03db3a2d
MB
815}
816
817static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
818 struct ib_gid_table *table)
819{
820 int i;
9c584f04 821 bool deleted = false;
03db3a2d
MB
822
823 if (!table)
824 return;
825
598ff6ba 826 mutex_lock(&table->lock);
03db3a2d 827 for (i = 0; i < table->sz; ++i) {
b150c386 828 if (is_gid_entry_valid(table->data_vec[i])) {
598ff6ba
PP
829 del_gid(ib_dev, port, table, i);
830 deleted = true;
831 }
03db3a2d 832 }
598ff6ba 833 mutex_unlock(&table->lock);
9c584f04
MB
834
835 if (deleted)
836 dispatch_gid_change_event(ib_dev, port);
03db3a2d
MB
837}
838
839void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
840 struct net_device *ndev,
b39ffa1d 841 unsigned long gid_type_mask,
03db3a2d
MB
842 enum ib_cache_gid_default_mode mode)
843{
dc5640f2 844 union ib_gid gid = { };
03db3a2d 845 struct ib_gid_attr gid_attr;
b39ffa1d 846 unsigned int gid_type;
598ff6ba 847 unsigned long mask;
03db3a2d 848
dc5640f2
PP
849 mask = GID_ATTR_FIND_MASK_GID_TYPE |
850 GID_ATTR_FIND_MASK_DEFAULT |
851 GID_ATTR_FIND_MASK_NETDEV;
03db3a2d
MB
852 memset(&gid_attr, 0, sizeof(gid_attr));
853 gid_attr.ndev = ndev;
854
b39ffa1d 855 for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
b39ffa1d
MB
856 if (1UL << gid_type & ~gid_type_mask)
857 continue;
858
859 gid_attr.gid_type = gid_type;
860
b39ffa1d 861 if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
dc5640f2 862 make_default_gid(ndev, &gid);
598ff6ba
PP
863 __ib_cache_gid_add(ib_dev, port, &gid,
864 &gid_attr, mask, true);
865 } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
dc5640f2
PP
866 _ib_cache_gid_del(ib_dev, port, &gid,
867 &gid_attr, mask, true);
9c584f04 868 }
b39ffa1d 869 }
03db3a2d
MB
870}
871
25a1cd3f
PP
872static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
873 struct ib_gid_table *table)
03db3a2d 874{
b39ffa1d
MB
875 unsigned int i;
876 unsigned long roce_gid_type_mask;
877 unsigned int num_default_gids;
b39ffa1d
MB
878
879 roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
880 num_default_gids = hweight_long(roce_gid_type_mask);
1c36cf91
PP
881 /* Reserve starting indices for default GIDs */
882 for (i = 0; i < num_default_gids && i < table->sz; i++)
883 table->default_gid_indices |= BIT(i);
03db3a2d 884}
03db3a2d 885
be0e8f34
PP
886
887static void gid_table_release_one(struct ib_device *ib_dev)
888{
8faea9fd 889 unsigned int p;
be0e8f34 890
8faea9fd
JG
891 rdma_for_each_port (ib_dev, p) {
892 release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
893 ib_dev->port_data[p].cache.gid = NULL;
be0e8f34 894 }
03db3a2d
MB
895}
896
897static int _gid_table_setup_one(struct ib_device *ib_dev)
898{
21d6454a 899 struct ib_gid_table *table;
8faea9fd 900 unsigned int rdma_port;
03db3a2d 901
8faea9fd 902 rdma_for_each_port (ib_dev, rdma_port) {
8ceb1357
JG
903 table = alloc_gid_table(
904 ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
be0e8f34 905 if (!table)
03db3a2d 906 goto rollback_table_setup;
03db3a2d 907
25a1cd3f 908 gid_table_reserve_default(ib_dev, rdma_port, table);
8faea9fd 909 ib_dev->port_data[rdma_port].cache.gid = table;
03db3a2d 910 }
03db3a2d
MB
911 return 0;
912
913rollback_table_setup:
be0e8f34
PP
914 gid_table_release_one(ib_dev);
915 return -ENOMEM;
03db3a2d
MB
916}
917
918static void gid_table_cleanup_one(struct ib_device *ib_dev)
919{
8faea9fd 920 unsigned int p;
03db3a2d 921
8faea9fd
JG
922 rdma_for_each_port (ib_dev, p)
923 cleanup_gid_table_port(ib_dev, p,
924 ib_dev->port_data[p].cache.gid);
03db3a2d
MB
925}
926
927static int gid_table_setup_one(struct ib_device *ib_dev)
928{
929 int err;
930
931 err = _gid_table_setup_one(ib_dev);
932
933 if (err)
934 return err;
935
32f69e4b 936 rdma_roce_rescan_device(ib_dev);
03db3a2d
MB
937
938 return err;
939}
940
6612b498 941/**
c3d71b69
JG
942 * rdma_query_gid - Read the GID content from the GID software cache
943 * @device: Device to query the GID
944 * @port_num: Port number of the device
945 * @index: Index of the GID table entry to read
946 * @gid: Pointer to GID where to store the entry's GID
947 *
948 * rdma_query_gid() only reads the GID entry content for requested device,
949 * port and index. It reads for IB, RoCE and iWarp link layers. It doesn't
950 * hold any reference to the GID table entry in the HCA or software cache.
951 *
952 * Returns 0 on success or appropriate error code.
953 *
954 */
955int rdma_query_gid(struct ib_device *device, u8 port_num,
956 int index, union ib_gid *gid)
957{
958 struct ib_gid_table *table;
959 unsigned long flags;
960 int res = -EINVAL;
961
962 if (!rdma_is_port_valid(device, port_num))
963 return -EINVAL;
964
965 table = rdma_gid_table(device, port_num);
966 read_lock_irqsave(&table->rwlock, flags);
967
968 if (index < 0 || index >= table->sz ||
969 !is_gid_entry_valid(table->data_vec[index]))
970 goto done;
971
972 memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
973 res = 0;
974
975done:
976 read_unlock_irqrestore(&table->rwlock, flags);
977 return res;
978}
979EXPORT_SYMBOL(rdma_query_gid);
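/*
 * Usage sketch (illustrative only): rdma_query_gid() copies the GID value
 * out of the software cache without taking any reference, so nothing has
 * to be released afterwards. "dev" and "port" are hypothetical.
 *
 *	union ib_gid gid;
 *	int ret = rdma_query_gid(dev, port, 0, &gid);
 *
 *	if (!ret)
 *		pr_debug("GID[0] = %pI6\n", gid.raw);
 */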
980
981/**
982 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
6612b498
PP
983 * @device: The device to query.
984 * @gid: The GID value to search for.
985 * @gid_type: The GID type to search for.
986 * @ndev: In RoCE, the net device of the device. NULL means ignore.
6612b498 987 *
c3d71b69
JG
988 * rdma_find_gid() searches for the specified GID value in the software cache.
989 *
990 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
991 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
992 *
6612b498 993 */
c3d71b69
JG
994const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
995 const union ib_gid *gid,
996 enum ib_gid_type gid_type,
997 struct net_device *ndev)
03db3a2d 998{
c3d71b69
JG
999 unsigned long mask = GID_ATTR_FIND_MASK_GID |
1000 GID_ATTR_FIND_MASK_GID_TYPE;
1001 struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
8faea9fd 1002 unsigned int p;
c3d71b69
JG
1003
1004 if (ndev)
1005 mask |= GID_ATTR_FIND_MASK_NETDEV;
1006
8faea9fd 1007 rdma_for_each_port(device, p) {
c3d71b69
JG
1008 struct ib_gid_table *table;
1009 unsigned long flags;
1010 int index;
1011
8faea9fd 1012 table = device->port_data[p].cache.gid;
c3d71b69
JG
1013 read_lock_irqsave(&table->rwlock, flags);
1014 index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
1015 if (index >= 0) {
1016 const struct ib_gid_attr *attr;
1017
1018 get_gid_entry(table->data_vec[index]);
1019 attr = &table->data_vec[index]->attr;
1020 read_unlock_irqrestore(&table->rwlock, flags);
1021 return attr;
1022 }
1023 read_unlock_irqrestore(&table->rwlock, flags);
1024 }
1025
1026 return ERR_PTR(-ENOENT);
1027}
1028EXPORT_SYMBOL(rdma_find_gid);
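/*
 * Usage sketch (illustrative only): unlike rdma_find_gid_by_port(), this
 * walks every port of the device. The caller owns the returned reference.
 * "dev" and "dgid" are hypothetical.
 *
 *	const struct ib_gid_attr *attr =
 *		rdma_find_gid(dev, &dgid, IB_GID_TYPE_IB, NULL);
 *
 *	if (!IS_ERR(attr)) {
 *		u8 port = attr->port_num;
 *		rdma_put_gid_attr(attr);
 *	}
 */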
1029
1da177e4
LT
1030int ib_get_cached_pkey(struct ib_device *device,
1031 u8 port_num,
1032 int index,
1033 u16 *pkey)
1034{
1035 struct ib_pkey_cache *cache;
1036 unsigned long flags;
1037 int ret = 0;
1038
24dc831b 1039 if (!rdma_is_port_valid(device, port_num))
1da177e4
LT
1040 return -EINVAL;
1041
1042 read_lock_irqsave(&device->cache.lock, flags);
1043
8faea9fd 1044 cache = device->port_data[port_num].cache.pkey;
1da177e4
LT
1045
1046 if (index < 0 || index >= cache->table_len)
1047 ret = -EINVAL;
1048 else
1049 *pkey = cache->table[index];
1050
1051 read_unlock_irqrestore(&device->cache.lock, flags);
1052
1053 return ret;
1054}
1055EXPORT_SYMBOL(ib_get_cached_pkey);
1056
883c71fe
DJ
1057int ib_get_cached_subnet_prefix(struct ib_device *device,
1058 u8 port_num,
1059 u64 *sn_pfx)
1060{
1061 unsigned long flags;
883c71fe 1062
6d5b2047 1063 if (!rdma_is_port_valid(device, port_num))
883c71fe
DJ
1064 return -EINVAL;
1065
883c71fe 1066 read_lock_irqsave(&device->cache.lock, flags);
8faea9fd 1067 *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
883c71fe
DJ
1068 read_unlock_irqrestore(&device->cache.lock, flags);
1069
1070 return 0;
1071}
1072EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
1073
1da177e4
LT
1074int ib_find_cached_pkey(struct ib_device *device,
1075 u8 port_num,
1076 u16 pkey,
1077 u16 *index)
1078{
1079 struct ib_pkey_cache *cache;
1080 unsigned long flags;
1081 int i;
1082 int ret = -ENOENT;
ff7166c4 1083 int partial_ix = -1;
1da177e4 1084
24dc831b 1085 if (!rdma_is_port_valid(device, port_num))
1da177e4
LT
1086 return -EINVAL;
1087
1088 read_lock_irqsave(&device->cache.lock, flags);
1089
8faea9fd 1090 cache = device->port_data[port_num].cache.pkey;
1da177e4
LT
1091
1092 *index = -1;
1093
1094 for (i = 0; i < cache->table_len; ++i)
1095 if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
ff7166c4
JM
1096 if (cache->table[i] & 0x8000) {
1097 *index = i;
1098 ret = 0;
1099 break;
1100 } else
1101 partial_ix = i;
1da177e4
LT
1102 }
1103
ff7166c4
JM
1104 if (ret && partial_ix >= 0) {
1105 *index = partial_ix;
1106 ret = 0;
1107 }
1108
1da177e4
LT
1109 read_unlock_irqrestore(&device->cache.lock, flags);
1110
1111 return ret;
1112}
1113EXPORT_SYMBOL(ib_find_cached_pkey);
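/*
 * Usage sketch (illustrative only): the search above compares only the low
 * 15 bits of the P_Key, preferring an entry with the full-membership bit
 * (0x8000) set and falling back to a partial-membership match. 0xffff is
 * the default full-membership P_Key; "dev" and "port" are hypothetical.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_pkey(dev, port, 0xffff, &index))
 *		pr_debug("default pkey at index %u\n", index);
 */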
1114
73aaa741
JM
1115int ib_find_exact_cached_pkey(struct ib_device *device,
1116 u8 port_num,
1117 u16 pkey,
1118 u16 *index)
1119{
1120 struct ib_pkey_cache *cache;
1121 unsigned long flags;
1122 int i;
1123 int ret = -ENOENT;
1124
24dc831b 1125 if (!rdma_is_port_valid(device, port_num))
73aaa741
JM
1126 return -EINVAL;
1127
1128 read_lock_irqsave(&device->cache.lock, flags);
1129
8faea9fd 1130 cache = device->port_data[port_num].cache.pkey;
73aaa741
JM
1131
1132 *index = -1;
1133
1134 for (i = 0; i < cache->table_len; ++i)
1135 if (cache->table[i] == pkey) {
1136 *index = i;
1137 ret = 0;
1138 break;
1139 }
1140
1141 read_unlock_irqrestore(&device->cache.lock, flags);
1142
1143 return ret;
1144}
1145EXPORT_SYMBOL(ib_find_exact_cached_pkey);
1146
6fb9cdbf
JM
1147int ib_get_cached_lmc(struct ib_device *device,
1148 u8 port_num,
1149 u8 *lmc)
1150{
1151 unsigned long flags;
1152 int ret = 0;
1153
24dc831b 1154 if (!rdma_is_port_valid(device, port_num))
6fb9cdbf
JM
1155 return -EINVAL;
1156
1157 read_lock_irqsave(&device->cache.lock, flags);
8faea9fd 1158 *lmc = device->port_data[port_num].cache.lmc;
6fb9cdbf
JM
1159 read_unlock_irqrestore(&device->cache.lock, flags);
1160
1161 return ret;
1162}
1163EXPORT_SYMBOL(ib_get_cached_lmc);
1164
9e2c3f1c
JW
1165int ib_get_cached_port_state(struct ib_device *device,
1166 u8 port_num,
1167 enum ib_port_state *port_state)
1168{
1169 unsigned long flags;
1170 int ret = 0;
1171
6d5b2047 1172 if (!rdma_is_port_valid(device, port_num))
9e2c3f1c
JW
1173 return -EINVAL;
1174
1175 read_lock_irqsave(&device->cache.lock, flags);
8faea9fd 1176 *port_state = device->port_data[port_num].cache.port_state;
9e2c3f1c
JW
1177 read_unlock_irqrestore(&device->cache.lock, flags);
1178
1179 return ret;
1180}
1181EXPORT_SYMBOL(ib_get_cached_port_state);
1182
bf399c2c
PP
1183/**
1184 * rdma_get_gid_attr - Returns GID attributes for a port of a device
1185 * at a requested gid_index, if a valid GID entry exists.
1186 * @device: The device to query.
1187 * @port_num: The port number on the device where the GID value
1188 * is to be queried.
1189 * @index: Index of the GID table entry whose attributes are to
1190 * be queried.
1191 *
1192 * rdma_get_gid_attr() acquires reference count of gid attributes from the
1193 * cached GID table. Caller must invoke rdma_put_gid_attr() to release
1194 * reference to gid attribute regardless of link layer.
1195 *
1196 * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
1197 * code.
1198 */
1199const struct ib_gid_attr *
1200rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
1201{
1202 const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
1203 struct ib_gid_table *table;
1204 unsigned long flags;
1205
1206 if (!rdma_is_port_valid(device, port_num))
1207 return ERR_PTR(-EINVAL);
1208
1209 table = rdma_gid_table(device, port_num);
1210 if (index < 0 || index >= table->sz)
1211 return ERR_PTR(-EINVAL);
1212
1213 read_lock_irqsave(&table->rwlock, flags);
1214 if (!is_gid_entry_valid(table->data_vec[index]))
1215 goto done;
1216
1217 get_gid_entry(table->data_vec[index]);
1218 attr = &table->data_vec[index]->attr;
1219done:
1220 read_unlock_irqrestore(&table->rwlock, flags);
1221 return attr;
1222}
1223EXPORT_SYMBOL(rdma_get_gid_attr);
1224
1225/**
1226 * rdma_put_gid_attr - Release reference to the GID attribute
1227 * @attr: Pointer to the GID attribute whose reference
1228 * needs to be released.
1229 *
1230 * rdma_put_gid_attr() must be used to release a reference that was
1231 * acquired using rdma_get_gid_attr() or any API
1232 * which returns a pointer to the ib_gid_attr, regardless of whether the
1233 * link layer is IB or RoCE.
1234 *
1235 */
1236void rdma_put_gid_attr(const struct ib_gid_attr *attr)
1237{
1238 struct ib_gid_table_entry *entry =
1239 container_of(attr, struct ib_gid_table_entry, attr);
1240
1241 put_gid_entry(entry);
1242}
1243EXPORT_SYMBOL(rdma_put_gid_attr);
1244
1245/**
1246 * rdma_hold_gid_attr - Get reference to existing GID attribute
1247 *
1248 * @attr: Pointer to the GID attribute whose reference
1249 * needs to be taken.
1250 *
1251 * Increase the reference count to a GID attribute to keep it from being
1252 * freed. Callers are required to already be holding a reference to attribute.
1253 *
1254 */
1255void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
1256{
1257 struct ib_gid_table_entry *entry =
1258 container_of(attr, struct ib_gid_table_entry, attr);
1259
1260 get_gid_entry(entry);
1261}
1262EXPORT_SYMBOL(rdma_hold_gid_attr);
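/*
 * Reference lifecycle sketch (illustrative only): a reference obtained with
 * rdma_get_gid_attr() keeps the underlying table entry alive even if the
 * GID is later deleted from the table, and must be balanced with
 * rdma_put_gid_attr(). rdma_hold_gid_attr() may only be called while a
 * reference is already held. "dev" and "port" are hypothetical.
 *
 *	const struct ib_gid_attr *attr = rdma_get_gid_attr(dev, port, 0);
 *
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	rdma_hold_gid_attr(attr);	// e.g. to stash a second user
 *	rdma_put_gid_attr(attr);	// drop the extra reference
 *	rdma_put_gid_attr(attr);	// drop the original reference
 */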
1263
d6b1764a
PP
1264/**
1265 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
1266 * which must be in UP state.
1267 *
1268 * @attr:Pointer to the GID attribute
1269 *
1270 * Returns pointer to netdevice if the netdevice was attached to GID and
1271 * netdevice is in UP state. Caller must hold RCU lock as this API
1272 * reads the netdev flags which can change while netdevice migrates to
1273 * different net namespace. Returns ERR_PTR with error code otherwise.
1274 *
1275 */
1276struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
1277{
1278 struct ib_gid_table_entry *entry =
1279 container_of(attr, struct ib_gid_table_entry, attr);
1280 struct ib_device *device = entry->attr.device;
1281 struct net_device *ndev = ERR_PTR(-ENODEV);
1282 u8 port_num = entry->attr.port_num;
1283 struct ib_gid_table *table;
1284 unsigned long flags;
1285 bool valid;
1286
1287 table = rdma_gid_table(device, port_num);
1288
1289 read_lock_irqsave(&table->rwlock, flags);
1290 valid = is_gid_entry_valid(table->data_vec[attr->index]);
943bd984
PP
1291 if (valid) {
1292 ndev = rcu_dereference(attr->ndev);
1293 if (!ndev ||
1294 (ndev && ((READ_ONCE(ndev->flags) & IFF_UP) == 0)))
1295 ndev = ERR_PTR(-ENODEV);
1296 }
d6b1764a
PP
1297 read_unlock_irqrestore(&table->rwlock, flags);
1298 return ndev;
1299}
adb4a57a 1300EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
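/*
 * Usage sketch (illustrative only): the caller must be inside an RCU
 * read-side critical section and must not use the returned netdev after
 * rcu_read_unlock(). "attr" is a GID attribute reference already held by
 * the (hypothetical) caller.
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		pr_debug("GID netdev: %s\n", ndev->name);
 *	rcu_read_unlock();
 */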
d6b1764a 1301
a70c0739
PP
1302static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
1303{
1304 u16 *vlan_id = data;
1305
1306 if (is_vlan_dev(lower_dev))
1307 *vlan_id = vlan_dev_vlan_id(lower_dev);
1308
1309 /* We are interested only in the first-level vlan device, so
1310 * always return 1 to stop iterating over next-level devices.
1311 */
1312 return 1;
1313}
1314
1315/**
1316 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
1317 * of a GID entry.
1318 *
1319 * @attr: GID attribute pointer whose L2 fields to be read
1320 * @vlan_id: Pointer to vlan id to fill up if the GID entry has
1321 * vlan id. It is optional.
1322 * @smac: Pointer to smac to fill up for a GID entry. It is optional.
1323 *
1324 * rdma_read_gid_l2_fields() returns 0 on success and returns vlan id
1325 * (if gid entry has vlan) and source MAC, or returns error.
1326 */
1327int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
1328 u16 *vlan_id, u8 *smac)
1329{
1330 struct net_device *ndev;
1331
943bd984
PP
1332 rcu_read_lock();
1333 ndev = rcu_dereference(attr->ndev);
1334 if (!ndev) {
1335 rcu_read_unlock();
1336 return -ENODEV;
1337 }
a70c0739
PP
1338 if (smac)
1339 ether_addr_copy(smac, ndev->dev_addr);
1340 if (vlan_id) {
1341 *vlan_id = 0xffff;
1342 if (is_vlan_dev(ndev)) {
1343 *vlan_id = vlan_dev_vlan_id(ndev);
1344 } else {
1345 /* If the netdev is an upper device and its lower
1346 * device is a vlan device, consider the vlan id of
1347 * the lower vlan device for this gid entry.
1348 */
a70c0739
PP
1349 netdev_walk_all_lower_dev_rcu(attr->ndev,
1350 get_lower_dev_vlan, vlan_id);
a70c0739
PP
1351 }
1352 }
943bd984 1353 rcu_read_unlock();
a70c0739
PP
1354 return 0;
1355}
1356EXPORT_SYMBOL(rdma_read_gid_l2_fields);
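/*
 * Usage sketch (illustrative only): both output parameters are optional;
 * a vlan_id of 0xffff means the GID's netdev carries no vlan tag. "attr"
 * is a hypothetical GID attribute reference held by the caller.
 *
 *	u8 smac[ETH_ALEN];
 *	u16 vlan_id;
 *
 *	if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac) &&
 *	    vlan_id != 0xffff)
 *		pr_debug("GID is on vlan %u\n", vlan_id);
 */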
1357
598ff6ba
PP
1358static int config_non_roce_gid_cache(struct ib_device *device,
1359 u8 port, int gid_tbl_len)
1360{
1361 struct ib_gid_attr gid_attr = {};
1362 struct ib_gid_table *table;
598ff6ba
PP
1363 int ret = 0;
1364 int i;
1365
1366 gid_attr.device = device;
1367 gid_attr.port_num = port;
724631a9 1368 table = rdma_gid_table(device, port);
598ff6ba
PP
1369
1370 mutex_lock(&table->lock);
1371 for (i = 0; i < gid_tbl_len; ++i) {
3023a1e9 1372 if (!device->ops.query_gid)
598ff6ba 1373 continue;
3023a1e9 1374 ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
598ff6ba 1375 if (ret) {
43c7c851
JG
1376 dev_warn(&device->dev,
1377 "query_gid failed (%d) for index %d\n", ret,
1378 i);
598ff6ba
PP
1379 goto err;
1380 }
1381 gid_attr.index = i;
b150c386 1382 add_modify_gid(table, &gid_attr);
598ff6ba
PP
1383 }
1384err:
1385 mutex_unlock(&table->lock);
1386 return ret;
1387}
1388
1da177e4 1389static void ib_cache_update(struct ib_device *device,
d291f1a6
DJ
1390 u8 port,
1391 bool enforce_security)
1da177e4
LT
1392{
1393 struct ib_port_attr *tprops = NULL;
1394 struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
1da177e4
LT
1395 int i;
1396 int ret;
03db3a2d 1397
24dc831b 1398 if (!rdma_is_port_valid(device, port))
03db3a2d
MB
1399 return;
1400
1da177e4
LT
1401 tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
1402 if (!tprops)
1403 return;
1404
1405 ret = ib_query_port(device, port, tprops);
1406 if (ret) {
43c7c851 1407 dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
1da177e4
LT
1408 goto err;
1409 }
1410
598ff6ba
PP
1411 if (!rdma_protocol_roce(device, port)) {
1412 ret = config_non_roce_gid_cache(device, port,
1413 tprops->gid_tbl_len);
1414 if (ret)
1415 goto err;
1416 }
1417
acafe7e3
KC
1418 pkey_cache = kmalloc(struct_size(pkey_cache, table,
1419 tprops->pkey_tbl_len),
1420 GFP_KERNEL);
1da177e4
LT
1421 if (!pkey_cache)
1422 goto err;
1423
1424 pkey_cache->table_len = tprops->pkey_tbl_len;
1425
1da177e4
LT
1426 for (i = 0; i < pkey_cache->table_len; ++i) {
1427 ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
1428 if (ret) {
43c7c851
JG
1429 dev_warn(&device->dev,
1430 "ib_query_pkey failed (%d) for index %d\n",
1431 ret, i);
1da177e4
LT
1432 goto err;
1433 }
1434 }
1435
1da177e4
LT
1436 write_lock_irq(&device->cache.lock);
1437
8faea9fd 1438 old_pkey_cache = device->port_data[port].cache.pkey;
1da177e4 1439
8faea9fd
JG
1440 device->port_data[port].cache.pkey = pkey_cache;
1441 device->port_data[port].cache.lmc = tprops->lmc;
1442 device->port_data[port].cache.port_state = tprops->state;
6fb9cdbf 1443
8faea9fd 1444 device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
1da177e4
LT
1445 write_unlock_irq(&device->cache.lock);
1446
d291f1a6
DJ
1447 if (enforce_security)
1448 ib_security_cache_change(device,
1449 port,
1450 tprops->subnet_prefix);
1451
1da177e4 1452 kfree(old_pkey_cache);
1da177e4
LT
1453 kfree(tprops);
1454 return;
1455
1456err:
1457 kfree(pkey_cache);
1da177e4
LT
1458 kfree(tprops);
1459}
1460
c4028958 1461static void ib_cache_task(struct work_struct *_work)
1da177e4 1462{
c4028958
DH
1463 struct ib_update_work *work =
1464 container_of(_work, struct ib_update_work, work);
1da177e4 1465
d291f1a6
DJ
1466 ib_cache_update(work->device,
1467 work->port_num,
1468 work->enforce_security);
1da177e4
LT
1469 kfree(work);
1470}
1471
1472static void ib_cache_event(struct ib_event_handler *handler,
1473 struct ib_event *event)
1474{
1475 struct ib_update_work *work;
1476
1477 if (event->event == IB_EVENT_PORT_ERR ||
1478 event->event == IB_EVENT_PORT_ACTIVE ||
1479 event->event == IB_EVENT_LID_CHANGE ||
1480 event->event == IB_EVENT_PKEY_CHANGE ||
761d90ed
OG
1481 event->event == IB_EVENT_CLIENT_REREGISTER ||
1482 event->event == IB_EVENT_GID_CHANGE) {
1da177e4
LT
1483 work = kmalloc(sizeof *work, GFP_ATOMIC);
1484 if (work) {
c4028958 1485 INIT_WORK(&work->work, ib_cache_task);
1da177e4
LT
1486 work->device = event->device;
1487 work->port_num = event->element.port_num;
d291f1a6
DJ
1488 if (event->event == IB_EVENT_PKEY_CHANGE ||
1489 event->event == IB_EVENT_GID_CHANGE)
1490 work->enforce_security = true;
1491 else
1492 work->enforce_security = false;
1493
f0626710 1494 queue_work(ib_wq, &work->work);
1da177e4
LT
1495 }
1496 }
1497}
1498
03db3a2d 1499int ib_cache_setup_one(struct ib_device *device)
1da177e4 1500{
ea1075ed 1501 unsigned int p;
03db3a2d 1502 int err;
1da177e4
LT
1503
1504 rwlock_init(&device->cache.lock);
1505
03db3a2d 1506 err = gid_table_setup_one(device);
8faea9fd 1507 if (err)
dcc9881e 1508 return err;
03db3a2d 1509
ea1075ed
JG
1510 rdma_for_each_port (device, p)
1511 ib_cache_update(device, p, true);
1da177e4
LT
1512
1513 INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
1514 device, ib_cache_event);
dcc9881e 1515 ib_register_event_handler(&device->cache.event_handler);
03db3a2d 1516 return 0;
1da177e4
LT
1517}
1518
03db3a2d 1519void ib_cache_release_one(struct ib_device *device)
1da177e4 1520{
8faea9fd 1521 unsigned int p;
d45f89d5 1522
03db3a2d
MB
1523 /*
1524 * The release function frees all the cache elements.
1525 * This function should be called as part of freeing
1526 * all the device's resources when the cache can no
1527 * longer be accessed.
1528 */
8faea9fd
JG
1529 rdma_for_each_port (device, p)
1530 kfree(device->port_data[p].cache.pkey);
03db3a2d
MB
1531
1532 gid_table_release_one(device);
1da177e4
LT
1533}
1534
03db3a2d
MB
1535void ib_cache_cleanup_one(struct ib_device *device)
1536{
1537 /* The cleanup function unregisters the event handler,
1538 * waits for all in-progress workqueue elements and cleans
1539 * up the GID cache. This function should be called after
1540 * the device was removed from the devices list and all
1541 * clients were removed, so the cache exists but is
1542 * non-functional and shouldn't be updated anymore.
1543 */
1544 ib_unregister_event_handler(&device->cache.event_handler);
1545 flush_workqueue(ib_wq);
1546 gid_table_cleanup_one(device);
b150c386
PP
1547
1548 /*
1549 * Flush the wq a second time for any pending GID delete work.
1550 */
1551 flush_workqueue(ib_wq);
03db3a2d 1552}