/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int		table_len;
	u16		table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8		   port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int		     sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find whether this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by taking this mutex in all
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex	     lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

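/*
 * Illustrative write-side locking pattern for RoCE GID updates. This is a
 * sketch of what the helpers below already do (see ib_cache_gid_add()), not
 * an additional API:
 *
 *	mutex_lock(&table->lock);		sleepable, serializes writers
 *	write_lock_irq(&table->rwlock);		excludes readers of data_vec
 *	... find_gid() / write_gid() ...	write_gid() drops and retakes
 *						rwlock around the provider call
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 */
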
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

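/*
 * Example (illustrative, e.g. from a sysfs store handler; the literal string
 * matches gid_type_str[] above and a trailing newline is tolerated):
 *
 *	int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	if (type < 0)
 *		return type;	returns -EINVAL on an unknown string
 *	type is IB_GID_TYPE_ROCE_UDP_ENCAP here
 */
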
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* In the RoCE case (rdma_cap_roce_gid_table()), this function must
	 * be protected by a sleep-able lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid) {
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

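/*
 * For reference: make_default_gid() yields an IPv6 link-local style GID,
 * fe80::/64 plus an interface identifier derived from the netdev's MAC by
 * addrconf_ifid_eui48(). For example (illustrative MAC, the EUI-64 U/L bit
 * handling is done by addrconf_ifid_eui48() itself), 02:11:22:33:44:55 maps
 * to roughly fe80::11:22ff:fe33:4455.
 */
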
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

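/*
 * Illustrative pairing of the add/del entry points (a sketch; the gid value
 * and owning netdev are assumed to come from a roce_gid_mgmt-style caller,
 * which this file does not enforce):
 *
 *	struct ib_gid_attr attr = {
 *		.ndev	  = ndev,
 *		.gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *	};
 *
 *	ib_cache_gid_add(ib_dev, port, &gid, &attr);
 *	...
 *	ib_cache_gid_del(ib_dev, port, &gid, &attr);
 *
 * Both calls match entries on GID value, GID type and netdev, so the delete
 * must use the same triple that was added.
 */
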
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

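/*
 * Example (illustrative only; the port number and GID value are assumptions):
 * resolving the cached table index of a known RoCE v2 GID on port 1, ignoring
 * the netdev by passing ndev == NULL:
 *
 *	u16 gid_index;
 *	int ret = ib_find_cached_gid_by_port(device, &some_gid,
 *					     IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					     1, NULL, &gid_index);
 *	if (ret)
 *		return ret;	-ENOENT if the GID is not in the cache
 */
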
/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
					const union ib_gid *gid,
					u8 port,
					bool (*filter)(const union ib_gid *,
						       const struct ib_gid_attr *,
						       void *),
					void *context,
					u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

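/*
 * Example (illustrative caller; the port and index values are assumptions):
 * a reader that asks for the attributes must drop the netdev reference that
 * __ib_cache_gid_get() took on its behalf:
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, 1, 0, &gid, &attr) && attr.ndev)
 *		dev_put(attr.ndev);
 */
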
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

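/*
 * Example filter (a sketch; the VLAN-based predicate and the helper name
 * gid_on_vlan are assumptions for illustration, not part of this file).
 * The callback runs under the table's read lock, so it must not sleep:
 *
 *	static bool gid_on_vlan(const union ib_gid *gid,
 *				const struct ib_gid_attr *attr,
 *				void *context)
 *	{
 *		return attr->ndev && is_vlan_dev(attr->ndev);
 *	}
 *
 *	err = ib_find_gid_by_filter(device, &gid, port_num,
 *				    gid_on_vlan, NULL, &index);
 */
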
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

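/*
 * Note on the masks above: bit 15 of a P_Key is the membership bit, so
 * the 0x7fff mask compares the partition number only and 0x8000 tests for
 * full membership. For example, a lookup for pkey 0x8001 prefers a cached
 * full-member entry 0x8001 but falls back to the limited-member entry
 * 0x0001 if that is all the table holds.
 */
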
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}