/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"

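/*
 * Netlink attribute policy used by nlmsg_parse() in the handlers below:
 * every nldev attribute that may appear in a request is validated against
 * the type and length constraints declared here before any handler runs.
 */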
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
						     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};

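/*
 * Helpers exported to device drivers so that their restrack fill_res_entry()
 * callbacks can append driver-specific name/value pairs
 * (RDMA_NLDEV_ATTR_DRIVER_*) to a resource dump message.
 */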
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

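/*
 * The fill_*() helpers below translate ib_device, port and restrack state
 * into nldev attributes. They return -EMSGSIZE when the reply skb runs out
 * of room so callers can cancel the partially written message or nest.
 */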
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      (u64)attr.port_cap_flags,
				      RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->get_netdev)
		netdev = device->get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, only the PID is exported; userspace should
	 * read /proc/PID/comm to get the task name.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

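/*
 * Each fill_res_*_entry() helper below nests one RDMA_NLDEV_ATTR_RES_*_ENTRY
 * attribute per tracked object; it cancels the nest and returns -EMSGSIZE
 * when the reply skb runs out of room.
 */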
static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct rdma_restrack_root *resroot = &qp->device->res;
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct rdma_restrack_root *resroot = &cq->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct rdma_restrack_root *resroot = &mr->pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct rdma_restrack_root *resroot = &pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

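/*
 * The nldev_*_doit() handlers below answer single-shot requests with a
 * unicast reply, while the *_dumpit() handlers service NLM_F_DUMP requests
 * that may span several reply messages, using cb->args[0] as the resume
 * cursor.
 */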
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
	}

	ib_device_put(device);
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here, because we rely on
	 * ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information starting from
		 * a specific index. That index is taken from the netlink
		 * request sent by the user and is available in cb->args[0].
		 *
		 * Usually the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

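/*
 * Per-restrack-type dispatch table: for each resource type it records the
 * netlink command used in replies, the attribute that opens the nested
 * table, and the fill callback invoked by res_get_common_dumpit() below.
 */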
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};

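/*
 * Common dump path for all per-resource GET commands: parse the device
 * index (and optional port), walk the device's restrack hash bucket for the
 * requested type under res.rwsem, and emit one nested entry per visible
 * object, resuming from cb->args[0] on the next dump call.
 */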
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get the res
	 * information, but it is possible to extend this code to return all
	 * devices in one shot by checking the existence of
	 * RDMA_NLDEV_ATTR_DEV_INDEX; if it doesn't exist, we will iterate
	 * over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all the resources
	 * of this type from that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources should be visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * Resource is under release now, but we are not
			 * releasing the lock now, so it will be released in
			 * our next pass, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Return the resource back, but it won't be released till
		 * &device->res.rwsem is taken for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and
	 * return 0 to mark the end of the dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}

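/*
 * Command table registered with the RDMA netlink core: each RDMA_NLDEV_CMD_*
 * maps to a .doit handler for plain requests and/or a .dump handler for
 * NLM_F_DUMP requests. Userspace typically reaches these handlers through
 * the iproute2 "rdma" tool, e.g. "rdma dev show" or "rdma resource show qp".
 */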
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide an identifier; while it is
		 *    easy for QPs (device index + port index + LQPN), it is
		 *    not the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface for
		 *    all resources, let's wait until the other resources are
		 *    implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);