/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
#include "uverbs.h"
/*
 * This determines whether a non-privileged user is allowed to specify a
 * controlled QKEY. When true, a non-privileged user may specify a controlled
 * QKEY; otherwise CAP_NET_RAW is required.
 */
static bool privileged_qkey;
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
			       struct rdma_restrack_entry*, uint32_t);
/*
 * Keep the array elements sorted by netlink attribute name.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING,
					   .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING,
					   .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
	[RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING,
					   .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 },
	[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
	[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
	[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
};
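/*
 * Every nldev handler validates its attributes against nldev_policy before
 * touching them. A minimal sketch of the pattern used throughout this file
 * (the handler name "nldev_example_doit" is illustrative only):
 *
 *	static int nldev_example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
 *		int err;
 *
 *		err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 *					     nldev_policy, extack);
 *		if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
 *			return -EINVAL;
 *		// ... use nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]) ...
 *	}
 */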
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}
static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
			      const char *str)
{
	if (put_driver_name_print_type(msg, name,
				       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
		return -EMSGSIZE;

	return 0;
}
EXPORT_SYMBOL(rdma_nl_put_driver_string);

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
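/*
 * The rdma_nl_put_driver_* helpers above are intended for driver restrack
 * callbacks, which nest their attributes under RDMA_NLDEV_ATTR_DRIVER. An
 * illustrative sketch; the "foo" driver and its value are hypothetical:
 *
 *	static int foo_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *qp)
 *	{
 *		struct nlattr *table = nla_nest_start_noflag(msg,
 *						RDMA_NLDEV_ATTR_DRIVER);
 *
 *		if (!table)
 *			return -EMSGSIZE;
 *		if (rdma_nl_put_driver_u32(msg, "sq_depth", 128))
 *			goto err;
 *		nla_nest_end(msg, table);
 *		return 0;
 *	err:
 *		nla_nest_cancel(msg, table);
 *		return -EMSGSIZE;
 *	}
 */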
bool rdma_nl_get_privileged_qkey(void)
{
	return privileged_qkey || capable(CAP_NET_RAW);
}
EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u32 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * The link type is determined from the first port. An mlx4 device can
	 * potentially have two different link types on the same IB device;
	 * that situation is considered a misfeature best avoided in the
	 * future.
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	dev_put(netdev);
	return ret;
}
static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start_noflag(msg,
					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
		[RDMA_RESTRACK_SRQ] = "srq",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i);
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	int err = 0;

	/*
	 * For user resources, userspace should read /proc/PID/comm to get the
	 * name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				     res->kern_name);
	} else {
		pid_t pid;

		pid = task_pid_vnr(res->task);
		/*
		 * Task is dead and in zombie state.
		 * There is no need to print PID anymore.
		 */
		if (pid)
			/*
			 * This part is racy, task can be killed and PID will
			 * be zero right here but it is ok, next query won't
			 * return PID. We don't promise real-time reflection
			 * of SW objects.
			 */
			err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
	}

	return err ? -EMSGSIZE : 0;
}
static int fill_res_qp_entry_query(struct sk_buff *msg,
				   struct rdma_restrack_entry *res,
				   struct ib_device *dev,
				   struct ib_qp *qp)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (dev->ops.fill_res_qp_entry)
		return dev->ops.fill_res_qp_entry(msg, qp);
	return 0;

err:	return -EMSGSIZE;
}
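/*
 * The attributes filled above are what the iproute2 "rdma" tool renders for
 * "rdma res show qp". Illustrative output (all values are made up):
 *
 *	$ rdma res show qp
 *	link mlx5_0/1 lqpn 7 rqpn 6 type RC state RTS rq-psn 521511 sq-psn 2
 *	    path-mig-state MIGRATED pid 1234 comm ibv_rc_pingpong
 */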
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	int ret;

	if (port && port != qp->port)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
		return -EMSGSIZE;

	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
	if (ret)
		return -EMSGSIZE;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		return -EMSGSIZE;

	ret = fill_res_name_pid(msg, res);
	if (ret)
		return -EMSGSIZE;

	return fill_res_qp_entry_query(msg, res, dev, qp);
}
static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
				 struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;

	if (port && port != qp->port)
		return -EAGAIN;
	if (!dev->ops.fill_res_qp_entry_raw)
		return -EINVAL;
	return dev->ops.fill_res_qp_entry_raw(msg, qp);
}
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return -EAGAIN;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (dev->ops.fill_res_cm_id_entry)
		return dev->ops.fill_res_cm_id_entry(msg, cm_id);
	return 0;

err:	return -EMSGSIZE;
}
static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		return -EMSGSIZE;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		return -EMSGSIZE;
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->uevent.uobject.context->res.id))
		return -EMSGSIZE;

	if (fill_res_name_pid(msg, res))
		return -EMSGSIZE;

	return (dev->ops.fill_res_cq_entry) ?
		dev->ops.fill_res_cq_entry(msg, cq) : 0;
}
static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
				 struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (!dev->ops.fill_res_cq_entry_raw)
		return -EINVAL;
	return dev->ops.fill_res_cq_entry_raw(msg, cq);
}
static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			return -EMSGSIZE;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		return -EMSGSIZE;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
		return -EMSGSIZE;

	if (fill_res_name_pid(msg, res))
		return -EMSGSIZE;

	return (dev->ops.fill_res_mr_entry) ?
		       dev->ops.fill_res_mr_entry(msg, mr) :
		       0;
}
static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
				 struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (!dev->ops.fill_res_mr_entry_raw)
		return -EINVAL;
	return dev->ops.fill_res_mr_entry_raw(msg, mr);
}
static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			pd->uobject->context->res.id))
		goto err;

	return fill_res_name_pid(msg, res);

err:	return -EMSGSIZE;
}
static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
			      struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);

	if (rdma_is_kernel_res(res))
		return 0;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
		return -EMSGSIZE;

	return fill_res_name_pid(msg, res);
}
static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
				   uint32_t max_range)
{
	struct nlattr *entry_attr;

	if (!min_range)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (min_range == max_range) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
			goto err;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
			goto err;
	}
	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
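/*
 * fill_res_srq_qps() below feeds this helper runs of ascending QPNs, so
 * consecutive numbers are collapsed into MIN_RANGE/MAX_RANGE pairs. For
 * example, QPNs {4, 5, 6, 9} attached to one SRQ are emitted as two
 * entries: the range 4-6 (min=4, max=6) and the single LQPN 9.
 */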
static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
{
	uint32_t min_range = 0, prev = 0;
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct nlattr *table_attr;
	struct ib_qp *qp = NULL;
	unsigned long id = 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
	if (!table_attr)
		return -EMSGSIZE;

	rt = &srq->device->res[RDMA_RESTRACK_QP];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_restrack_get(res))
			continue;

		qp = container_of(res, struct ib_qp, res);
		if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
			rdma_restrack_put(res);
			continue;
		}

		if (qp->qp_num < prev)
			/* qp_num should be ascending */
			goto err_loop;

		if (min_range == 0) {
			min_range = qp->qp_num;
		} else if (qp->qp_num > (prev + 1)) {
			if (fill_res_range_qp_entry(msg, min_range, prev))
				goto err_loop;

			min_range = qp->qp_num;
		}
		prev = qp->qp_num;
		rdma_restrack_put(res);
	}

	xa_unlock(&rt->xa);

	if (fill_res_range_qp_entry(msg, min_range, prev))
		goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err_loop:
	rdma_restrack_put(res);
	xa_unlock(&rt->xa);
err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			      struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_srq *srq = container_of(res, struct ib_srq, res);
	struct ib_device *dev = srq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
		goto err;

	if (ib_srq_has_cq(srq->srq_type)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
				srq->ext.cq->res.id))
			goto err;
	}

	if (fill_res_srq_qps(msg, srq))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (dev->ops.fill_res_srq_entry)
		return dev->ops.fill_res_srq_entry(msg, srq);

	return 0;

err:
	return -EMSGSIZE;
}
static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
				  struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_srq *srq = container_of(res, struct ib_srq, res);
	struct ib_device *dev = srq->device;

	if (!dev->ops.fill_res_srq_entry_raw)
		return -EINVAL;
	return dev->ops.fill_res_srq_entry_raw(msg, srq);
}
static int fill_stat_counter_mode(struct sk_buff *msg,
				  struct rdma_counter *counter)
{
	struct rdma_counter_mode *m = &counter->mode;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
		return -EMSGSIZE;

	if (m->mode == RDMA_COUNTER_MODE_AUTO) {
		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
			return -EMSGSIZE;

		if ((m->mask & RDMA_COUNTER_MASK_PID) &&
		    fill_res_name_pid(msg, &counter->res))
			return -EMSGSIZE;
	}

	return 0;
}
static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
static int fill_stat_counter_qps(struct sk_buff *msg,
				 struct rdma_counter *counter)
{
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct nlattr *table_attr;
	struct ib_qp *qp = NULL;
	unsigned long id = 0;
	int ret = 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
	if (!table_attr)
		return -EMSGSIZE;

	rt = &counter->device->res[RDMA_RESTRACK_QP];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		qp = container_of(res, struct ib_qp, res);
		if (!qp->counter || (qp->counter->id != counter->id))
			continue;

		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
		if (ret)
			goto err;
	}

	xa_unlock(&rt->xa);
	nla_nest_end(msg, table_attr);
	return 0;

err:
	xa_unlock(&rt->xa);
	nla_nest_cancel(msg, table_attr);
	return ret;
}
int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
				 u64 value)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
			   name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
			      value, RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
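/*
 * Drivers can reuse rdma_nl_stat_hwcounter_entry() when implementing
 * per-object stat callbacks such as ops.fill_stat_mr_entry. A minimal
 * sketch; the "foo" driver, counter name, and accessor are hypothetical:
 *
 *	static int foo_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *mr)
 *	{
 *		return rdma_nl_stat_hwcounter_entry(msg, "page_faults",
 *						    foo_mr_page_faults(mr));
 *	}
 */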
static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			      struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		goto err;

	if (dev->ops.fill_stat_mr_entry)
		return dev->ops.fill_stat_mr_entry(msg, mr);
	return 0;

err:
	return -EMSGSIZE;
}
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
					struct rdma_counter *counter)
{
	struct rdma_hw_stats *st = counter->stats;
	struct nlattr *table_attr;
	int i;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		return -EMSGSIZE;

	mutex_lock(&st->lock);
	for (i = 0; i < st->num_counters; i++) {
		if (test_bit(i, st->is_disabled))
			continue;
		if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name,
						 st->value[i]))
			goto err;
	}
	mutex_unlock(&st->lock);

	nla_nest_end(msg, table_attr);
	return 0;

err:
	mutex_unlock(&st->lock);
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
				  struct rdma_restrack_entry *res,
				  uint32_t port)
{
	struct rdma_counter *counter =
		container_of(res, struct rdma_counter, res);

	if (port && port != counter->port)
		return -EAGAIN;

	/* Dump the counter even if the query failed */
	rdma_counter_query_stats(counter);

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
	    fill_stat_counter_mode(msg, counter) ||
	    fill_stat_counter_qps(msg, counter) ||
	    fill_stat_counter_hwcounters(msg, counter))
		return -EMSGSIZE;

	return 0;
}
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);
	if (!nlh) {
		err = -EMSGSIZE;
		goto err_free;
	}

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		if (strlen(name) == 0) {
			err = -EINVAL;
			goto done;
		}
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		err = ib_device_set_netns_put(skb, device, ns_fd);
		goto put_done;
	}

	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
		u8 use_dim;

		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
		err = ib_device_set_dim(device, use_dim);
		goto done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (!nlh || fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);
	if (!nlh) {
		err = -EMSGSIZE;
		goto err_free;
	}

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function returns all information from a specific
		 * index. This index is taken from the netlink request sent by
		 * the user and is available in cb->args[0].
		 *
		 * Usually the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}
static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);
	if (!nlh) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (!nlh || fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
struct nldev_fill_res_entry {
	enum rdma_nldev_attr nldev_attr;
	u8 flags;
	u32 entry;
	u32 id;
};

enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0,
};
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
	[RDMA_RESTRACK_CTX] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CTXN,
	},
	[RDMA_RESTRACK_SRQ] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_SRQN,
	},
};
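/*
 * NLDEV_PER_DEV marks resource types that are scoped to the whole device
 * (CQ, MR, PD, CTX, SRQ): a doit request for them must not carry
 * RDMA_NLDEV_ATTR_PORT_INDEX, while per-port types (QP, CM_ID, counters)
 * must carry it. res_get_common_doit() below rejects the mismatch, e.g. a
 * CQN query that also supplies a port index fails with -EINVAL.
 */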
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type,
			       res_fill_func_t fill_func)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err_get;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NL_GET_OP(nlh->nlmsg_type)),
			0, 0);

	if (!nlh || fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);

	ret = fill_func(msg, has_cap_net_admin, res, port);
	if (ret)
		goto err_free;

	rdma_restrack_put(res);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type,
				 res_fill_func_t fill_func)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	/*
	 * Right now we expect the device index in order to get res
	 * information, but it is possible to extend this code to return all
	 * devices in one shot by checking the existence of
	 * RDMA_NLDEV_ATTR_DEV_INDEX: if it doesn't exist, iterate over all
	 * devices. That is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
			0, NLM_F_MULTI);

	if (!nlh || fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if the skip ahead is something common this loop should
	 * use xas_for_each & xas_pause to optimize, we can have a lot of
	 * objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		if (idx < start || !rdma_restrack_get(res))
			goto next;

		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start_noflag(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fill_func(skb, has_cap_net_admin, res, port);

		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,         \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type,                    \
					     fill_res_##name##_entry);         \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,           \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type,             \
					   fill_res_##name##_entry);           \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ);
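/*
 * For reference, RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) expands to:
 *
 *	static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
 *					   struct netlink_callback *cb)
 *	{
 *		return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP,
 *					     fill_res_qp_entry);
 *	}
 *	static int nldev_res_get_qp_doit(struct sk_buff *skb,
 *					 struct nlmsghdr *nlh,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP,
 *					   fill_res_qp_entry);
 *	}
 */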
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);

static const struct rdma_link_ops *link_ops_get(const char *type)
{
	const struct rdma_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->type, type))
			goto out;
	}
	ops = NULL;
out:
	return ops;
}

void rdma_link_register(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	if (WARN_ON_ONCE(link_ops_get(ops->type)))
		goto out;
	list_add(&ops->list, &link_ops);
out:
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_register);

void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
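/*
 * A link provider registers itself so nldev_newlink() can find it by type.
 * A minimal sketch modeled on the soft-RoCE (rxe) driver; the "foo" name
 * and callback body are illustrative:
 *
 *	static struct rdma_link_ops foo_link_ops = {
 *		.type = "foo",
 *		.newlink = foo_newlink,
 *	};
 *
 *	rdma_link_register(&foo_link_ops);	// on module init
 *	rdma_link_unregister(&foo_link_ops);	// on module exit
 */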
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
		return -EINVAL;

	nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}
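/*
 * From userspace this is driven by the iproute2 "rdma" tool, e.g.
 * (illustrative; "rxe0" and "eth0" are placeholders):
 *
 *	# rdma link add rxe0 type rxe netdev eth0
 *	# rdma link delete rxe0
 *
 * The "type" string selects the rdma_link_ops entry; if none is registered,
 * request_module("rdma-link-rxe") gives the provider a chance to load.
 */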
static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
		ib_device_put(device);
		return -EINVAL;
	}

	ib_unregister_device_and_put(device);
	return 0;
}
static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out_nlmsg;
	}

	data.nl_msg = msg;
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}
static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct sk_buff *msg;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_SYS_GET),
			0, 0);
	if (!nlh) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}

	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
			 (u8)ib_devices_shared_netns);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE,
			 (u8)privileged_qkey);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	/*
	 * Copy-on-fork is supported.
	 * See commits:
	 * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
	 * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
	 * for more details. Don't backport this without them.
	 *
	 * Return value ignored on purpose, assume copy-on-fork is not
	 * supported in case of failure.
	 */
	nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);

	nlmsg_end(msg, nlh);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
static int nldev_set_sys_set_netns_doit(struct nlattr *tb[])
{
	u8 enable;
	int err;

	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
	/* Only 0 and 1 are supported */
	if (enable > 1)
		return -EINVAL;

	err = rdma_compatdev_set(enable);
	return err;
}

static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[])
{
	u8 enable;

	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]);
	/* Only 0 and 1 are supported */
	if (enable > 1)
		return -EINVAL;

	privileged_qkey = enable;
	return 0;
}
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err)
		return -EINVAL;

	if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
		return nldev_set_sys_set_netns_doit(tb);

	if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE])
		return nldev_set_sys_set_pqkey_doit(tb);

	return -EINVAL;
}
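/*
 * These knobs map to "rdma system" in iproute2. Illustrative invocations
 * and output (exact rendering depends on the iproute2 version):
 *
 *	# rdma system show
 *	netns shared privileged-qkey off copy-on-fork on
 *	# rdma system set netns exclusive
 *	# rdma system set privileged-qkey on
 */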
static int nldev_stat_set_mode_doit(struct sk_buff *msg,
				    struct netlink_ext_ack *extack,
				    struct nlattr *tb[],
				    struct ib_device *device, u32 port)
{
	u32 mode, mask = 0, qpn, cntn = 0;
	int ret;

	/* Currently only counter for QP is supported */
	if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
	if (mode == RDMA_COUNTER_MODE_AUTO) {
		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
			mask = nla_get_u32(
				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
		return rdma_counter_set_auto_mode(device, port, mask, extack);
	}

	if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
		cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
		ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
		if (ret)
			return ret;
	} else {
		ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
		if (ret)
			return ret;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	return 0;

err_fill:
	rdma_counter_unbind_qpn(device, port, qpn, cntn);
	return ret;
}
static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
					       struct ib_device *device,
					       u32 port)
{
	struct rdma_hw_stats *stats;
	struct nlattr *entry_attr;
	unsigned long *target;
	int rem, i, ret = 0;
	u32 index;

	stats = ib_get_hw_stats_port(device, port);
	if (!stats)
		return -EINVAL;

	target = kcalloc(BITS_TO_LONGS(stats->num_counters),
			 sizeof(*stats->is_disabled), GFP_KERNEL);
	if (!target)
		return -ENOMEM;

	nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS],
			    rem) {
		index = nla_get_u32(entry_attr);
		if ((index >= stats->num_counters) ||
		    !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) {
			ret = -EINVAL;
			goto out;
		}

		set_bit(index, target);
	}

	for (i = 0; i < stats->num_counters; i++) {
		if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL))
			continue;

		ret = rdma_counter_modify(device, port, i, test_bit(i, target));
		if (ret)
			goto out;
	}

out:
	kfree(target);
	return ret;
}
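/*
 * Counter binding and dynamic (optional) hw counters are exercised with
 * "rdma statistic" in iproute2. Illustrative invocations (device and
 * counter names are placeholders; exact syntax depends on iproute2):
 *
 *	# rdma statistic qp set link mlx5_0/1 auto type on
 *	# rdma statistic qp bind link mlx5_0/1 lqpn 178
 *	# rdma statistic set link mlx5_0/1 optional-counters cc_rx_ce_pkts
 */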
static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err_put_device;
	}

	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] &&
	    !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
		ret = -EINVAL;
		goto err_put_device;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err_put_device;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);
	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_free_msg;
	}

	if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
		ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
		if (ret)
			goto err_free_msg;
	}

	if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
		ret = nldev_stat_set_counter_dynamic_doit(tb, device, port);
		if (ret)
			goto err_free_msg;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free_msg:
	nlmsg_free(msg);
err_put_device:
	ib_device_put(device);
	return ret;
}
static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port, qpn, cntn;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);
	if (!nlh) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
	if (ret)
		goto err_fill;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
static int stat_get_doit_default_counter(struct sk_buff *skb,
					 struct nlmsghdr *nlh,
					 struct netlink_ext_ack *extack,
					 struct nlattr *tb[])
{
	struct rdma_hw_stats *stats;
	struct nlattr *table_attr;
	struct ib_device *device;
	int ret, num_cnts, i;
	struct sk_buff *msg;
	u32 index, port;
	u64 v;

	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) {
		ret = -EINVAL;
		goto err;
	}

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	stats = ib_get_hw_stats_port(device, port);
	if (!stats) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	mutex_lock(&stats->lock);

	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
	if (num_cnts < 0) {
		ret = -EINVAL;
		goto err_stats;
	}

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err_stats;
	}
	for (i = 0; i < num_cnts; i++) {
		if (test_bit(i, stats->is_disabled))
			continue;

		v = stats->value[i] +
			rdma_counter_get_hwstat_value(device, port, i);
		if (rdma_nl_stat_hwcounter_entry(msg,
						 stats->descs[i].name, v)) {
			ret = -EMSGSIZE;
			goto err_table;
		}
	}
	nla_nest_end(msg, table_attr);

	mutex_unlock(&stats->lock);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_table:
	nla_nest_cancel(msg, table_attr);
err_stats:
	mutex_unlock(&stats->lock);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack, struct nlattr *tb[])
{
	static enum rdma_nl_counter_mode mode;
	static enum rdma_nl_counter_mask mask;
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
		return nldev_res_get_counter_doit(skb, nlh, extack);

	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);
	if (!nlh) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	ret = rdma_counter_get_mode(device, port, &mode, &mask);
	if (ret)
		goto err_msg;

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret)
		return -EINVAL;

	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
		return stat_get_doit_default_counter(skb, nlh, extack, tb);

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = stat_get_doit_qp(skb, nlh, extack, tb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
					  fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int nldev_stat_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
		return -EINVAL;

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = nldev_res_get_counter_dumpit(skb, cb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
					    fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
					      struct nlmsghdr *nlh,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
	struct rdma_hw_stats *stats;
	struct ib_device *device;
	struct sk_buff *msg;
	u32 devid, port;
	int ret, i;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), devid);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	stats = ib_get_hw_stats_port(device, port);
	if (!stats) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(
		msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
		RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
		0, 0);

	ret = -EMSGSIZE;
	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		goto err_msg;

	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table)
		goto err_msg;

	mutex_lock(&stats->lock);
	for (i = 0; i < stats->num_counters; i++) {
		entry = nla_nest_start(msg,
				       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
		if (!entry)
			goto err_msg_table;

		if (nla_put_string(msg,
				   RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
				   stats->descs[i].name) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
			goto err_msg_entry;

		if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
		    (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
				!test_bit(i, stats->is_disabled))))
			goto err_msg_entry;

		nla_nest_end(msg, entry);
	}
	mutex_unlock(&stats->lock);

	nla_nest_end(msg, table);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg_entry:
	nla_nest_cancel(msg, entry);
err_msg_table:
	mutex_unlock(&stats->lock);
	nla_nest_cancel(msg, table);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
		.doit = nldev_res_get_ctx_doit,
		.dump = nldev_res_get_ctx_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
		.doit = nldev_res_get_srq_doit,
		.dump = nldev_res_get_srq_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
		.doit = nldev_res_get_qp_raw_doit,
		.dump = nldev_res_get_qp_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
		.doit = nldev_res_get_cq_raw_doit,
		.dump = nldev_res_get_cq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
		.doit = nldev_res_get_mr_raw_doit,
		.dump = nldev_res_get_mr_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
		.doit = nldev_res_get_srq_raw_doit,
		.dump = nldev_res_get_srq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
		.doit = nldev_stat_get_counter_status_doit,
	},
};
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);