vDPA: check VIRTIO_NET_F_RSS for max_virtqueue_paris's presence
[linux-block.git] / drivers / vdpa / vdpa.c
CommitLineData
961e9c84
JW
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * vDPA bus.
4 *
5 * Copyright (c) 2020, Red Hat. All rights reserved.
6 * Author: Jason Wang <jasowang@redhat.com>
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/idr.h>
12#include <linux/slab.h>
13#include <linux/vdpa.h>
33b34750
PP
14#include <uapi/linux/vdpa.h>
15#include <net/genetlink.h>
16#include <linux/mod_devicetable.h>
ad69dd0b 17#include <linux/virtio_ids.h>
961e9c84 18
/* List of registered vdpa management devices; protected by vdpa_dev_lock. */
static LIST_HEAD(mdev_head);
/* A global rwsem that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator of unique device indices, used for the default "vdpa%u" names. */
static DEFINE_IDA(vdpa_index_ida);
23
73bc0dbb
EC
/* Set the device status under the config write lock so status changes are
 * serialized against concurrent config-space accesses. */
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
31
33b34750
PP
32static struct genl_family vdpa_nl_family;
33
961e9c84
JW
/* Bus probe callback: validate the device's virtqueue size range, then hand
 * off to the matched vDPA bus driver's probe (if any). */
static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	/* get_vq_num_min is optional; the minimum defaults to 1. A device
	 * whose advertised range is empty is rejected. */
	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}
53
/* Bus remove callback: the driver's remove hook is optional. */
static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}
62
539fec78
SG
63static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
64{
65 struct vdpa_device *vdev = dev_to_vdpa(dev);
66
67 /* Check override first, and if set, only use the named driver */
68 if (vdev->driver_override)
69 return strcmp(vdev->driver_override, drv->name) == 0;
70
71 /* Currently devices must be supported by all vDPA bus drivers */
72 return 1;
73}
74
/* sysfs write handler for the "driver_override" attribute; stores the
 * requested driver name via the driver core helper. */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	/* driver_set_override() dups the string and frees any previous one. */
	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
88
/* sysfs read handler for "driver_override"; device lock guards against a
 * concurrent store replacing the string mid-read. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

/* Per-device sysfs attributes exposed for every vdpa device. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);
112
961e9c84
JW
/* The vDPA bus itself; devices and bus drivers hang off this. */
static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
120
121static void vdpa_release_dev(struct device *d)
122{
123 struct vdpa_device *vdev = dev_to_vdpa(d);
124 const struct vdpa_config_ops *ops = vdev->config;
125
126 if (ops->free)
127 ops->free(vdev);
128
129 ida_simple_remove(&vdpa_index_ida, vdev->index);
539fec78 130 kfree(vdev->driver_override);
961e9c84
JW
131 kfree(vdev);
132}
133
134/**
135 * __vdpa_alloc_device - allocate and initilaize a vDPA device
136 * This allows driver to some prepartion after device is
137 * initialized but before registered.
138 * @parent: the parent device
139 * @config: the bus operations that is supported by this device
d4821902 140 * @ngroups: number of groups supported by this device
db9adcbf 141 * @nas: number of address spaces supported by this device
961e9c84 142 * @size: size of the parent structure that contains private data
fd70a406 143 * @name: name of the vdpa device; optional.
d8945ec4 144 * @use_va: indicate whether virtual address must be used by this device
961e9c84 145 *
24eae8eb 146 * Driver should use vdpa_alloc_device() wrapper macro instead of
961e9c84
JW
147 * using this directly.
148 *
c0a54b4b
PP
149 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
150 * ida.
961e9c84
JW
151 */
152struct vdpa_device *__vdpa_alloc_device(struct device *parent,
153 const struct vdpa_config_ops *config,
db9adcbf 154 unsigned int ngroups, unsigned int nas,
d8945ec4
XY
155 size_t size, const char *name,
156 bool use_va)
961e9c84
JW
157{
158 struct vdpa_device *vdev;
159 int err = -EINVAL;
160
161 if (!config)
162 goto err;
163
164 if (!!config->dma_map != !!config->dma_unmap)
165 goto err;
166
d8945ec4
XY
167 /* It should only work for the device that use on-chip IOMMU */
168 if (use_va && !(config->dma_map || config->set_map))
169 goto err;
170
961e9c84
JW
171 err = -ENOMEM;
172 vdev = kzalloc(size, GFP_KERNEL);
173 if (!vdev)
174 goto err;
175
418eddef 176 err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
961e9c84
JW
177 if (err < 0)
178 goto err_ida;
179
180 vdev->dev.bus = &vdpa_bus;
181 vdev->dev.parent = parent;
182 vdev->dev.release = vdpa_release_dev;
183 vdev->index = err;
184 vdev->config = config;
452639a6 185 vdev->features_valid = false;
d8945ec4 186 vdev->use_va = use_va;
d4821902 187 vdev->ngroups = ngroups;
db9adcbf 188 vdev->nas = nas;
961e9c84 189
fd70a406
PP
190 if (name)
191 err = dev_set_name(&vdev->dev, "%s", name);
192 else
193 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
961e9c84
JW
194 if (err)
195 goto err_name;
196
a6a51adc 197 init_rwsem(&vdev->cf_lock);
961e9c84
JW
198 device_initialize(&vdev->dev);
199
200 return vdev;
201
202err_name:
203 ida_simple_remove(&vdpa_index_ida, vdev->index);
204err_ida:
205 kfree(vdev);
206err:
207 return ERR_PTR(err);
208}
209EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
210
fd70a406
PP
211static int vdpa_name_match(struct device *dev, const void *data)
212{
213 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
214
215 return (strcmp(dev_name(&vdev->dev), data) == 0);
216}
217
/* Common registration path; caller must hold vdpa_dev_lock for writing. */
static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	/* Reject duplicate names. bus_find_device() takes a reference on a
	 * match, which must be dropped before returning. */
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}
232
/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a successful call of vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	/* Only devices backed by a management device may use this path. */
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
251
961e9c84
JW
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call of vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
270
903f7bca
PP
/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
284
961e9c84
JW
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
296
/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an err when fail to do the registration
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);
312
/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
322
33b34750
PP
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() register a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success or failure when required callback ops are not
 * initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	/* dev_add and dev_del are mandatory for a management device. */
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
344
903f7bca
PP
345static int vdpa_match_remove(struct device *dev, void *data)
346{
347 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
348 struct vdpa_mgmt_dev *mdev = vdev->mdev;
349
350 if (mdev == data)
351 mdev->ops->dev_del(mdev, vdev);
352 return 0;
353}
354
33b34750
PP
/* Unregister a management device, deleting every vdpa device it created. */
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries belong to this management device and delete it. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
367
30ef7a8a
EC
368static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
369 unsigned int offset,
370 void *buf, unsigned int len)
371{
372 const struct vdpa_config_ops *ops = vdev->config;
373
374 /*
375 * Config accesses aren't supposed to trigger before features are set.
376 * If it does happen we assume a legacy guest.
377 */
378 if (!vdev->features_valid)
e0077cc1 379 vdpa_set_features_unlocked(vdev, 0);
30ef7a8a
EC
380 ops->get_config(vdev, offset, buf, len);
381}
382
6dbb1f16
PP
/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
398
/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
414
33b34750
PP
415static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
416 const char *busname, const char *devname)
417{
418 /* Bus name is optional for simulated management device, so ignore the
419 * device with bus if bus attribute is provided.
420 */
421 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
422 return false;
423
424 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
425 return true;
426
427 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
428 (strcmp(dev_name(mdev->device), devname) == 0))
429 return true;
430
431 return false;
432}
433
/* Look up a registered management device from netlink attributes.
 * Caller must hold vdpa_dev_lock to keep mdev_head stable. */
static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}
452
453static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
454{
455 if (mdev->device->bus &&
456 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
457 return -EMSGSIZE;
458 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
459 return -EMSGSIZE;
460 return 0;
461}
462
/* Fill one VDPA_CMD_MGMTDEV_NEW message describing @mdev. */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	/* Build a bitmap of supported virtio device classes; ids above 63
	 * cannot be represented in the u64 and are skipped. */
	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
507
/* Netlink doit handler for VDPA_CMD_MGMTDEV_GET. */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	/* genlmsg_reply() consumes msg, so it is not freed on this path. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}
538
/* Netlink dumpit handler for VDPA_CMD_MGMTDEV_GET; resumes from cb->args[0]. */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		/* Skip entries already emitted by a previous dump pass. */
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	cb->args[0] = idx;
	return msg->len;
}
564
47a1401a
EC
/* Config attributes that alter device networking state; supplying any of
 * them over netlink requires CAP_NET_ADMIN (checked in dev_add). */
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
d8ca2fa5 568
903f7bca
PP
/* Netlink doit handler for VDPA_CMD_DEV_NEW: create a device on the
 * requested management device with optional initial config. */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	/* Collect optional attributes; config.mask records which were set. */
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip checking capability if user didn't prefer to configure any
	 * device networking attributes. It is likely that user might have used
	 * a device specific method to configure such attributes or using device
	 * default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	/* Reject attributes the management device cannot honor. */
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "All provided attributes are not supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}
637
/* Netlink doit handler for VDPA_CMD_DEV_DEL: delete a user-created device. */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Devices without a management device were not user-created. */
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	/* Drop the reference taken by bus_find_device(). */
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}
671
bc0d90ee
PP
/* Fill one VDPA_CMD_DEV_NEW message describing @vdev. */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	/* get_vq_num_min is optional; the minimum defaults to 1. */
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
718
/* Netlink doit handler for VDPA_CMD_DEV_GET. */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes msg; only drop the device reference here. */
	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}
762
/* Cursor state shared by the netlink dump callbacks below. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* message being filled */
	struct netlink_callback *cb;	/* netlink dump context */
	int start_idx;			/* resume point from the previous pass */
	int idx;			/* index of the current device */
};
769
/* bus_for_each_dev() callback that emits one VDPA_CMD_DEV_NEW record per
 * managed device, skipping entries before the dump resume point. */
static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	/* Only devices created through a management device are reported. */
	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}
790
/* Netlink dumpit handler for VDPA_CMD_DEV_GET. */
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}
806
/* Report max_virtqueue_pairs only when the config field is present:
 * per the virtio spec it exists if the device offers VIRTIO_NET_F_MQ
 * or VIRTIO_NET_F_RSS. */
static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
		return 0;

	val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}
819
820static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
821{
822 struct virtio_net_config config = {};
c6dac2ec 823 u64 features_device;
ad69dd0b
PP
824 u16 val_u16;
825
22856510 826 vdev->config->get_config(vdev, 0, &config, sizeof(config));
ad69dd0b
PP
827
828 if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
829 config.mac))
830 return -EMSGSIZE;
831
79e0034c 832 val_u16 = __virtio16_to_cpu(true, config.status);
ad69dd0b
PP
833 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
834 return -EMSGSIZE;
835
79e0034c 836 val_u16 = __virtio16_to_cpu(true, config.mtu);
ad69dd0b
PP
837 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
838 return -EMSGSIZE;
839
22856510
ZL
840 features_device = vdev->config->get_device_features(vdev);
841
842 if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
843 VDPA_ATTR_PAD))
844 return -EMSGSIZE;
845
846 return vdpa_dev_net_mq_config_fill(vdev, msg, features_driver, &config);
ad69dd0b
PP
847}
848
/* Fill one VDPA_CMD_DEV_CONFIG_GET message, holding cf_lock for a
 * consistent snapshot of the config space. */
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	/* Device-class specific config attributes. */
	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}
910
13b00b13
EC
/* Fill one vendor vq-stats record; caller holds cf_lock (see
 * vendor_stats_fill()) so vdpa_get_config_unlocked() is safe. */
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 max_vqp;
	u8 status;
	int err;

	/* Stats are only meaningful after feature negotiation completes. */
	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}
945
/* Take cf_lock and emit vendor vq stats; get_vendor_vq_stats is optional. */
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}
962
963static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
964 struct sk_buff *msg,
965 struct genl_info *info, u32 index)
966{
967 u32 device_id;
968 void *hdr;
969 int err;
970 u32 portid = info->snd_portid;
971 u32 seq = info->snd_seq;
972 u32 flags = 0;
973
974 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
975 VDPA_CMD_DEV_VSTATS_GET);
976 if (!hdr)
977 return -EMSGSIZE;
978
979 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
980 err = -EMSGSIZE;
981 goto undo_msg;
982 }
983
984 device_id = vdev->config->get_device_id(vdev);
985 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
986 err = -EMSGSIZE;
987 goto undo_msg;
988 }
989
990 switch (device_id) {
991 case VIRTIO_ID_NET:
992 if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
993 NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
994 err = -ERANGE;
995 break;
996 }
997
998 err = vendor_stats_fill(vdev, msg, info, index);
999 break;
1000 default:
1001 err = -EOPNOTSUPP;
1002 break;
1003 }
1004 genlmsg_end(msg, hdr);
1005
1006 return err;
1007
1008undo_msg:
1009 genlmsg_cancel(msg, hdr);
1010 return err;
1011}
1012
ad69dd0b
PP
1013static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
1014{
1015 struct vdpa_device *vdev;
1016 struct sk_buff *msg;
1017 const char *devname;
1018 struct device *dev;
1019 int err;
1020
1021 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1022 return -EINVAL;
1023 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1024 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1025 if (!msg)
1026 return -ENOMEM;
1027
0078ad90 1028 down_read(&vdpa_dev_lock);
ad69dd0b
PP
1029 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1030 if (!dev) {
1031 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1032 err = -ENODEV;
1033 goto dev_err;
1034 }
1035 vdev = container_of(dev, struct vdpa_device, dev);
1036 if (!vdev->mdev) {
1037 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1038 err = -EINVAL;
1039 goto mdev_err;
1040 }
1041 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
1042 0, info->extack);
1043 if (!err)
1044 err = genlmsg_reply(msg, info);
1045
1046mdev_err:
1047 put_device(dev);
1048dev_err:
0078ad90 1049 up_read(&vdpa_dev_lock);
ad69dd0b
PP
1050 if (err)
1051 nlmsg_free(msg);
1052 return err;
1053}
1054
1055static int vdpa_dev_config_dump(struct device *dev, void *data)
1056{
1057 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
1058 struct vdpa_dev_dump_info *info = data;
1059 int err;
1060
1061 if (!vdev->mdev)
1062 return 0;
1063 if (info->idx < info->start_idx) {
1064 info->idx++;
1065 return 0;
1066 }
1067 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
1068 info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1069 info->cb->extack);
1070 if (err)
1071 return err;
1072
1073 info->idx++;
1074 return 0;
1075}
1076
1077static int
1078vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
1079{
1080 struct vdpa_dev_dump_info info;
1081
1082 info.msg = msg;
1083 info.cb = cb;
1084 info.start_idx = cb->args[0];
1085 info.idx = 0;
1086
0078ad90 1087 down_read(&vdpa_dev_lock);
ad69dd0b 1088 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
0078ad90 1089 up_read(&vdpa_dev_lock);
ad69dd0b
PP
1090 cb->args[0] = info.idx;
1091 return msg->len;
1092}
1093
13b00b13
EC
/*
 * VDPA_CMD_DEV_VSTATS_GET (doit): reply with the vendor statistics of
 * one virtqueue (VDPA_ATTR_DEV_QUEUE_INDEX) of the vdpa device named by
 * VDPA_ATTR_DEV_NAME.  Requires a managed device; the reply skb is
 * consumed by genlmsg_reply() on success and freed here on error.
 * Returns 0 on success or a negative errno.
 */
1094static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
1095 struct genl_info *info)
1096{
1097 struct vdpa_device *vdev;
1098 struct sk_buff *msg;
1099 const char *devname;
1100 struct device *dev;
1101 u32 index;
1102 int err;
1103
1104 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1105 return -EINVAL;
1106
1107 if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
1108 return -EINVAL;
1109
1110 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1111 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1112 if (!msg)
1113 return -ENOMEM;
1114
1115 index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
/* Hold the device lock across lookup and stats read so the device
 * cannot be unregistered underneath us. */
0078ad90 1116 down_read(&vdpa_dev_lock);
13b00b13
EC
1117 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1118 if (!dev) {
1119 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1120 err = -ENODEV;
1121 goto dev_err;
1122 }
1123 vdev = container_of(dev, struct vdpa_device, dev);
/* Vendor stats are only defined for management-API created devices. */
1124 if (!vdev->mdev) {
1125 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1126 err = -EINVAL;
1127 goto mdev_err;
1128 }
1129 err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
1130 if (err)
1131 goto mdev_err;
1132
/* genlmsg_reply() consumes msg regardless of its return value, so the
 * success path must not fall through to nlmsg_free() below. */
1133 err = genlmsg_reply(msg, info);
1134
1135 put_device(dev);
0078ad90 1136 up_read(&vdpa_dev_lock);
13b00b13
EC
1137
1138 return err;
1139
/* Error paths: drop the bus_find_device() reference (if taken) and
 * free the unsent reply skb. */
1140mdev_err:
1141 put_device(dev);
1142dev_err:
1143 nlmsg_free(msg);
0078ad90 1144 up_read(&vdpa_dev_lock);
13b00b13
EC
1145 return err;
1146}
1147
33b34750
PP
1148static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
1149 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
1150 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
903f7bca 1151 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
d8ca2fa5
PP
1152 [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1153 /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
1154 [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
33b34750
PP
1155};
1156
/*
 * Generic netlink operation table for the vdpa family.  Management
 * device queries are unprivileged; device creation, deletion and
 * vendor-stats access require GENL_ADMIN_PERM (CAP_NET_ADMIN).
 */
1157static const struct genl_ops vdpa_nl_ops[] = {
1158 {
1159 .cmd = VDPA_CMD_MGMTDEV_GET,
1160 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1161 .doit = vdpa_nl_cmd_mgmtdev_get_doit,
1162 .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
1163 },
903f7bca
PP
1164 {
1165 .cmd = VDPA_CMD_DEV_NEW,
1166 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1167 .doit = vdpa_nl_cmd_dev_add_set_doit,
1168 .flags = GENL_ADMIN_PERM,
1169 },
1170 {
1171 .cmd = VDPA_CMD_DEV_DEL,
1172 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1173 .doit = vdpa_nl_cmd_dev_del_set_doit,
1174 .flags = GENL_ADMIN_PERM,
1175 },
bc0d90ee
PP
1176 {
1177 .cmd = VDPA_CMD_DEV_GET,
1178 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1179 .doit = vdpa_nl_cmd_dev_get_doit,
1180 .dumpit = vdpa_nl_cmd_dev_get_dumpit,
1181 },
ad69dd0b
PP
1182 {
1183 .cmd = VDPA_CMD_DEV_CONFIG_GET,
1184 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1185 .doit = vdpa_nl_cmd_dev_config_get_doit,
1186 .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
1187 },
13b00b13
EC
1188 {
1189 .cmd = VDPA_CMD_DEV_VSTATS_GET,
1190 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1191 .doit = vdpa_nl_cmd_dev_stats_get_doit,
1192 .flags = GENL_ADMIN_PERM,
1193 },
33b34750
PP
1194};
1195
/*
 * The "vdpa" generic netlink family.  Registered once at module init;
 * __ro_after_init because the structure is never modified after
 * genl_register_family() fills in its runtime fields.
 */
1196static struct genl_family vdpa_nl_family __ro_after_init = {
1197 .name = VDPA_GENL_NAME,
1198 .version = VDPA_GENL_VERSION,
1199 .maxattr = VDPA_ATTR_MAX,
1200 .policy = vdpa_nl_policy,
1201 .netnsok = false,
1202 .module = THIS_MODULE,
1203 .ops = vdpa_nl_ops,
1204 .n_ops = ARRAY_SIZE(vdpa_nl_ops),
1205};
1206
961e9c84
JW
/*
 * Module init: register the vdpa bus, then the netlink family.
 * The bus is unregistered again if netlink registration fails, so
 * a failed init leaves no partial state behind.
 */
1207static int vdpa_init(void)
1208{
33b34750
PP
1209 int err;
1210
1211 err = bus_register(&vdpa_bus);
1212 if (err)
1213 return err;
1214 err = genl_register_family(&vdpa_nl_family);
1215 if (err)
1216 goto err;
1217 return 0;
1218
1219err:
1220 bus_unregister(&vdpa_bus);
1221 return err;
961e9c84
JW
1222}
1223
/*
 * Module exit: tear down in reverse order of vdpa_init() — netlink
 * family first, then the bus — and release any remaining device
 * index IDs.
 */
1224static void __exit vdpa_exit(void)
1225{
33b34750 1226 genl_unregister_family(&vdpa_nl_family);
961e9c84
JW
1227 bus_unregister(&vdpa_bus);
1228 ida_destroy(&vdpa_index_ida);
1229}
/* core_initcall: the vdpa bus must exist before vdpa drivers probe. */
1230core_initcall(vdpa_init);
1231module_exit(vdpa_exit);
1232
1233MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
1234MODULE_LICENSE("GPL v2");