// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MOD_VERSION  "0.1"
#define MOD_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define MOD_DESC     "vDPA bus driver for virtio devices"
#define MOD_LICENSE  "GPL v2"

struct virtio_vdpa_device {
        struct virtio_device vdev;
        struct vdpa_device *vdpa;
        u64 features;

        /* The lock to protect virtqueue list */
        spinlock_t lock;
        /* List of virtio_vdpa_vq_info */
        struct list_head virtqueues;
};

struct virtio_vdpa_vq_info {
        /* the actual virtqueue */
        struct virtqueue *vq;

        /* the list node for the virtqueues list */
        struct list_head node;
};

static inline struct virtio_vdpa_device *
to_virtio_vdpa_device(struct virtio_device *dev)
{
        return container_of(dev, struct virtio_vdpa_device, vdev);
}

static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
{
        return to_virtio_vdpa_device(vdev)->vdpa;
}

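/*
 * Config space accessors: reads and writes coming from the virtio core
 * are forwarded to the vDPA core (vdpa_get_config()/vdpa_set_config()),
 * which dispatches them to the parent device's config ops.
 */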
static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
                            void *buf, unsigned int len)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);

        vdpa_get_config(vdpa, offset, buf, len);
}

static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
                            const void *buf, unsigned int len)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);

        vdpa_set_config(vdpa, offset, buf, len);
}

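/*
 * Device status, reset and the config space generation counter are
 * likewise delegated to the parent device; get_generation is optional,
 * so a device that does not implement it always reports generation 0.
 */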
static u32 virtio_vdpa_generation(struct virtio_device *vdev)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_generation)
                return ops->get_generation(vdpa);

        return 0;
}

static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        return ops->get_status(vdpa);
}

static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);

        return vdpa_set_status(vdpa, status);
}

static void virtio_vdpa_reset(struct virtio_device *vdev)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);

        vdpa_reset(vdpa);
}

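/*
 * Kick callback used by the vring: notify the parent device that new
 * buffers are available on this virtqueue.
 */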
static bool virtio_vdpa_notify(struct virtqueue *vq)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        ops->kick_vq(vdpa, vq->index);

        return true;
}

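/*
 * Callbacks registered with the parent device: relay config space
 * change notifications and virtqueue interrupts back into the virtio
 * core via virtio_config_changed() and vring_interrupt().
 */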
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
        struct virtio_vdpa_device *vd_dev = private;

        virtio_config_changed(&vd_dev->vdev);

        return IRQ_HANDLED;
}

static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
        struct virtio_vdpa_vq_info *info = private;

        return vring_interrupt(0, info->vq);
}

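/*
 * Create one virtqueue: allocate the vring via vring_create_virtqueue(),
 * then program its size, callback, ring addresses and initial state into
 * the parent device through the vdpa_config_ops before marking the queue
 * ready.
 */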
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
                     void (*callback)(struct virtqueue *vq),
                     const char *name, bool ctx)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_vq_info *info;
        struct vdpa_callback cb;
        struct virtqueue *vq;
        u64 desc_addr, driver_addr, device_addr;
        /* Assume split virtqueue, switch to packed if necessary */
        struct vdpa_vq_state state = {0};
        unsigned long flags;
        u32 align, max_num, min_num = 1;
        bool may_reduce_num = true;
        int err;

        if (!name)
                return NULL;

        if (index >= vdpa->nvqs)
                return ERR_PTR(-ENOENT);

        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);

        /* Allocate and fill out our active queue description */
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        max_num = ops->get_vq_num_max(vdpa);
        if (max_num == 0) {
                err = -ENOENT;
                goto error_new_virtqueue;
        }

        if (ops->get_vq_num_min)
                min_num = ops->get_vq_num_min(vdpa);

        may_reduce_num = (max_num == min_num) ? false : true;

        /* Create the vring */
        align = ops->get_vq_align(vdpa);
        vq = vring_create_virtqueue(index, max_num, align, vdev,
                                    true, may_reduce_num, ctx,
                                    virtio_vdpa_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
                goto error_new_virtqueue;
        }

        /* Setup virtqueue callback */
        cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
        cb.private = info;
        ops->set_vq_cb(vdpa, index, &cb);
        ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

        desc_addr = virtqueue_get_desc_addr(vq);
        driver_addr = virtqueue_get_avail_addr(vq);
        device_addr = virtqueue_get_used_addr(vq);

        if (ops->set_vq_address(vdpa, index,
                                desc_addr, driver_addr,
                                device_addr)) {
                err = -EINVAL;
                goto err_vq;
        }

        /* reset virtqueue state index */
        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                struct vdpa_vq_state_packed *s = &state.packed;

                s->last_avail_counter = 1;
                s->last_avail_idx = 0;
                s->last_used_counter = 1;
                s->last_used_idx = 0;
        }
        err = ops->set_vq_state(vdpa, index, &state);
        if (err)
                goto err_vq;

        ops->set_vq_ready(vdpa, index, 1);

        vq->priv = info;
        info->vq = vq;

        spin_lock_irqsave(&vd_dev->lock, flags);
        list_add(&info->node, &vd_dev->virtqueues);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        return vq;

err_vq:
        vring_del_virtqueue(vq);
error_new_virtqueue:
        ops->set_vq_ready(vdpa, index, 0);
        /* VDPA driver should make sure vq is stopped here */
        WARN_ON(ops->get_vq_ready(vdpa, index));
        kfree(info);
        return ERR_PTR(err);
}

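/*
 * Tear down one virtqueue: unlink it from the per-device list, ask the
 * parent device to deactivate it, then free the vring and the queue
 * description.
 */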
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
        struct vdpa_device *vdpa = vd_dev->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_vq_info *info = vq->priv;
        unsigned int index = vq->index;
        unsigned long flags;

        spin_lock_irqsave(&vd_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        /* Select and deactivate the queue (best effort) */
        ops->set_vq_ready(vdpa, index, 0);

        vring_del_virtqueue(vq);

        kfree(info);
}

static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_vdpa_del_vq(vq);
}

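/*
 * find_vqs: create the requested virtqueues (entries with a NULL name
 * are skipped and left as NULL) and register the config change callback
 * with the parent device. On failure, every queue created so far is
 * torn down again.
 */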
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                struct virtqueue *vqs[],
                                vq_callback_t *callbacks[],
                                const char * const names[],
                                const bool *ctx,
                                struct irq_affinity *desc)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_callback cb;
        int i, err, queue_idx = 0;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
                                              callbacks[i], names[i], ctx ?
                                              ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto err_setup_vq;
                }
        }

        cb.callback = virtio_vdpa_config_cb;
        cb.private = vd_dev;
        ops->set_config_cb(vdpa, &cb);

        return 0;

err_setup_vq:
        virtio_vdpa_del_vqs(vdev);
        return err;
}

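/*
 * Feature negotiation: the device feature set comes straight from the
 * parent's get_device_features op; finalize_features lets the vring
 * transport filter the transport bits it supports before the negotiated
 * set is written back with vdpa_set_features().
 */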
static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        return ops->get_device_features(vdpa);
}

static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        return vdpa_set_features(vdpa, vdev->features);
}

static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_dev->vdpa;

        return dev_name(&vdpa->dev);
}

static const struct virtio_config_ops virtio_vdpa_config_ops = {
        .get = virtio_vdpa_get,
        .set = virtio_vdpa_set,
        .generation = virtio_vdpa_generation,
        .get_status = virtio_vdpa_get_status,
        .set_status = virtio_vdpa_set_status,
        .reset = virtio_vdpa_reset,
        .find_vqs = virtio_vdpa_find_vqs,
        .del_vqs = virtio_vdpa_del_vqs,
        .get_features = virtio_vdpa_get_features,
        .finalize_features = virtio_vdpa_finalize_features,
        .bus_name = virtio_vdpa_bus_name,
};

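/*
 * Release callback for the virtio device: frees the wrapper structure
 * once the last reference to the embedded struct device is dropped.
 */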
static void virtio_vdpa_release_dev(struct device *_d)
{
        struct virtio_device *vdev =
               container_of(_d, struct virtio_device, dev);
        struct virtio_vdpa_device *vd_dev =
               container_of(vdev, struct virtio_vdpa_device, vdev);

        kfree(vd_dev);
}

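/*
 * Bus probe: wrap the vDPA device in a struct virtio_device, fill in the
 * device/vendor IDs reported by the parent and register it with the
 * virtio core. Once register_virtio_device() has been called, the
 * embedded struct device owns the allocation, so the error path must use
 * put_device() instead of kfree().
 */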
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
        int ret = -EINVAL;

        vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
        if (!vd_dev)
                return -ENOMEM;

        vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
        vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
        vd_dev->vdev.config = &virtio_vdpa_config_ops;
        vd_dev->vdpa = vdpa;
        INIT_LIST_HEAD(&vd_dev->virtqueues);
        spin_lock_init(&vd_dev->lock);

        vd_dev->vdev.id.device = ops->get_device_id(vdpa);
        if (vd_dev->vdev.id.device == 0)
                goto err;

        vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
        ret = register_virtio_device(&vd_dev->vdev);
        reg_dev = vd_dev;
        if (ret)
                goto err;

        vdpa_set_drvdata(vdpa, vd_dev);

        return 0;

err:
        if (reg_dev)
                put_device(&vd_dev->vdev.dev);
        else
                kfree(vd_dev);
        return ret;
}

static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
        struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

        unregister_virtio_device(&vd_dev->vdev);
}

static struct vdpa_driver virtio_vdpa_driver = {
        .driver = {
                .name = "virtio_vdpa",
        },
        .probe = virtio_vdpa_probe,
        .remove = virtio_vdpa_remove,
};

module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);