// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#define MSI_IOVA_BASE		0x8000000
#define MSI_IOVA_LENGTH		0x100000

#define VIOMMU_REQUEST_VQ	0
#define VIOMMU_EVENT_VQ		1
#define VIOMMU_NR_VQS		2

struct viommu_dev {
	struct iommu_device iommu;
	struct device *dev;
	struct virtio_device *vdev;

	struct ida domain_ids;

	struct virtqueue *vqs[VIOMMU_NR_VQS];
	spinlock_t request_lock;
	struct list_head requests;
	void *evts;

	/* Device configuration */
	struct iommu_domain_geometry geometry;
	u64 pgsize_bitmap;
	u32 first_domain;
	u32 last_domain;
	/* Supported MAP flags */
	u32 map_flags;
	u32 probe_size;
};

struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	struct mutex mutex; /* protects viommu pointer */
	unsigned int id;
	u32 map_flags;

	spinlock_t mappings_lock;
	struct rb_root_cached mappings;

	unsigned long nr_endpoints;
};

struct viommu_endpoint {
	struct device *dev;
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	struct list_head resv_regions;
};

struct viommu_request {
	struct list_head list;
	void *writeback;
	unsigned int write_offset;
	unsigned int len;
	char buf[];
};

#define VIOMMU_FAULT_RESV_MASK	0xffffff00

struct viommu_event {
	union {
		u32 head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain) \
	container_of(domain, struct viommu_domain, domain)

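/* Convert the status written by the device in the request tail to an errno */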
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

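/* Write a status into the request tail, e.g. when the device didn't fill it */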
static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

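/*
 * Return the offset of the device-writable part of the request buffer: the
 * tail, preceded by the probe property area for PROBE requests.
 */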
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

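/* Take the request lock and complete all in-flight requests */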
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue or wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on the stack and drops the lock between add_req()
 * and sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

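/* Queue a request without writeback; it completes on a later sync */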
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * Return 0 on success, or -ENOMEM if the mapping couldn't be allocated.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = iova + size - 1;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

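/*
 * Parse a RESV_MEM probe property and add the region it describes to the
 * endpoint's resv_regions list. An unknown subtype is treated as reserved.
 */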
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		/* Fall-through */
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&vdev->resv_regions, &region->list);
	return 0;
}

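/* Send a PROBE request for the endpoint and parse the returned properties */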
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

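/* Decode a fault event reported by the device and log it */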
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

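/* Event virtqueue callback: handle fault events and recycle their buffers */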
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}

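/*
 * Finish initializing a domain on first attach, once the owning viommu is
 * known: inherit its page size mask, geometry and map flags, and allocate a
 * domain ID within the device's supported range.
 */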
static int viommu_domain_finalise(struct viommu_dev *viommu,
				  struct iommu_domain *domain)
{
	int ret;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	vdomain->viommu = viommu;
	vdomain->map_flags = viommu->map_flags;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret >= 0)
		vdomain->id = (unsigned int)ret;

	return ret > 0 ? 0 : ret;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev->viommu, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the old
	 * domain isn't attached to any endpoint, all mappings are removed from
	 * the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

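/*
 * Record the mapping in the internal tree, then send a MAP request. When no
 * endpoint is attached yet the request is skipped; mappings are replayed on
 * the first attach.
 */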
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(iova + size - 1),
		.flags = cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}

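/*
 * Remove the mappings from the internal tree and queue an UNMAP request. The
 * request isn't synced here; it completes in viommu_iotlb_sync().
 */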
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size, struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

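/* Translate an IOVA by looking it up in the internal mapping tree */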
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

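/*
 * Report the reserved regions discovered during probe, adding a software MSI
 * window when the device didn't provide a bypass MSI region.
 */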
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);

	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

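/*
 * Set up IOMMU data for a new endpoint: allocate a viommu_endpoint, probe
 * optional properties, and add the device to an IOMMU group.
 */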
static int viommu_add_device(struct device *dev)
{
	int ret;
	struct iommu_group *group;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return -ENODEV;

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return -ENODEV;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	fwspec->iommu_priv = vdev;

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	ret = iommu_device_link(&viommu->iommu, dev);
	if (ret)
		goto err_free_dev;

	/*
	 * Last step creates a default domain and attaches to it. Everything
	 * must be ready.
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink_dev;
	}

	iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);

err_unlink_dev:
	iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ret;
}

static void viommu_remove_device(struct device *dev)
{
	struct viommu_endpoint *vdev;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = fwspec->iommu_priv;

	iommu_group_remove_device(dev);
	iommu_device_unlink(&vdev->viommu->iommu, dev);
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops viommu_ops = {
	.domain_alloc = viommu_domain_alloc,
	.domain_free = viommu_domain_free,
	.attach_dev = viommu_attach_dev,
	.map = viommu_map,
	.unmap = viommu_unmap,
	.iova_to_phys = viommu_iova_to_phys,
	.iotlb_sync = viommu_iotlb_sync,
	.add_device = viommu_add_device,
	.remove_device = viommu_remove_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.of_xlate = viommu_of_xlate,
};

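/*
 * Set up the two virtqueues. The request queue has no callback: requests
 * complete synchronously, polled by __viommu_sync_req().
 */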
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

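/* Fill the event virtqueue with buffers for the device to report faults */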
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

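/*
 * Probe and initialize the device: check features, read the config space,
 * register the IOMMU and install viommu_ops on the supported bus types.
 */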
static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
		     &viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.start,
			     &input_start);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.end,
			     &input_end);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.start,
			     &viommu->first_domain);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.end,
			     &viommu->last_domain);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
			     struct virtio_iommu_config, probe_size,
			     &viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");