// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#include "dma-iommu.h"

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device iommu;
	struct device *dev;
	struct virtio_device *vdev;

	struct ida domain_ids;

	struct virtqueue *vqs[VIOMMU_NR_VQS];
	spinlock_t request_lock;
	struct list_head requests;
	void *evts;

	/* Device configuration */
	struct iommu_domain_geometry geometry;
	u64 pgsize_bitmap;
	u32 first_domain;
	u32 last_domain;
	/* Supported MAP flags */
	u32 map_flags;
	u32 probe_size;
};

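/*
 * An IOVA range mapped to a contiguous physical range, tracked in the
 * domain's interval tree so that mappings can be replayed when the domain is
 * reattached to an endpoint and looked up by iova_to_phys().
 */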
struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	struct mutex mutex; /* protects viommu pointer */
	unsigned int id;
	u32 map_flags;

	spinlock_t mappings_lock;
	struct rb_root_cached mappings;

	unsigned long nr_endpoints;
	bool bypass;
};

struct viommu_endpoint {
	struct device *dev;
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	struct list_head resv_regions;
};

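/*
 * A request pending on the request virtqueue. @buf holds the device-readable
 * part of the request followed by a device-writable tail starting at
 * @write_offset. When @writeback is set, the writable part is copied back to
 * the caller's buffer once the request completes.
 */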
struct viommu_request {
	struct list_head list;
	void *writeback;
	unsigned int write_offset;
	unsigned int len;
	char buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32 head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

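/*
 * Return the offset of the device-writable part of a request: the tail
 * status, preceded by the probe output buffer for a PROBE request.
 */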
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = end;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

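/*
 * Parse a RESV_MEM probe property and add the corresponding reserved region
 * to the endpoint's list, keeping the list sorted by address.
 */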
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI,
						 GFP_KERNEL);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}

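/*
 * Send a PROBE request for the endpoint and parse the returned properties
 * (currently only reserved memory regions).
 */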
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

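/* Log a translation fault received on the event virtqueue */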
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}

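/*
 * Bind a domain to a viommu instance: allocate a domain ID, inherit the page
 * size bitmap and aperture, and set up bypass or identity mappings for an
 * IOMMU_DOMAIN_IDENTITY domain.
 */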
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return ret;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

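/*
 * Attach an endpoint to a domain: finalise the domain on first use, send an
 * ATTACH request for each of the endpoint's IDs, and replay existing mappings
 * if this is the first endpoint attached to the domain.
 */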
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		ret = -EINVAL;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

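/*
 * Record the mapping in the internal tree, then send a MAP request to the
 * device. If no endpoint is attached yet, the request is deferred until the
 * next attach, via viommu_replay_mappings().
 */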
static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(end),
		.flags = cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, end);
	else if (mapped)
		*mapped = size;

	return ret;
}

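/*
 * Remove the mappings from the internal tree and queue an UNMAP request. The
 * request is not kicked immediately; it is sent out when the core calls
 * viommu_iotlb_sync().
 */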
static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI,
					      GFP_KERNEL);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return device_match_fwnode(dev->parent, data);
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

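/*
 * Set up the per-endpoint private data and query additional endpoint
 * properties through a PROBE request when the device supports it.
 */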
static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		return false;
	}
}

static struct iommu_ops viommu_ops = {
	.capable = viommu_capable,
	.domain_alloc = viommu_domain_alloc,
	.probe_device = viommu_probe_device,
	.probe_finalize = viommu_probe_finalize,
	.release_device = viommu_release_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = viommu_attach_dev,
		.map_pages = viommu_map_pages,
		.unmap_pages = viommu_unmap_pages,
		.iova_to_phys = viommu_iova_to_phys,
		.iotlb_sync = viommu_iotlb_sync,
		.free = viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

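/* Pre-fill the event virtqueue with buffers so the device can report faults */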
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");