// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

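/* Fallback software-mapped MSI window, for devices that advertise none */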
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
	bool				bypass;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

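/*
 * struct viommu_request - serialized copy of one virtio-iommu request
 * @list:		in-flight requests list
 * @writeback:		caller buffer that the device-written part is copied
 *			back into on completion
 * @write_offset:	offset of the device-writable part within @buf
 * @len:		total length of @buf
 * @buf:		request copy: device-readable head followed by the
 *			device-writable tail
 */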
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

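/* Reserved bits of an event head; they must be clear for a valid fault */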
#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

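/*
 * Each request ends with a device-writable tail holding the status. PROBE
 * requests also have a device-writable properties area, probe_size bytes
 * long, just before the tail. Return the offset at which the device-writable
 * part of the buffer begins.
 */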
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue or wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on the stack and drops the lock between add_req()
 * and sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

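	/*
	 * Split the buffer into a device-readable descriptor (head and body)
	 * and a device-writable one (the status tail, plus the properties
	 * area for PROBE requests).
	 */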
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = end;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

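	/*
	 * vdev->resv_regions is sorted by address (see viommu_add_resv_mem()),
	 * so a single forward pass can carve the reserved holes out of
	 * [iova, limit].
	 */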
	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}

static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

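	/*
	 * Walk the properties the device wrote into the probe area. Each
	 * property starts with a type/length header; len covers both header
	 * and value.
	 */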
	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}

static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

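	/*
	 * Pick a domain ID in the range advertised by the device with the
	 * DOMAIN_RANGE feature (the full 32-bit space by default).
	 */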
	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is first detached from the old one. If, as a result, the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(end),
		.flags = cpu_to_le32(flags),
	};

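	/*
	 * With no endpoint attached, the mapping is only recorded in the
	 * internal tree; viommu_replay_mappings() will send it to the device
	 * at attach time.
	 */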
	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, end);
	else if (mapped)
		*mapped = size;

	return ret;
}

static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

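	/*
	 * Only queue the UNMAP request here; it is kicked and waited for in
	 * viommu_iotlb_sync().
	 */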
	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops viommu_ops = {
	.domain_alloc = viommu_domain_alloc,
	.probe_device = viommu_probe_device,
	.probe_finalize = viommu_probe_finalize,
	.release_device = viommu_release_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = viommu_attach_dev,
		.map_pages = viommu_map_pages,
		.unmap_pages = viommu_unmap_pages,
		.iova_to_phys = viommu_iova_to_phys,
		.iotlb_sync = viommu_iotlb_sync,
		.free = viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

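/*
 * Fill the event queue with receive buffers. viommu_event_handler() returns
 * each buffer to the queue once the event it carried has been handled.
 */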
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");