Commit | Line | Data |
---|---|---|
edcd69ab JPB |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Virtio driver for the paravirtualized IOMMU |
4 | * |
ae24fb49 | 5 | * Copyright (C) 2019 Arm Limited |
edcd69ab JPB |
6 | */ |
7 | ||
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
9 | ||
10 | #include <linux/amba/bus.h> | |
11 | #include <linux/delay.h> | |
12 | #include <linux/dma-iommu.h> | |
13 | #include <linux/freezer.h> | |
14 | #include <linux/interval_tree.h> | |
15 | #include <linux/iommu.h> | |
16 | #include <linux/module.h> | |
17 | #include <linux/of_iommu.h> | |
18 | #include <linux/of_platform.h> | |
19 | #include <linux/pci.h> | |
20 | #include <linux/platform_device.h> | |
21 | #include <linux/virtio.h> | |
22 | #include <linux/virtio_config.h> | |
23 | #include <linux/virtio_ids.h> | |
24 | #include <linux/wait.h> | |
25 | ||
26 | #include <uapi/linux/virtio_iommu.h> | |
27 | ||
28 | #define MSI_IOVA_BASE 0x8000000 | |
29 | #define MSI_IOVA_LENGTH 0x100000 | |
30 | ||
31 | #define VIOMMU_REQUEST_VQ 0 | |
169a126c JPB |
32 | #define VIOMMU_EVENT_VQ 1 |
33 | #define VIOMMU_NR_VQS 2 | |
edcd69ab JPB |
34 | |
35 | struct viommu_dev { | |
36 | struct iommu_device iommu; | |
37 | struct device *dev; | |
38 | struct virtio_device *vdev; | |
39 | ||
40 | struct ida domain_ids; | |
41 | ||
42 | struct virtqueue *vqs[VIOMMU_NR_VQS]; | |
43 | spinlock_t request_lock; | |
44 | struct list_head requests; | |
169a126c | 45 | void *evts; |
edcd69ab JPB |
46 | |
47 | /* Device configuration */ | |
48 | struct iommu_domain_geometry geometry; | |
49 | u64 pgsize_bitmap; | |
ae24fb49 JPB |
50 | u32 first_domain; |
51 | u32 last_domain; | |
52 | /* Supported MAP flags */ | |
53 | u32 map_flags; | |
2a5a3148 | 54 | u32 probe_size; |
edcd69ab JPB |
55 | }; |
56 | ||
57 | struct viommu_mapping { | |
58 | phys_addr_t paddr; | |
59 | struct interval_tree_node iova; | |
60 | u32 flags; | |
61 | }; | |
62 | ||
63 | struct viommu_domain { | |
64 | struct iommu_domain domain; | |
65 | struct viommu_dev *viommu; | |
66 | struct mutex mutex; /* protects viommu pointer */ | |
67 | unsigned int id; | |
ae24fb49 | 68 | u32 map_flags; |
edcd69ab JPB |
69 | |
70 | spinlock_t mappings_lock; | |
71 | struct rb_root_cached mappings; | |
72 | ||
73 | unsigned long nr_endpoints; | |
74 | }; | |
75 | ||
76 | struct viommu_endpoint { | |
2a5a3148 | 77 | struct device *dev; |
edcd69ab JPB |
78 | struct viommu_dev *viommu; |
79 | struct viommu_domain *vdomain; | |
2a5a3148 | 80 | struct list_head resv_regions; |
edcd69ab JPB |
81 | }; |
82 | ||
83 | struct viommu_request { | |
84 | struct list_head list; | |
85 | void *writeback; | |
86 | unsigned int write_offset; | |
87 | unsigned int len; | |
88 | char buf[]; | |
89 | }; | |
90 | ||
169a126c JPB |
91 | #define VIOMMU_FAULT_RESV_MASK 0xffffff00 /* reserved bytes of the fault head; must be zero */ |
92 | ||
93 | struct viommu_event { | |
94 | union { | |
95 | u32 head; | |
96 | struct virtio_iommu_fault fault; | |
97 | }; | |
98 | }; | |
99 | ||
edcd69ab JPB |
100 | #define to_viommu_domain(domain) \ |
101 | container_of(domain, struct viommu_domain, domain) | |
102 | ||
103 | static int viommu_get_req_errno(void *buf, size_t len) | |
104 | { | |
105 | struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); | |
106 | ||
107 | switch (tail->status) { | |
108 | case VIRTIO_IOMMU_S_OK: | |
109 | return 0; | |
110 | case VIRTIO_IOMMU_S_UNSUPP: | |
111 | return -ENOSYS; | |
112 | case VIRTIO_IOMMU_S_INVAL: | |
113 | return -EINVAL; | |
114 | case VIRTIO_IOMMU_S_RANGE: | |
115 | return -ERANGE; | |
116 | case VIRTIO_IOMMU_S_NOENT: | |
117 | return -ENOENT; | |
118 | case VIRTIO_IOMMU_S_FAULT: | |
119 | return -EFAULT; | |
ae24fb49 JPB |
120 | case VIRTIO_IOMMU_S_NOMEM: |
121 | return -ENOMEM; | |
edcd69ab JPB |
122 | case VIRTIO_IOMMU_S_IOERR: |
123 | case VIRTIO_IOMMU_S_DEVERR: | |
124 | default: | |
125 | return -EIO; | |
126 | } | |
127 | } | |
128 | ||
129 | static void viommu_set_req_status(void *buf, size_t len, int status) | |
130 | { | |
131 | struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); | |
132 | ||
133 | tail->status = status; | |
134 | } | |
135 | ||
136 | static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, | |
137 | struct virtio_iommu_req_head *req, | |
138 | size_t len) | |
139 | { | |
140 | size_t tail_size = sizeof(struct virtio_iommu_req_tail); | |
141 | ||
2a5a3148 JPB |
142 | if (req->type == VIRTIO_IOMMU_T_PROBE) |
143 | return len - viommu->probe_size - tail_size; | |
144 | ||
edcd69ab JPB |
145 | return len - tail_size; |
146 | } | |
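
Every request has the shape head, then a type-specific payload, then a tail: the driver fills everything up to the tail, and the device writes back only the tail (plus, for PROBE requests, the probe buffer that precedes it). As a hedged illustration of the arithmetic above — assuming the request layouts from `uapi/linux/virtio_iommu.h`, which place the tail last with no trailing padding — the writable offset of a MAP request is simply the offset of its tail:

```c
/*
 * Sketch, not driver code: 'viommu' is assumed to come from the
 * surrounding driver context. For a MAP request the device may only
 * write the tail, so the write descriptor starts at its offset.
 */
struct virtio_iommu_req_map req = {
	.head.type = VIRTIO_IOMMU_T_MAP,
};
off_t off = viommu_get_write_desc_offset(viommu, &req.head, sizeof(req));

/* off == sizeof(req) - sizeof(struct virtio_iommu_req_tail),
 * i.e. offsetof(struct virtio_iommu_req_map, tail) */
```
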
147 | ||
148 | /* | |
149 | * __viommu_sync_req - Complete all in-flight requests | |
150 | * | |
151 | * Wait for all added requests to complete. When this function returns, all | |
152 | * requests that were in-flight at the time of the call have completed. | |
153 | */ | |
154 | static int __viommu_sync_req(struct viommu_dev *viommu) | |
155 | { | |
156 | int ret = 0; | |
157 | unsigned int len; | |
158 | size_t write_len; | |
159 | struct viommu_request *req; | |
160 | struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; | |
161 | ||
162 | assert_spin_locked(&viommu->request_lock); | |
163 | ||
164 | virtqueue_kick(vq); | |
165 | ||
166 | while (!list_empty(&viommu->requests)) { | |
167 | len = 0; | |
168 | req = virtqueue_get_buf(vq, &len); | |
169 | if (!req) | |
170 | continue; | |
171 | ||
172 | if (!len) | |
173 | viommu_set_req_status(req->buf, req->len, | |
174 | VIRTIO_IOMMU_S_IOERR); | |
175 | ||
176 | write_len = req->len - req->write_offset; | |
177 | if (req->writeback && len == write_len) | |
178 | memcpy(req->writeback, req->buf + req->write_offset, | |
179 | write_len); | |
180 | ||
181 | list_del(&req->list); | |
182 | kfree(req); | |
183 | } | |
184 | ||
185 | return ret; | |
186 | } | |
187 | ||
188 | static int viommu_sync_req(struct viommu_dev *viommu) | |
189 | { | |
190 | int ret; | |
191 | unsigned long flags; | |
192 | ||
193 | spin_lock_irqsave(&viommu->request_lock, flags); | |
194 | ret = __viommu_sync_req(viommu); | |
195 | if (ret) | |
196 | dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); | |
197 | spin_unlock_irqrestore(&viommu->request_lock, flags); | |
198 | ||
199 | return ret; | |
200 | } | |
201 | ||
202 | /* | |
203 | * __viommu_add_request - Add one request to the queue | |
204 | * @buf: pointer to the request buffer | |
205 | * @len: length of the request buffer | |
206 | * @writeback: copy data back to the buffer when the request completes. | |
207 | * | |
208 | * Add a request to the queue. Only synchronize the queue if it's already full. | |
209 | * Otherwise don't kick the queue nor wait for requests to complete. | |
210 | * | |
211 | * When @writeback is true, data written by the device, including the request | |
212 | * status, is copied into @buf after the request completes. This is unsafe if | |
213 | * the caller allocates @buf on stack and drops the lock between add_req() and | |
214 | * sync_req(). | |
215 | * | |
216 | * Return 0 if the request was successfully added to the queue. | |
217 | */ | |
218 | static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len, | |
219 | bool writeback) | |
220 | { | |
221 | int ret; | |
222 | off_t write_offset; | |
223 | struct viommu_request *req; | |
224 | struct scatterlist top_sg, bottom_sg; | |
225 | struct scatterlist *sg[2] = { &top_sg, &bottom_sg }; | |
226 | struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; | |
227 | ||
228 | assert_spin_locked(&viommu->request_lock); | |
229 | ||
230 | write_offset = viommu_get_write_desc_offset(viommu, buf, len); | |
231 | if (write_offset <= 0) | |
232 | return -EINVAL; | |
233 | ||
234 | req = kzalloc(sizeof(*req) + len, GFP_ATOMIC); | |
235 | if (!req) | |
236 | return -ENOMEM; | |
237 | ||
238 | req->len = len; | |
239 | if (writeback) { | |
240 | req->writeback = buf + write_offset; | |
241 | req->write_offset = write_offset; | |
242 | } | |
243 | memcpy(&req->buf, buf, write_offset); | |
244 | ||
245 | sg_init_one(&top_sg, req->buf, write_offset); | |
246 | sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset); | |
247 | ||
248 | ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); | |
249 | if (ret == -ENOSPC) { | |
250 | /* If the queue is full, sync and retry */ | |
251 | if (!__viommu_sync_req(viommu)) | |
252 | ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); | |
253 | } | |
254 | if (ret) | |
255 | goto err_free; | |
256 | ||
257 | list_add_tail(&req->list, &viommu->requests); | |
258 | return 0; | |
259 | ||
260 | err_free: | |
261 | kfree(req); | |
262 | return ret; | |
263 | } | |
264 | ||
265 | static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len) | |
266 | { | |
267 | int ret; | |
268 | unsigned long flags; | |
269 | ||
270 | spin_lock_irqsave(&viommu->request_lock, flags); | |
271 | ret = __viommu_add_req(viommu, buf, len, false); | |
272 | if (ret) | |
273 | dev_dbg(viommu->dev, "could not add request: %d\n", ret); | |
274 | spin_unlock_irqrestore(&viommu->request_lock, flags); | |
275 | ||
276 | return ret; | |
277 | } | |
278 | ||
279 | /* | |
280 | * Send a request and wait for it to complete. Return the request status (as an | |
281 | * errno) | |
282 | */ | |
283 | static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, | |
284 | size_t len) | |
285 | { | |
286 | int ret; | |
287 | unsigned long flags; | |
288 | ||
289 | spin_lock_irqsave(&viommu->request_lock, flags); | |
290 | ||
291 | ret = __viommu_add_req(viommu, buf, len, true); | |
292 | if (ret) { | |
293 | dev_dbg(viommu->dev, "could not add request (%d)\n", ret); | |
294 | goto out_unlock; | |
295 | } | |
296 | ||
297 | ret = __viommu_sync_req(viommu); | |
298 | if (ret) { | |
299 | dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); | |
300 | /* Fall-through (get the actual request status) */ | |
301 | } | |
302 | ||
303 | ret = viommu_get_req_errno(buf, len); | |
304 | out_unlock: | |
305 | spin_unlock_irqrestore(&viommu->request_lock, flags); | |
306 | return ret; | |
307 | } | |
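
As a usage sketch of the helper above: DETACH is defined in the uapi header but is not sent by this version of the driver, so it serves here purely as a hypothetical example of the calling convention. An on-stack buffer with writeback is safe because the request lock is held from `__viommu_add_req()` through `__viommu_sync_req()`:

```c
/* Sketch only: 'viommu', 'vdomain' and 'endpoint_id' are assumed to
 * exist in the surrounding driver context. */
struct virtio_iommu_req_detach detach = {
	.head.type	= VIRTIO_IOMMU_T_DETACH,
	.domain		= cpu_to_le32(vdomain->id),
	.endpoint	= cpu_to_le32(endpoint_id),
};
int ret = viommu_send_req_sync(viommu, &detach, sizeof(detach));
if (ret)
	dev_err(viommu->dev, "detach failed (%d)\n", ret);
```
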
308 | ||
309 | /* | |
310 | * viommu_add_mapping - add a mapping to the internal tree | |
311 | * | |
312 | * On success, return 0. Otherwise return -ENOMEM. |
313 | */ | |
314 | static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova, | |
315 | phys_addr_t paddr, size_t size, u32 flags) | |
316 | { | |
317 | unsigned long irqflags; | |
318 | struct viommu_mapping *mapping; | |
319 | ||
320 | mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC); | |
321 | if (!mapping) | |
322 | return -ENOMEM; | |
323 | ||
324 | mapping->paddr = paddr; | |
325 | mapping->iova.start = iova; | |
326 | mapping->iova.last = iova + size - 1; | |
327 | mapping->flags = flags; | |
328 | ||
329 | spin_lock_irqsave(&vdomain->mappings_lock, irqflags); | |
330 | interval_tree_insert(&mapping->iova, &vdomain->mappings); | |
331 | spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags); | |
332 | ||
333 | return 0; | |
334 | } | |
335 | ||
336 | /* | |
337 | * viommu_del_mappings - remove mappings from the internal tree | |
338 | * | |
339 | * @vdomain: the domain | |
340 | * @iova: start of the range | |
341 | * @size: size of the range. A size of 0 corresponds to the entire address | |
342 | * space. | |
343 | * | |
344 | * On success, returns the number of unmapped bytes (>= size) | |
345 | */ | |
346 | static size_t viommu_del_mappings(struct viommu_domain *vdomain, | |
347 | unsigned long iova, size_t size) | |
348 | { | |
349 | size_t unmapped = 0; | |
350 | unsigned long flags; | |
351 | unsigned long last = iova + size - 1; | |
352 | struct viommu_mapping *mapping = NULL; | |
353 | struct interval_tree_node *node, *next; | |
354 | ||
355 | spin_lock_irqsave(&vdomain->mappings_lock, flags); | |
356 | next = interval_tree_iter_first(&vdomain->mappings, iova, last); | |
357 | while (next) { | |
358 | node = next; | |
359 | mapping = container_of(node, struct viommu_mapping, iova); | |
360 | next = interval_tree_iter_next(node, iova, last); | |
361 | ||
362 | /* Trying to split a mapping? */ | |
363 | if (mapping->iova.start < iova) | |
364 | break; | |
365 | ||
366 | /* | |
367 | * Virtio-iommu doesn't allow UNMAP to split a mapping created | |
368 | * with a single MAP request, so remove the full mapping. | |
369 | */ | |
370 | unmapped += mapping->iova.last - mapping->iova.start + 1; | |
371 | ||
372 | interval_tree_remove(node, &vdomain->mappings); | |
373 | kfree(mapping); | |
374 | } | |
375 | spin_unlock_irqrestore(&vdomain->mappings_lock, flags); | |
376 | ||
377 | return unmapped; | |
378 | } | |
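
To make the no-split rule concrete, a hypothetical example: suppose a single MAP request created a 16 KiB mapping at IOVA 0x10000. A delete range that starts at the mapping's start removes the whole mapping and reports more bytes than requested; a range that starts mid-mapping trips the split check and removes nothing:

```c
/* Hypothetical: a single MAP created a 16 KiB (0x4000) mapping at
 * IOVA 0x10000. Each call below assumes that initial state. */

/* Range begins at the mapping start: the whole mapping is removed. */
size_t unmapped = viommu_del_mappings(vdomain, 0x10000, 0x1000);
/* unmapped == 0x4000, i.e. more than the 0x1000 that was asked for */

/* A range beginning mid-mapping, e.g. (vdomain, 0x11000, 0x1000),
 * would hit the split check above and return 0. */
```
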
379 | ||
380 | /* | |
381 | * viommu_replay_mappings - re-send MAP requests | |
382 | * | |
383 | * When reattaching a domain that was previously detached from all endpoints, | |
384 | * mappings were deleted from the device. Re-create the mappings available in | |
385 | * the internal tree. | |
386 | */ | |
387 | static int viommu_replay_mappings(struct viommu_domain *vdomain) | |
388 | { | |
389 | int ret = 0; | |
390 | unsigned long flags; | |
391 | struct viommu_mapping *mapping; | |
392 | struct interval_tree_node *node; | |
393 | struct virtio_iommu_req_map map; | |
394 | ||
395 | spin_lock_irqsave(&vdomain->mappings_lock, flags); | |
396 | node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); | |
397 | while (node) { | |
398 | mapping = container_of(node, struct viommu_mapping, iova); | |
399 | map = (struct virtio_iommu_req_map) { | |
400 | .head.type = VIRTIO_IOMMU_T_MAP, | |
401 | .domain = cpu_to_le32(vdomain->id), | |
402 | .virt_start = cpu_to_le64(mapping->iova.start), | |
403 | .virt_end = cpu_to_le64(mapping->iova.last), | |
404 | .phys_start = cpu_to_le64(mapping->paddr), | |
405 | .flags = cpu_to_le32(mapping->flags), | |
406 | }; | |
407 | ||
408 | ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); | |
409 | if (ret) | |
410 | break; | |
411 | ||
412 | node = interval_tree_iter_next(node, 0, -1UL); | |
413 | } | |
414 | spin_unlock_irqrestore(&vdomain->mappings_lock, flags); | |
415 | ||
416 | return ret; | |
417 | } | |
418 | ||
2a5a3148 JPB |
419 | static int viommu_add_resv_mem(struct viommu_endpoint *vdev, |
420 | struct virtio_iommu_probe_resv_mem *mem, | |
421 | size_t len) | |
422 | { | |
423 | size_t size; | |
424 | u64 start64, end64; | |
425 | phys_addr_t start, end; | |
426 | struct iommu_resv_region *region = NULL; | |
427 | unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | |
428 | ||
429 | start = start64 = le64_to_cpu(mem->start); | |
430 | end = end64 = le64_to_cpu(mem->end); | |
431 | size = end64 - start64 + 1; | |
432 | ||
433 | /* Catch any overflow, including the unlikely end64 - start64 + 1 == 0 */ |
434 | if (start != start64 || end != end64 || size < end64 - start64) | |
435 | return -EOVERFLOW; | |
436 | ||
437 | if (len < sizeof(*mem)) | |
438 | return -EINVAL; | |
439 | ||
440 | switch (mem->subtype) { | |
441 | default: | |
442 | dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", | |
443 | mem->subtype); | |
444 | /* Fall-through */ | |
445 | case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: | |
446 | region = iommu_alloc_resv_region(start, size, 0, | |
447 | IOMMU_RESV_RESERVED); | |
448 | break; | |
449 | case VIRTIO_IOMMU_RESV_MEM_T_MSI: | |
450 | region = iommu_alloc_resv_region(start, size, prot, | |
451 | IOMMU_RESV_MSI); | |
452 | break; | |
453 | } | |
454 | if (!region) | |
455 | return -ENOMEM; | |
456 | ||
457 | list_add(&vdev->resv_regions, ®ion->list); | |
458 | return 0; | |
459 | } | |
460 | ||
461 | static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) | |
462 | { | |
463 | int ret; | |
464 | u16 type, len; | |
465 | size_t cur = 0; | |
466 | size_t probe_len; | |
467 | struct virtio_iommu_req_probe *probe; | |
468 | struct virtio_iommu_probe_property *prop; | |
469 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); | |
470 | struct viommu_endpoint *vdev = fwspec->iommu_priv; | |
471 | ||
472 | if (!fwspec->num_ids) | |
473 | return -EINVAL; | |
474 | ||
475 | probe_len = sizeof(*probe) + viommu->probe_size + | |
476 | sizeof(struct virtio_iommu_req_tail); | |
477 | probe = kzalloc(probe_len, GFP_KERNEL); | |
478 | if (!probe) | |
479 | return -ENOMEM; | |
480 | ||
481 | probe->head.type = VIRTIO_IOMMU_T_PROBE; | |
482 | /* | |
483 | * For now, assume that properties of an endpoint that outputs multiple | |
484 | * IDs are consistent. Only probe the first one. | |
485 | */ | |
486 | probe->endpoint = cpu_to_le32(fwspec->ids[0]); | |
487 | ||
488 | ret = viommu_send_req_sync(viommu, probe, probe_len); | |
489 | if (ret) | |
490 | goto out_free; | |
491 | ||
492 | prop = (void *)probe->properties; | |
493 | type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; | |
494 | ||
495 | while (type != VIRTIO_IOMMU_PROBE_T_NONE && | |
496 | cur < viommu->probe_size) { | |
497 | len = le16_to_cpu(prop->length) + sizeof(*prop); | |
498 | ||
499 | switch (type) { | |
500 | case VIRTIO_IOMMU_PROBE_T_RESV_MEM: | |
501 | ret = viommu_add_resv_mem(vdev, (void *)prop, len); | |
502 | break; | |
503 | default: | |
504 | dev_err(dev, "unknown viommu prop 0x%x\n", type); | |
505 | } | |
506 | ||
507 | if (ret) | |
508 | dev_err(dev, "failed to parse viommu prop 0x%x\n", type); | |
509 | ||
510 | cur += len; | |
511 | if (cur >= viommu->probe_size) | |
512 | break; | |
513 | ||
514 | prop = (void *)probe->properties + cur; | |
515 | type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; | |
516 | } | |
517 | ||
518 | out_free: | |
519 | kfree(probe); | |
520 | return ret; | |
521 | } | |
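
For reference, a hedged sketch of one property the loop above may find in the probe buffer: a reserved-memory record describing an MSI doorbell (layout per `uapi/linux/virtio_iommu.h`; the addresses are invented). Note that `length` counts only the payload, which is why the parser adds `sizeof(*prop)` to it:

```c
/* Example property, as a device might write it (addresses invented) */
struct virtio_iommu_probe_resv_mem mem = {
	.head.type	= cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM),
	.head.length	= cpu_to_le16(sizeof(mem) - sizeof(mem.head)),
	.subtype	= VIRTIO_IOMMU_RESV_MEM_T_MSI,
	.start		= cpu_to_le64(0x08000000),
	.end		= cpu_to_le64(0x080fffff),
};
/* viommu_add_resv_mem() turns this record into an IOMMU_RESV_MSI
 * region spanning [0x08000000, 0x080fffff] */
```
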
522 | ||
169a126c JPB |
523 | static int viommu_fault_handler(struct viommu_dev *viommu, |
524 | struct virtio_iommu_fault *fault) | |
525 | { | |
526 | char *reason_str; | |
527 | ||
528 | u8 reason = fault->reason; | |
529 | u32 flags = le32_to_cpu(fault->flags); | |
530 | u32 endpoint = le32_to_cpu(fault->endpoint); | |
531 | u64 address = le64_to_cpu(fault->address); | |
532 | ||
533 | switch (reason) { | |
534 | case VIRTIO_IOMMU_FAULT_R_DOMAIN: | |
535 | reason_str = "domain"; | |
536 | break; | |
537 | case VIRTIO_IOMMU_FAULT_R_MAPPING: | |
538 | reason_str = "page"; | |
539 | break; | |
540 | case VIRTIO_IOMMU_FAULT_R_UNKNOWN: | |
541 | default: | |
542 | reason_str = "unknown"; | |
543 | break; | |
544 | } | |
545 | ||
546 | /* TODO: find EP by ID and report_iommu_fault */ | |
547 | if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS) | |
548 | dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", | |
549 | reason_str, endpoint, address, | |
550 | flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "", | |
551 | flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "", | |
552 | flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : ""); | |
553 | else | |
554 | dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", | |
555 | reason_str, endpoint); | |
556 | return 0; | |
557 | } | |
558 | ||
559 | static void viommu_event_handler(struct virtqueue *vq) | |
560 | { | |
561 | int ret; | |
562 | unsigned int len; | |
563 | struct scatterlist sg[1]; | |
564 | struct viommu_event *evt; | |
565 | struct viommu_dev *viommu = vq->vdev->priv; | |
566 | ||
567 | while ((evt = virtqueue_get_buf(vq, &len)) != NULL) { | |
568 | if (len > sizeof(*evt)) { | |
569 | dev_err(viommu->dev, | |
570 | "invalid event buffer (len %u != %zu)\n", | |
571 | len, sizeof(*evt)); | |
572 | } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { | |
573 | viommu_fault_handler(viommu, &evt->fault); | |
574 | } | |
575 | ||
576 | sg_init_one(sg, evt, sizeof(*evt)); | |
577 | ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC); | |
578 | if (ret) | |
579 | dev_err(viommu->dev, "could not add event buffer\n"); | |
580 | } | |
581 | ||
582 | virtqueue_kick(vq); | |
583 | } | |
584 | ||
edcd69ab JPB |
585 | /* IOMMU API */ |
586 | ||
587 | static struct iommu_domain *viommu_domain_alloc(unsigned type) | |
588 | { | |
589 | struct viommu_domain *vdomain; | |
590 | ||
591 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | |
592 | return NULL; | |
593 | ||
594 | vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL); | |
595 | if (!vdomain) | |
596 | return NULL; | |
597 | ||
598 | mutex_init(&vdomain->mutex); | |
599 | spin_lock_init(&vdomain->mappings_lock); | |
600 | vdomain->mappings = RB_ROOT_CACHED; | |
601 | ||
602 | if (type == IOMMU_DOMAIN_DMA && | |
603 | iommu_get_dma_cookie(&vdomain->domain)) { | |
604 | kfree(vdomain); | |
605 | return NULL; | |
606 | } | |
607 | ||
608 | return &vdomain->domain; | |
609 | } | |
610 | ||
611 | static int viommu_domain_finalise(struct viommu_dev *viommu, | |
612 | struct iommu_domain *domain) | |
613 | { | |
614 | int ret; | |
615 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
edcd69ab JPB |
616 | |
617 | vdomain->viommu = viommu; | |
ae24fb49 | 618 | vdomain->map_flags = viommu->map_flags; |
edcd69ab JPB |
619 | |
620 | domain->pgsize_bitmap = viommu->pgsize_bitmap; | |
621 | domain->geometry = viommu->geometry; | |
622 | ||
ae24fb49 JPB |
623 | ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, |
624 | viommu->last_domain, GFP_KERNEL); | |
edcd69ab JPB |
625 | if (ret >= 0) |
626 | vdomain->id = (unsigned int)ret; | |
627 | ||
628 | return ret < 0 ? ret : 0; |
629 | } | |
630 | ||
631 | static void viommu_domain_free(struct iommu_domain *domain) | |
632 | { | |
633 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
634 | ||
635 | iommu_put_dma_cookie(domain); | |
636 | ||
637 | /* Free all remaining mappings (size 2^64) */ | |
638 | viommu_del_mappings(vdomain, 0, 0); | |
639 | ||
640 | if (vdomain->viommu) | |
641 | ida_free(&vdomain->viommu->domain_ids, vdomain->id); | |
642 | ||
643 | kfree(vdomain); | |
644 | } | |
645 | ||
646 | static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) | |
647 | { | |
648 | int i; | |
649 | int ret = 0; | |
650 | struct virtio_iommu_req_attach req; | |
651 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); | |
652 | struct viommu_endpoint *vdev = fwspec->iommu_priv; | |
653 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
654 | ||
655 | mutex_lock(&vdomain->mutex); | |
656 | if (!vdomain->viommu) { | |
657 | /* | |
658 | * Properly initialize the domain now that we know which viommu | |
659 | * owns it. | |
660 | */ | |
661 | ret = viommu_domain_finalise(vdev->viommu, domain); | |
662 | } else if (vdomain->viommu != vdev->viommu) { | |
663 | dev_err(dev, "cannot attach to foreign vIOMMU\n"); | |
664 | ret = -EXDEV; | |
665 | } | |
666 | mutex_unlock(&vdomain->mutex); | |
667 | ||
668 | if (ret) | |
669 | return ret; | |
670 | ||
671 | /* | |
672 | * In the virtio-iommu device, when attaching the endpoint to a new | |
673 | * domain, it is detached from the old one and, if, as a result, the |
674 | * old domain isn't attached to any endpoint, all mappings are removed | |
675 | * from the old domain and it is freed. | |
676 | * | |
677 | * In the driver the old domain still exists, and its mappings will be | |
678 | * recreated if it gets reattached to an endpoint. Otherwise it will be | |
679 | * freed explicitly. | |
680 | * | |
681 | * vdev->vdomain is protected by group->mutex | |
682 | */ | |
683 | if (vdev->vdomain) | |
684 | vdev->vdomain->nr_endpoints--; | |
685 | ||
686 | req = (struct virtio_iommu_req_attach) { | |
687 | .head.type = VIRTIO_IOMMU_T_ATTACH, | |
688 | .domain = cpu_to_le32(vdomain->id), | |
689 | }; | |
690 | ||
691 | for (i = 0; i < fwspec->num_ids; i++) { | |
692 | req.endpoint = cpu_to_le32(fwspec->ids[i]); | |
693 | ||
694 | ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); | |
695 | if (ret) | |
696 | return ret; | |
697 | } | |
698 | ||
699 | if (!vdomain->nr_endpoints) { | |
700 | /* | |
701 | * This endpoint is the first to be attached to the domain. | |
702 | * Replay existing mappings (e.g. SW MSI). | |
703 | */ | |
704 | ret = viommu_replay_mappings(vdomain); | |
705 | if (ret) | |
706 | return ret; | |
707 | } | |
708 | ||
709 | vdomain->nr_endpoints++; | |
710 | vdev->vdomain = vdomain; | |
711 | ||
712 | return 0; | |
713 | } | |
714 | ||
715 | static int viommu_map(struct iommu_domain *domain, unsigned long iova, | |
781ca2de | 716 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
edcd69ab JPB |
717 | { |
718 | int ret; | |
ae24fb49 | 719 | u32 flags; |
edcd69ab JPB |
720 | struct virtio_iommu_req_map map; |
721 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
722 | ||
723 | flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) | | |
724 | (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | | |
725 | (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); | |
726 | ||
ae24fb49 JPB |
727 | if (flags & ~vdomain->map_flags) |
728 | return -EINVAL; | |
729 | ||
edcd69ab JPB |
730 | ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); |
731 | if (ret) | |
732 | return ret; | |
733 | ||
734 | map = (struct virtio_iommu_req_map) { | |
735 | .head.type = VIRTIO_IOMMU_T_MAP, | |
736 | .domain = cpu_to_le32(vdomain->id), | |
737 | .virt_start = cpu_to_le64(iova), | |
738 | .phys_start = cpu_to_le64(paddr), | |
739 | .virt_end = cpu_to_le64(iova + size - 1), | |
740 | .flags = cpu_to_le32(flags), | |
741 | }; | |
742 | ||
743 | if (!vdomain->nr_endpoints) | |
744 | return 0; | |
745 | ||
746 | ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); | |
747 | if (ret) | |
748 | viommu_del_mappings(vdomain, iova, size); | |
749 | ||
750 | return ret; | |
751 | } | |
752 | ||
753 | static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova, | |
56f8af5e | 754 | size_t size, struct iommu_iotlb_gather *gather) |
edcd69ab JPB |
755 | { |
756 | int ret = 0; | |
757 | size_t unmapped; | |
758 | struct virtio_iommu_req_unmap unmap; | |
759 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
760 | ||
761 | unmapped = viommu_del_mappings(vdomain, iova, size); | |
762 | if (unmapped < size) | |
763 | return 0; | |
764 | ||
765 | /* Device already removed all mappings after detach. */ | |
766 | if (!vdomain->nr_endpoints) | |
767 | return unmapped; | |
768 | ||
769 | unmap = (struct virtio_iommu_req_unmap) { | |
770 | .head.type = VIRTIO_IOMMU_T_UNMAP, | |
771 | .domain = cpu_to_le32(vdomain->id), | |
772 | .virt_start = cpu_to_le64(iova), | |
773 | .virt_end = cpu_to_le64(iova + unmapped - 1), | |
774 | }; | |
775 | ||
776 | ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); | |
777 | return ret ? 0 : unmapped; | |
778 | } | |
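
Note that the UNMAP request is only queued here, not kicked: the IOMMU core is expected to follow a series of unmaps with an `iotlb_sync()` call, which lets the driver batch the requests. A simplified sketch of the sequence the core drives (in reality it goes through the ops table rather than calling these functions directly):

```c
/* Simplified sketch of the unmap flow as driven by the IOMMU core */
struct iommu_iotlb_gather gather;

iommu_iotlb_gather_init(&gather);
viommu_unmap(domain, iova, size, &gather);	/* queues the UNMAP */
viommu_iotlb_sync(domain, &gather);		/* kicks and waits */
```
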
779 | ||
780 | static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain, | |
781 | dma_addr_t iova) | |
782 | { | |
783 | u64 paddr = 0; | |
784 | unsigned long flags; | |
785 | struct viommu_mapping *mapping; | |
786 | struct interval_tree_node *node; | |
787 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
788 | ||
789 | spin_lock_irqsave(&vdomain->mappings_lock, flags); | |
790 | node = interval_tree_iter_first(&vdomain->mappings, iova, iova); | |
791 | if (node) { | |
792 | mapping = container_of(node, struct viommu_mapping, iova); | |
793 | paddr = mapping->paddr + (iova - mapping->iova.start); | |
794 | } | |
795 | spin_unlock_irqrestore(&vdomain->mappings_lock, flags); | |
796 | ||
797 | return paddr; | |
798 | } | |
799 | ||
56f8af5e WD |
800 | static void viommu_iotlb_sync(struct iommu_domain *domain, |
801 | struct iommu_iotlb_gather *gather) | |
edcd69ab JPB |
802 | { |
803 | struct viommu_domain *vdomain = to_viommu_domain(domain); | |
804 | ||
805 | viommu_sync_req(vdomain->viommu); | |
806 | } | |
807 | ||
808 | static void viommu_get_resv_regions(struct device *dev, struct list_head *head) | |
809 | { | |
2a5a3148 JPB |
810 | struct iommu_resv_region *entry, *new_entry, *msi = NULL; |
811 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); | |
812 | struct viommu_endpoint *vdev = fwspec->iommu_priv; | |
edcd69ab JPB |
813 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
814 | ||
2a5a3148 JPB |
815 | list_for_each_entry(entry, &vdev->resv_regions, list) { |
816 | if (entry->type == IOMMU_RESV_MSI) | |
817 | msi = entry; | |
818 | ||
819 | new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL); | |
820 | if (!new_entry) | |
821 | return; | |
822 | list_add_tail(&new_entry->list, head); | |
823 | } | |
824 | ||
825 | /* | |
826 | * If the device didn't register any bypass MSI window, add a | |
827 | * software-mapped region. | |
828 | */ | |
829 | if (!msi) { | |
830 | msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, | |
831 | prot, IOMMU_RESV_SW_MSI); | |
832 | if (!msi) | |
833 | return; | |
834 | ||
835 | list_add_tail(&msi->list, head); | |
836 | } | |
edcd69ab | 837 | |
edcd69ab JPB |
838 | iommu_dma_get_resv_regions(dev, head); |
839 | } | |
840 | ||
841 | static void viommu_put_resv_regions(struct device *dev, struct list_head *head) | |
842 | { | |
843 | struct iommu_resv_region *entry, *next; | |
844 | ||
845 | list_for_each_entry_safe(entry, next, head, list) | |
846 | kfree(entry); | |
847 | } | |
848 | ||
849 | static struct iommu_ops viommu_ops; | |
850 | static struct virtio_driver virtio_iommu_drv; | |
851 | ||
3a1d5384 | 852 | static int viommu_match_node(struct device *dev, const void *data) |
edcd69ab JPB |
853 | { |
854 | return dev->parent->fwnode == data; | |
855 | } | |
856 | ||
857 | static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode) | |
858 | { | |
859 | struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL, | |
860 | fwnode, viommu_match_node); | |
861 | put_device(dev); | |
862 | ||
863 | return dev ? dev_to_virtio(dev)->priv : NULL; | |
864 | } | |
865 | ||
866 | static int viommu_add_device(struct device *dev) | |
867 | { | |
868 | int ret; | |
869 | struct iommu_group *group; | |
870 | struct viommu_endpoint *vdev; | |
871 | struct viommu_dev *viommu = NULL; | |
872 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); | |
873 | ||
874 | if (!fwspec || fwspec->ops != &viommu_ops) | |
875 | return -ENODEV; | |
876 | ||
877 | viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); | |
878 | if (!viommu) | |
879 | return -ENODEV; | |
880 | ||
881 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | |
882 | if (!vdev) | |
883 | return -ENOMEM; | |
884 | ||
2a5a3148 | 885 | vdev->dev = dev; |
edcd69ab | 886 | vdev->viommu = viommu; |
2a5a3148 | 887 | INIT_LIST_HEAD(&vdev->resv_regions); |
edcd69ab JPB |
888 | fwspec->iommu_priv = vdev; |
889 | ||
2a5a3148 JPB |
890 | if (viommu->probe_size) { |
891 | /* Get additional information for this endpoint */ | |
892 | ret = viommu_probe_endpoint(viommu, dev); | |
893 | if (ret) | |
894 | goto err_free_dev; | |
895 | } | |
896 | ||
edcd69ab JPB |
897 | ret = iommu_device_link(&viommu->iommu, dev); |
898 | if (ret) | |
899 | goto err_free_dev; | |
900 | ||
901 | /* | |
902 | * Last step creates a default domain and attaches to it. Everything | |
903 | * must be ready. | |
904 | */ | |
905 | group = iommu_group_get_for_dev(dev); | |
906 | if (IS_ERR(group)) { | |
907 | ret = PTR_ERR(group); | |
908 | goto err_unlink_dev; | |
909 | } | |
910 | ||
911 | iommu_group_put(group); | |
912 | ||
913 | return 0; |
914 | ||
915 | err_unlink_dev: | |
916 | iommu_device_unlink(&viommu->iommu, dev); | |
917 | err_free_dev: | |
2a5a3148 | 918 | viommu_put_resv_regions(dev, &vdev->resv_regions); |
edcd69ab JPB |
919 | kfree(vdev); |
920 | ||
921 | return ret; | |
922 | } | |
923 | ||
924 | static void viommu_remove_device(struct device *dev) | |
925 | { | |
926 | struct viommu_endpoint *vdev; | |
927 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); | |
928 | ||
929 | if (!fwspec || fwspec->ops != &viommu_ops) | |
930 | return; | |
931 | ||
932 | vdev = fwspec->iommu_priv; | |
933 | ||
934 | iommu_group_remove_device(dev); | |
935 | iommu_device_unlink(&vdev->viommu->iommu, dev); | |
2a5a3148 | 936 | viommu_put_resv_regions(dev, &vdev->resv_regions); |
edcd69ab JPB |
937 | kfree(vdev); |
938 | } | |
939 | ||
940 | static struct iommu_group *viommu_device_group(struct device *dev) | |
941 | { | |
942 | if (dev_is_pci(dev)) | |
943 | return pci_device_group(dev); | |
944 | else | |
945 | return generic_device_group(dev); | |
946 | } | |
947 | ||
948 | static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) | |
949 | { | |
950 | return iommu_fwspec_add_ids(dev, args->args, 1); | |
951 | } | |
952 | ||
953 | static struct iommu_ops viommu_ops = { | |
954 | .domain_alloc = viommu_domain_alloc, | |
955 | .domain_free = viommu_domain_free, | |
956 | .attach_dev = viommu_attach_dev, | |
957 | .map = viommu_map, | |
958 | .unmap = viommu_unmap, | |
959 | .iova_to_phys = viommu_iova_to_phys, | |
960 | .iotlb_sync = viommu_iotlb_sync, | |
961 | .add_device = viommu_add_device, | |
962 | .remove_device = viommu_remove_device, | |
963 | .device_group = viommu_device_group, | |
964 | .get_resv_regions = viommu_get_resv_regions, | |
965 | .put_resv_regions = viommu_put_resv_regions, | |
966 | .of_xlate = viommu_of_xlate, | |
967 | }; | |
968 | ||
969 | static int viommu_init_vqs(struct viommu_dev *viommu) | |
970 | { | |
971 | struct virtio_device *vdev = dev_to_virtio(viommu->dev); | |
169a126c JPB |
972 | const char *names[] = { "request", "event" }; |
973 | vq_callback_t *callbacks[] = { | |
974 | NULL, /* No async requests */ | |
975 | viommu_event_handler, | |
976 | }; | |
edcd69ab | 977 | |
169a126c JPB |
978 | return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks, |
979 | names, NULL); | |
980 | } | |
edcd69ab | 981 | |
169a126c JPB |
982 | static int viommu_fill_evtq(struct viommu_dev *viommu) |
983 | { | |
984 | int i, ret; | |
985 | struct scatterlist sg[1]; | |
986 | struct viommu_event *evts; | |
987 | struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; | |
988 | size_t nr_evts = vq->num_free; | |
989 | ||
990 | viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, | |
991 | sizeof(*evts), GFP_KERNEL); | |
992 | if (!evts) | |
993 | return -ENOMEM; | |
994 | ||
995 | for (i = 0; i < nr_evts; i++) { | |
996 | sg_init_one(sg, &evts[i], sizeof(*evts)); | |
997 | ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL); | |
998 | if (ret) | |
999 | return ret; | |
1000 | } | |
edcd69ab JPB |
1001 | |
1002 | return 0; | |
1003 | } | |
1004 | ||
1005 | static int viommu_probe(struct virtio_device *vdev) | |
1006 | { | |
1007 | struct device *parent_dev = vdev->dev.parent; | |
1008 | struct viommu_dev *viommu = NULL; | |
1009 | struct device *dev = &vdev->dev; | |
1010 | u64 input_start = 0; | |
1011 | u64 input_end = -1UL; | |
1012 | int ret; | |
1013 | ||
1014 | if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || | |
1015 | !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP)) | |
1016 | return -ENODEV; | |
1017 | ||
1018 | viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL); | |
1019 | if (!viommu) | |
1020 | return -ENOMEM; | |
1021 | ||
1022 | spin_lock_init(&viommu->request_lock); | |
1023 | ida_init(&viommu->domain_ids); | |
1024 | viommu->dev = dev; | |
1025 | viommu->vdev = vdev; | |
1026 | INIT_LIST_HEAD(&viommu->requests); | |
1027 | ||
1028 | ret = viommu_init_vqs(viommu); | |
1029 | if (ret) | |
1030 | return ret; | |
1031 | ||
1032 | virtio_cread(vdev, struct virtio_iommu_config, page_size_mask, | |
1033 | &viommu->pgsize_bitmap); | |
1034 | ||
1035 | if (!viommu->pgsize_bitmap) { | |
1036 | ret = -EINVAL; | |
1037 | goto err_free_vqs; | |
1038 | } | |
1039 | ||
ae24fb49 JPB |
1040 | viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; |
1041 | viommu->last_domain = ~0U; | |
edcd69ab JPB |
1042 | |
1043 | /* Optional features */ | |
1044 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, | |
1045 | struct virtio_iommu_config, input_range.start, | |
1046 | &input_start); | |
1047 | ||
1048 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, | |
1049 | struct virtio_iommu_config, input_range.end, | |
1050 | &input_end); | |
1051 | ||
ae24fb49 JPB |
1052 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, |
1053 | struct virtio_iommu_config, domain_range.start, | |
1054 | &viommu->first_domain); | |
1055 | ||
1056 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, | |
1057 | struct virtio_iommu_config, domain_range.end, | |
1058 | &viommu->last_domain); | |
edcd69ab | 1059 | |
2a5a3148 JPB |
1060 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, |
1061 | struct virtio_iommu_config, probe_size, | |
1062 | &viommu->probe_size); | |
1063 | ||
edcd69ab JPB |
1064 | viommu->geometry = (struct iommu_domain_geometry) { |
1065 | .aperture_start = input_start, | |
1066 | .aperture_end = input_end, | |
1067 | .force_aperture = true, | |
1068 | }; | |
1069 | ||
ae24fb49 JPB |
1070 | if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) |
1071 | viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; | |
1072 | ||
edcd69ab JPB |
1073 | viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; |
1074 | ||
1075 | virtio_device_ready(vdev); | |
1076 | ||
169a126c JPB |
1077 | /* Populate the event queue with buffers */ |
1078 | ret = viommu_fill_evtq(viommu); | |
1079 | if (ret) | |
1080 | goto err_free_vqs; | |
1081 | ||
edcd69ab JPB |
1082 | ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", |
1083 | virtio_bus_name(vdev)); | |
1084 | if (ret) | |
1085 | goto err_free_vqs; | |
1086 | ||
1087 | iommu_device_set_ops(&viommu->iommu, &viommu_ops); | |
1088 | iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode); | |
1089 | ||
1090 | iommu_device_register(&viommu->iommu); | |
1091 | ||
1092 | #ifdef CONFIG_PCI | |
1093 | if (pci_bus_type.iommu_ops != &viommu_ops) { | |
1094 | pci_request_acs(); | |
1095 | ret = bus_set_iommu(&pci_bus_type, &viommu_ops); | |
1096 | if (ret) | |
1097 | goto err_unregister; | |
1098 | } | |
1099 | #endif | |
1100 | #ifdef CONFIG_ARM_AMBA | |
1101 | if (amba_bustype.iommu_ops != &viommu_ops) { | |
1102 | ret = bus_set_iommu(&amba_bustype, &viommu_ops); | |
1103 | if (ret) | |
1104 | goto err_unregister; | |
1105 | } | |
1106 | #endif | |
1107 | if (platform_bus_type.iommu_ops != &viommu_ops) { | |
1108 | ret = bus_set_iommu(&platform_bus_type, &viommu_ops); | |
1109 | if (ret) | |
1110 | goto err_unregister; | |
1111 | } | |
1112 | ||
1113 | vdev->priv = viommu; | |
1114 | ||
1115 | dev_info(dev, "input address: %u bits\n", | |
1116 | order_base_2(viommu->geometry.aperture_end)); | |
1117 | dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); | |
1118 | ||
1119 | return 0; | |
1120 | ||
1121 | err_unregister: | |
1122 | iommu_device_sysfs_remove(&viommu->iommu); | |
1123 | iommu_device_unregister(&viommu->iommu); | |
1124 | err_free_vqs: | |
1125 | vdev->config->del_vqs(vdev); | |
1126 | ||
1127 | return ret; | |
1128 | } | |
1129 | ||
1130 | static void viommu_remove(struct virtio_device *vdev) | |
1131 | { | |
1132 | struct viommu_dev *viommu = vdev->priv; | |
1133 | ||
1134 | iommu_device_sysfs_remove(&viommu->iommu); | |
1135 | iommu_device_unregister(&viommu->iommu); | |
1136 | ||
1137 | /* Stop all virtqueues */ | |
1138 | vdev->config->reset(vdev); | |
1139 | vdev->config->del_vqs(vdev); | |
1140 | ||
1141 | dev_info(&vdev->dev, "device removed\n"); | |
1142 | } | |
1143 | ||
1144 | static void viommu_config_changed(struct virtio_device *vdev) | |
1145 | { | |
1146 | dev_warn(&vdev->dev, "config changed\n"); | |
1147 | } | |
1148 | ||
1149 | static unsigned int features[] = { | |
1150 | VIRTIO_IOMMU_F_MAP_UNMAP, | |
edcd69ab | 1151 | VIRTIO_IOMMU_F_INPUT_RANGE, |
ae24fb49 | 1152 | VIRTIO_IOMMU_F_DOMAIN_RANGE, |
2a5a3148 | 1153 | VIRTIO_IOMMU_F_PROBE, |
ae24fb49 | 1154 | VIRTIO_IOMMU_F_MMIO, |
edcd69ab JPB |
1155 | }; |
1156 | ||
1157 | static struct virtio_device_id id_table[] = { | |
1158 | { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID }, | |
1159 | { 0 }, | |
1160 | }; | |
1161 | ||
1162 | static struct virtio_driver virtio_iommu_drv = { | |
1163 | .driver.name = KBUILD_MODNAME, | |
1164 | .driver.owner = THIS_MODULE, | |
1165 | .id_table = id_table, | |
1166 | .feature_table = features, | |
1167 | .feature_table_size = ARRAY_SIZE(features), | |
1168 | .probe = viommu_probe, | |
1169 | .remove = viommu_remove, | |
1170 | .config_changed = viommu_config_changed, | |
1171 | }; | |
1172 | ||
1173 | module_virtio_driver(virtio_iommu_drv); | |
1174 | ||
1175 | MODULE_DESCRIPTION("Virtio IOMMU driver"); | |
1176 | MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>"); | |
1177 | MODULE_LICENSE("GPL v2"); |