// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2018 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

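/* State of one virtio-iommu device, shared by all endpoints it manages */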
struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u8				domain_bits;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

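/*
 * A request pending on the request virtqueue. @buf holds the request: the
 * device-readable part up to @write_offset, followed by the device-writable
 * part ending with the status tail. When @writeback is set, the device-written
 * part is copied back into the caller's buffer once the request completes.
 */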
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

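/* Convert a request status written by the device into an errno */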
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

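/*
 * Return the offset of the device-writable part within a request of @len
 * bytes. Only the tail is writable by the device, plus the properties area
 * for probe requests.
 */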
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	int ret = 0;
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return ret;
}

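/* Take the request lock and complete all in-flight requests */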
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

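/* Add a request to the queue without waiting for it to complete */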
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = iova + size - 1;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

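/*
 * Parse a RESV_MEM probe property and add the corresponding reserved or MSI
 * region to the endpoint's list.
 */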
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		/* Fall-through */
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&region->list, &vdev->resv_regions);
	return 0;
}

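/*
 * Send a PROBE request for the endpoint and parse the properties returned by
 * the device (currently only reserved memory regions).
 */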
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

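/* Log a translation fault reported by the device on the event virtqueue */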
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}

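/*
 * Finalise the domain at first attach, once the owning viommu is known:
 * inherit its page size mask and aperture, and allocate a domain ID.
 */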
static int viommu_domain_finalise(struct viommu_dev *viommu,
				  struct iommu_domain *domain)
{
	int ret;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
				  (1U << viommu->domain_bits) - 1;

	vdomain->viommu = viommu;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
	if (ret >= 0)
		vdomain->id = (unsigned int)ret;

	return ret > 0 ? 0 : ret;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev->viommu, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

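/*
 * Store the new mapping in the internal tree and, if the domain already has
 * endpoints attached, send a MAP request to the device.
 */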
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	int flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(iova + size - 1),
		.flags = cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}

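/*
 * Remove the mappings from the internal tree and add an UNMAP request to the
 * queue; it is sent to the device by the next sync, for instance
 * viommu_iotlb_sync().
 */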
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

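/* Look up the physical address backing an IOVA in the internal mapping tree */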
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

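/*
 * Report the endpoint's reserved regions found during probe, add a
 * software-mapped MSI window if the device didn't provide a bypass one, and
 * let iommu_dma_get_resv_regions() add the generic regions.
 */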
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

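/* Retrieve the viommu_dev whose parent (transport) device owns @fwnode */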
static int viommu_match_node(struct device *dev, void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

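/*
 * Allocate the viommu_endpoint structure for a device, probe its properties
 * when available, and add it to an IOMMU group.
 */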
static int viommu_add_device(struct device *dev)
{
	int ret;
	struct iommu_group *group;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return -ENODEV;

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return -ENODEV;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	fwspec->iommu_priv = vdev;

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	ret = iommu_device_link(&viommu->iommu, dev);
	if (ret)
		goto err_free_dev;

	/*
	 * Last step creates a default domain and attaches to it. Everything
	 * must be ready.
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink_dev;
	}

	iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);

err_unlink_dev:
	iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
	viommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ret;
}

static void viommu_remove_device(struct device *dev)
{
	struct viommu_endpoint *vdev;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = fwspec->iommu_priv;

	iommu_group_remove_device(dev);
	iommu_device_unlink(&vdev->viommu->iommu, dev);
	viommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops viommu_ops = {
	.domain_alloc		= viommu_domain_alloc,
	.domain_free		= viommu_domain_free,
	.attach_dev		= viommu_attach_dev,
	.map			= viommu_map,
	.unmap			= viommu_unmap,
	.iova_to_phys		= viommu_iova_to_phys,
	.iotlb_sync		= viommu_iotlb_sync,
	.add_device		= viommu_add_device,
	.remove_device		= viommu_remove_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.put_resv_regions	= viommu_put_resv_regions,
	.of_xlate		= viommu_of_xlate,
};

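/* Set up the request and event virtqueues */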
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

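/* Fill the event queue with buffers that the device can use to report faults */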
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

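/*
 * Read the device configuration, register the IOMMU and install the ops on
 * the PCI, AMBA and platform buses.
 */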
static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
		     &viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->domain_bits = 32;

	/* Optional features */
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.start,
			     &input_start);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.end,
			     &input_end);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
			     struct virtio_iommu_config, domain_bits,
			     &viommu->domain_bits);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
			     struct virtio_iommu_config, probe_size,
			     &viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_DOMAIN_BITS,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_PROBE,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");