/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

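/*
 * Illustrative sketch (not part of this header): a typical prot value for a
 * coherent, read-write mapping combines the flags above, e.g.
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *
 * IOMMU_NOEXEC, IOMMU_MMIO and IOMMU_PRIV are OR'd in the same way where the
 * hardware supports them.
 */
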
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */

/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)

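/*
 * Illustrative sketch (hypothetical VFIO-style consumer): an IOMMU-API user
 * typically allocates an unmanaged domain and attaches a device to it, e.g.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		... use iommu_map()/iommu_unmap() on the domain ...
 *
 * iommu_domain_alloc() hands back an IOMMU_DOMAIN_UNMANAGED domain; the DMA
 * and identity types are normally set up by the core as default domains.
 */
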
struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

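/*
 * Illustrative sketch (hypothetical caller): reserved regions are obtained
 * and released as a list, e.g.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		... carve region->start .. region->start + region->length - 1
 *		    out of the usable IOVA space ...
 *	iommu_put_resv_regions(dev, &resv_regions);
 */
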
struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

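/*
 * Illustrative sketch (hypothetical unmap path): callers batch invalidations
 * across several unmaps and issue a single sync at the end, e.g.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_unmap_fast(domain, iova2, size2, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */
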
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @set_platform_dma_ops: Return control back to the platform DMA ops. This op
 *                        is to support old IOMMU drivers, new drivers should
 *                        use default domains, and the common IOMMU DMA ops.
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	void (*set_platform_dma_ops)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

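/*
 * Illustrative sketch (hypothetical driver, my_* names invented): a driver
 * wires the two ops structures together roughly as
 *
 *	static const struct iommu_domain_ops my_domain_ops = {
 *		.attach_dev	= my_attach_dev,
 *		.map_pages	= my_map_pages,
 *		.unmap_pages	= my_unmap_pages,
 *		.iotlb_sync	= my_iotlb_sync,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.free		= my_domain_free,
 *	};
 *
 *	static const struct iommu_ops my_ops = {
 *		.capable	= my_capable,
 *		.domain_alloc	= my_domain_alloc,
 *		.probe_device	= my_probe_device,
 *		.release_device	= my_release_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *		.default_domain_ops = &my_domain_ops,
 *		.owner		= THIS_MODULE,
 *	};
 */
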
/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu instance
 * @dev: struct device for sysfs handling
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	u32 max_pasids;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

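/*
 * Illustrative sketch (hypothetical caller): mapping, translating and
 * unmapping a single physically contiguous region, e.g.
 *
 *	if (!iommu_map(domain, iova, paddr, SZ_4K,
 *		       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL)) {
 *		WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);
 *		iommu_unmap(domain, iova, SZ_4K);
 *	}
 *
 * iommu_map() returns 0 on success; iommu_unmap() returns the number of
 * bytes actually unmapped.
 */
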
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

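/*
 * Illustrative sketch (hypothetical caller): iommu_group_get() takes a
 * reference which must be dropped with iommu_group_put(), e.g.
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		... use iommu_group_id(group), iommu_attach_group(), etc. ...
 *		iommu_group_put(group);
 *	}
 */
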
int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device bound to the mm
 * @domain: SVA domain backing the bond
 */
struct iommu_sva {
	struct device			*dev;
	struct iommu_domain		*domain;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

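/*
 * Illustrative sketch (hypothetical firmware-parsing code): bus code creates
 * the fwspec and records the device's IDs before probing, e.g.
 *
 *	if (!iommu_fwspec_init(dev, iommu_fwnode, ops))
 *		iommu_fwspec_add_ids(dev, &sid, 1);
 *
 * where "iommu_fwnode" and "sid" come from DT or ACPI properties.
 */
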
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

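/*
 * Illustrative sketch (hypothetical driver): ->probe_device() implementations
 * stash their per-device state with dev_iommu_priv_set() and retrieve it
 * later, e.g.
 *
 *	dev_iommu_priv_set(dev, my_dev_data);
 *	...
 *	struct my_dev_data *data = dev_iommu_priv_get(dev);
 */
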
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}

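/*
 * Illustrative sketch (hypothetical caller, "buf_size" assumed): after
 * building an sg_table for a buffer, the whole table maps in one call, e.g.
 *
 *	size_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					  IOMMU_READ | IOMMU_WRITE);
 *
 *	if (mapped < buf_size)
 *		... unmap any partial mapping and bail out ...
 */
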
#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU
 * or ARM SMMU, for which the contents of the struct iommu_fwspec are known.
 * Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

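/*
 * Illustrative sketch: a Tegra driver might use the helper above when
 * programming stream IDs directly, e.g.
 *
 *	u32 sid = TEGRA_STREAM_ID_BYPASS;
 *
 *	tegra_dev_iommu_get_stream_id(dev, &sid);
 *	writel(sid, regs + MY_SID_REGISTER);
 *
 * where MY_SID_REGISTER is a made-up register offset; if the lookup fails,
 * the bypass ID is left in place.
 */
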
#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = IOMMU_PASID_INVALID;
}
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return mm->pasid != IOMMU_PASID_INVALID;
}
void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */

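/*
 * Illustrative sketch (hypothetical driver): binding the current process for
 * Shared Virtual Addressing and retrieving the PASID to program into the
 * device, e.g.
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		u32 pasid = iommu_sva_get_pasid(handle);
 *		... program pasid into the device ...
 *		iommu_sva_unbind_device(handle);
 *	}
 */
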
#endif /* __LINUX_IOMMU_H */