/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)

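/*
 * Example (illustrative sketch, not part of the API above): the protection
 * flags are OR'd together to form the @prot argument of iommu_map() and
 * friends; "domain", "iova", "paddr" and "size" are assumed to exist in the
 * caller. A read-only, cacheable mapping for a privileged master would be:
 *
 *	int prot = IOMMU_READ | IOMMU_CACHE | IOMMU_PRIV;
 *
 *	ret = iommu_map(domain, iova, paddr, size, prot);
 */
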
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated with an SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			       struct device *dev,
			       struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	int (*def_domain_type)(struct device *dev);

	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

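/*
 * Example (illustrative sketch, not taken from a real driver): a typical
 * IOMMU driver probe path registers its struct iommu_device roughly like
 * this; "smmu", "my_iommu_ops" and "name" are hypothetical driver objects.
 *
 *	iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
 *	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "%s", name);
 *	if (!err)
 *		err = iommu_device_register(&smmu->iommu);
 */
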
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

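/*
 * Example (illustrative sketch): the gather structure is used together with
 * iommu_unmap_fast() and iommu_tlb_sync(), both declared further below, to
 * batch IOTLB invalidations; "domain", "iova" and "size" are assumed to be
 * provided by the caller.
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t unmapped;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */
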
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

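/*
 * Example (illustrative sketch of typical IOMMU-API usage; error handling
 * trimmed, and "dev", "iova" and "paddr" are assumed to be provided by the
 * caller):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev))
 *		goto out_free;
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */
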
/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

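/*
 * Example (illustrative sketch; "domain", "iova" and "sgt" are assumed to be
 * set up by the caller, e.g. via sg_alloc_table_from_pages()):
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sgtable(domain, iova, sgt, IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -ENOMEM;
 */
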
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

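/*
 * Example (illustrative sketch): walking a device's reserved regions;
 * iommu_get_resv_regions() fills the list and iommu_put_resv_regions()
 * releases it again. "dev" is assumed to be provided by the caller.
 *
 *	struct iommu_resv_region *region;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv region %pa + %zu\n", &region->start,
 *			region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */
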
extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

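/*
 * Example (illustrative sketch; "my_iommu_fault_handler" is a hypothetical
 * callback matching iommu_dev_fault_handler_t):
 *
 *	static int my_iommu_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		dev_err(dev, "IOMMU fault, type %d\n", fault->type);
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_iommu_fault_handler,
 *						  dev);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */
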
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

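/*
 * Example (illustrative sketch): querying a domain's aperture through
 * DOMAIN_ATTR_GEOMETRY; "domain" and "base" are assumed to exist in the
 * caller.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		base = geo.aperture_start;
 */
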
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

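/*
 * Example (illustrative sketch of a driver's ->of_xlate() callback;
 * "my_iommu_of_xlate" is hypothetical). The core sets up the fwspec
 * beforehand via iommu_fwspec_init(); the driver only records the master ID
 * taken from the firmware description:
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		return iommu_fwspec_add_ids(dev, args->args, 1);
 *	}
 */
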
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	return dev->iommu->priv;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_get_pasid(struct iommu_sva *handle);

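/*
 * Example (illustrative sketch; error handling trimmed): binding the current
 * process address space to a device and retrieving the PASID to program into
 * the hardware. "dev" is assumed to be provided by the caller.
 *
 *	struct iommu_sva *handle;
 *	int pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */
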
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
		       struct device *dev,
		       struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}

static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
					  struct device *dev, int pasid)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */