// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"
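
/*
 * Serializes PASID allocation for an mm and updates to the per-mm lists
 * of SVA domains and handles.
 */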
static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate a PASID for the mm, or return the one already assigned to it */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	INIT_LIST_HEAD(&iommu_mm->sva_handles);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * initialization to iommu_mm fields. If it is, readers may see a
	 * valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
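
/*
 * Reader-side sketch (illustrative only, not a function in this file):
 * the release store above guarantees that a reader observing the pointer
 * also observes the initialized fields, provided the pointer is loaded
 * with acquire (or at least dependency-ordered) semantics.
 * do_something() below is a hypothetical consumer:
 *
 *	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);
 *
 *	if (iommu_mm)
 *		do_something(iommu_mm->pasid);
 */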

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* If a bond already exists for this device, take another reference. */
	list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
		if (handle->dev == dev) {
			refcount_inc(&handle->users);
			mutex_unlock(&iommu_sva_lock);
			return handle;
		}
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
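
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * that wants device transactions to walk the current process's page
 * tables enables the SVA feature once, binds the mm, and programs the
 * returned PASID into its hardware. my_hw_set_pasid() is a hypothetical
 * device-specific hook; error handling is minimal.
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	my_hw_set_pasid(dev, iommu_sva_get_pasid(handle));
 *	...
 *	iommu_sva_unbind_device(handle);
 */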

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}
	list_del(&handle->handle_item);

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
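
/**
 * iommu_sva_get_pasid() - Get the PASID backing an SVA bond
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Return: the PASID allocated for @handle's mm, which the device uses to
 * tag transactions targeting that address space.
 */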
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
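
/* Called from mm teardown to release the mm's global PASID, if any. */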
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;
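
	/* Translate the page request's permissions into vma access/fault flags. */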
	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}
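
/*
 * Fault handler installed on SVA domains: I/O page faults are reported
 * from a context that cannot fault in user pages, so defer the group to
 * the fault queue's workqueue, where handle_mm_fault() may sleep.
 */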
static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}