// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate the mm's PASID and iommu_mm data, if not already allocated */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	INIT_LIST_HEAD(&iommu_mm->sva_handles);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it were, readers
	 * could see a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
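
/*
 * Illustrative sketch (not part of this file): a lock-free reader of
 * mm->iommu_mm pairs with the smp_store_release() above via an acquire
 * load, along these lines:
 *
 *	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);
 *
 *	if (!iommu_mm)
 *		return IOMMU_PASID_INVALID;
 *	return iommu_mm->pasid;
 */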

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. The caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->iommu_mm (and its PASID) if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
		if (handle->dev == dev) {
			refcount_inc(&handle->users);
			mutex_unlock(&iommu_sva_lock);
			return handle;
		}
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and attach it to the device PASID. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	/* Fully initialize the handle before publishing it on the list. */
	refcount_set(&handle->users, 1);
	handle->dev = dev;
	handle->domain = domain;
	list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
	mutex_unlock(&iommu_sva_lock);
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
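
/*
 * Illustrative sketch (not part of this file): a typical driver bind
 * sequence, assuming a hypothetical my_driver_set_pasid() callback that
 * programs the PASID into the device context:
 *
 *	struct iommu_sva *handle;
 *	int ret;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	my_driver_set_pasid(dev, iommu_sva_get_pasid(handle));
 *
 * and, once the device has stopped issuing transactions for the PASID:
 *
 *	iommu_sva_unbind_device(handle);
 */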

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device()
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to the bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}
	list_del(&handle->handle_item);

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->domain_alloc_sva) {
		domain = ops->domain_alloc_sva(dev, mm);
		if (IS_ERR(domain))
			return domain;
	} else {
		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
		if (!domain)
			return ERR_PTR(-ENOMEM);
	}

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
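
/*
 * Illustrative sketch (not part of this file): an IOMMU driver that
 * implements the optional domain_alloc_sva() op typically embeds the
 * generic domain in its own SVA domain type (hypothetical my_sva_domain
 * and my_sva_domain_ops below):
 *
 *	static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
 *							struct mm_struct *mm)
 *	{
 *		struct my_sva_domain *sva;
 *
 *		sva = kzalloc(sizeof(*sva), GFP_KERNEL);
 *		if (!sva)
 *			return ERR_PTR(-ENOMEM);
 *		sva->domain.ops = &my_sva_domain_ops;
 *		return &sva->domain;
 *	}
 *
 * The caller above then fills in domain->type, domain->mm, domain->owner
 * and the default iopf_handler.
 */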