// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate a PASID for the mm, or return the one already associated with it */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it is, readers may
	 * see a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
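	/*
	 * Note: lock-free readers such as mm_get_enqcmd_pasid() pair with
	 * this release when they load mm->iommu_mm.
	 */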
	return iommu_mm;
}

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate a PASID for the mm if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and attach it to the device PASID. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
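
/*
 * Illustrative sketch, not part of the original file: one way a driver that
 * has already enabled IOMMU_DEV_FEAT_SVA might bind the current process.
 * example_bind_current_mm() is a hypothetical helper name.
 */
static __maybe_unused struct iommu_sva *example_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;

	/*
	 * In process context, current->mm is valid and mm_users is held,
	 * which satisfies the requirement documented on
	 * iommu_sva_bind_device() above.
	 */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		dev_err(dev, "SVA bind failed: %ld\n", PTR_ERR(handle));

	return handle;
}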

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to the bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
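
/*
 * Illustrative sketch, not part of the original file: the teardown
 * counterpart to the hypothetical example_bind_current_mm() above. The PASID
 * returned by iommu_sva_get_pasid() is what the driver programs into its
 * hardware (e.g. for ENQCMD-based work submission) while the bond is live.
 */
static __maybe_unused void example_unbind(struct device *dev,
					  struct iommu_sva *handle)
{
	u32 pasid = iommu_sva_get_pasid(handle);

	/*
	 * The device must be quiesced for @pasid and outstanding page
	 * requests flushed before the bond is released, per the
	 * iommu_sva_unbind_device() documentation above.
	 */
	dev_dbg(dev, "releasing PASID %u\n", pasid);
	iommu_sva_unbind_device(handle);
}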

/*
 * I/O page fault handler for SVA: installed as the IOPF handler of an SVA
 * domain by iommu_sva_domain_alloc(), with @data pointing at the bound mm.
 * Faults are resolved by handling them through that mm, as a CPU access
 * would be.
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
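/*
 * Release the mm's PASID and per-mm SVA state once the mm itself is torn
 * down (this runs from __mmdrop(), after the last mm reference is gone).
 */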
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}