/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

/* Serializes updates to every mm's iommu_group_mem_list */
static DEFINE_MUTEX(mem_list_mutex);

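/*
 * A preregistered chunk of userspace memory:
 * @used is a reference count, only changed under mem_list_mutex;
 * @mapped is set to 1 while the region is alive and incremented for
 * every active hardware mapping, so mm_iommu_put() can refuse to tear
 * the region down while TCEs still point at it;
 * @pageshift is the largest IOMMU page shift that every pinned page
 * in the region can back.
 */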
struct mm_iommu_table_group_mem_t {
        struct list_head next;
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
        unsigned int pageshift;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
};

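/*
 * Charge (or uncharge) @npages against the mm's RLIMIT_MEMLOCK; pinning
 * fails with -ENOMEM if the limit would be exceeded and the caller does
 * not have CAP_IPC_LOCK.
 */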
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
                unsigned long npages, bool incr)
{
        long ret = 0, locked, lock_limit;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);

        if (incr) {
                locked = mm->locked_vm + npages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        mm->locked_vm += npages;
        } else {
                if (WARN_ON_ONCE(npages > mm->locked_vm))
                        npages = mm->locked_vm;
                mm->locked_vm -= npages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
                        current ? current->pid : 0,
                        incr ? '+' : '-',
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);

        return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
        return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
        gfp_t gfp_mask = GFP_USER;
        struct page *new_page;

        if (PageCompound(page))
                return NULL;

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        /*
         * We don't want this allocation to force the OOM killer if
         * we can avoid it.
         */
        new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
        return new_page;
}

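/*
 * Move a page out of CMA before long-term pinning: a pinned page cannot
 * migrate, so leaving it in CMA would defeat future contiguous
 * allocations. Migration failure is not fatal here; the caller simply
 * re-pins whichever page backs the address afterwards.
 */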
static int mm_iommu_move_page_from_cma(struct page *page)
{
        int ret = 0;
        LIST_HEAD(cma_migrate_pages);

        /* Ignore huge pages for now */
        if (PageCompound(page))
                return -EBUSY;

        lru_add_drain();
        ret = isolate_lru_page(page);
        if (ret)
                return ret;

        list_add(&page->lru, &cma_migrate_pages);
        put_page(page); /* Drop the gup reference */

        ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
                                NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
        if (ret) {
                if (!list_empty(&cma_migrate_pages))
                        putback_movable_pages(&cma_migrate_pages);
        }

        /* Migration failure is not fatal; the caller re-pins the page */
        return 0;
}

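/*
 * Pin @entries pages of userspace memory at @ua and add the region to
 * the mm's preregistration list. An exact repeat of an existing
 * registration only bumps its reference count; a partial overlap is
 * rejected with -EINVAL. A typical caller (the VFIO SPAPR TCE driver,
 * for instance) does roughly:
 *
 *	ret = mm_iommu_get(mm, ua, entries, &mem);
 *	...
 *	ret = mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);
 *	...
 *	ret = mm_iommu_put(mm, mem);
 */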
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem)
{
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
        unsigned int pageshift;
        unsigned long flags;
        unsigned long cur_ua;
        struct page *page = NULL;

        mutex_lock(&mem_list_mutex);

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ++mem->used;
                        *pmem = mem;
                        goto unlock_exit;
                }

                /* Overlap? */
                if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
                                (ua < (mem->ua +
                                       (mem->entries << PAGE_SHIFT)))) {
                        ret = -EINVAL;
                        goto unlock_exit;
                }
        }

        ret = mm_iommu_adjust_locked_vm(mm, entries, true);
        if (ret)
                goto unlock_exit;

        locked_entries = entries;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        /*
         * As a starting point for the maximum page size calculation, use
         * the natural alignment of @ua and @entries: this allows IOMMU
         * pages smaller than huge pages but still bigger than PAGE_SIZE.
         */
        mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
        mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
        if (!mem->hpas) {
                kfree(mem);
                ret = -ENOMEM;
                goto unlock_exit;
        }

        for (i = 0; i < entries; ++i) {
                cur_ua = ua + (i << PAGE_SHIFT);
                if (1 != get_user_pages_fast(cur_ua,
                                        1/* pages */, 1/* iswrite */, &page)) {
                        ret = -EFAULT;
                        for (j = 0; j < i; ++j)
                                put_page(pfn_to_page(mem->hpas[j] >>
                                                PAGE_SHIFT));
                        vfree(mem->hpas);
                        kfree(mem);
                        goto unlock_exit;
                }
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
                 * of the CMA zone if possible. NOTE: faulting in + migration
                 * can be expensive. Batching can be considered later.
                 */
                if (is_migrate_cma_page(page)) {
                        if (mm_iommu_move_page_from_cma(page))
                                goto populate;
                        if (1 != get_user_pages_fast(cur_ua,
                                                1/* pages */, 1/* iswrite */,
                                                &page)) {
                                ret = -EFAULT;
                                for (j = 0; j < i; ++j)
                                        put_page(pfn_to_page(mem->hpas[j] >>
                                                                PAGE_SHIFT));
                                vfree(mem->hpas);
                                kfree(mem);
                                goto unlock_exit;
                        }
                }
populate:
                pageshift = PAGE_SHIFT;
                if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
                        pte_t *pte;
                        struct page *head = compound_head(page);
                        unsigned int compshift = compound_order(head);
                        unsigned int pteshift;

                        local_irq_save(flags); /* disables interrupts as well */
                        pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);

                        /* Double check it is still the same pinned page */
                        if (pte && pte_page(*pte) == head &&
                            pteshift == compshift + PAGE_SHIFT)
                                pageshift = max_t(unsigned int, pteshift,
                                                PAGE_SHIFT);
                        local_irq_restore(flags);
                }
                mem->pageshift = min(mem->pageshift, pageshift);
                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }

        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
        *pmem = mem;

        list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
        if (locked_entries && ret)
                mm_iommu_adjust_locked_vm(mm, locked_entries, false);

        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

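/* Drop the reference that get_user_pages_fast() took on every pinned page */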
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
        long i;
        struct page *page = NULL;

        for (i = 0; i < mem->entries; ++i) {
                if (!mem->hpas[i])
                        continue;

                page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
                if (!page)
                        continue;

                put_page(page);
                mem->hpas[i] = 0;
        }
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
        mm_iommu_unpin(mem);
        vfree(mem->hpas);
        kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
        struct mm_iommu_table_group_mem_t *mem = container_of(head,
                        struct mm_iommu_table_group_mem_t, rcu);

        mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
        list_del_rcu(&mem->next);
        call_rcu(&mem->rcu, mm_iommu_free);
}

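/*
 * Drop one reference to a preregistered region. The final put only
 * succeeds if no hardware mappings remain; otherwise the region stays
 * live and -EBUSY is returned.
 */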
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
        long ret = 0;

        mutex_lock(&mem_list_mutex);

        if (mem->used == 0) {
                ret = -ENOENT;
                goto unlock_exit;
        }

        --mem->used;
        /* There are still users, exit */
        if (mem->used)
                goto unlock_exit;

        /* Are there still mappings? */
        if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
                ++mem->used;
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* @mapped became 0 so now mappings are disabled, release the region */
        mm_iommu_release(mem);

        mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

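/* Find the preregistered region fully covering [ua, ua + size) */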
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
                unsigned long ua, unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

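/*
 * Real-mode flavour of mm_iommu_lookup(), for callers running with the
 * MMU off (such as KVM's real-mode TCE handlers); hence the lockless
 * list walk instead of the RCU primitives with their lockdep checks.
 */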
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
                unsigned long ua, unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

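/* Find a region by exact @ua/@entries match rather than by range */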
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

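/*
 * Translate a userspace address inside @mem into the host physical
 * address of the pinned page; fails if the requested IOMMU page size
 * (@pageshift) exceeds what the region can guarantee.
 */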
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];

        if (entry >= mem->entries)
                return -EFAULT;

        if (pageshift > mem->pageshift)
                return -EFAULT;

        *hpa = *va | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

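/*
 * Real-mode flavour of mm_iommu_ua_to_hpa(): the hpas[] array lives in
 * vmalloc space, which is not accessible with the MMU off, so the entry
 * is reached through its physical address instead of being dereferenced
 * directly.
 */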
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        void *va = &mem->hpas[entry];
        unsigned long *pa;

        if (entry >= mem->entries)
                return -EFAULT;

        if (pageshift > mem->pageshift)
                return -EFAULT;

        pa = (void *) vmalloc_to_phys(va);
        if (!pa)
                return -EFAULT;

        *hpa = *pa | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

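/*
 * Take/drop a hardware-mapping reference. The increment fails once the
 * last mm_iommu_put() has dropped @mapped to zero, preventing a dying
 * region from gaining new mappings.
 */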
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
        if (atomic64_inc_not_zero(&mem->mapped))
                return 0;

        /* Last mm_iommu_put() has been called, no more mappings allowed */
        return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
        atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

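/* Set up the (initially empty) list of preregistered regions for @mm */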
void mm_iommu_init(struct mm_struct *mm)
{
        INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}