/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
static DEFINE_MUTEX(mem_list_mutex);
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;	/* region users, protected by mem_list_mutex */
	atomic64_t mapped;	/* 1 + number of active hardware mappings */
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};
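
/*
 * Account @npages against the mm's locked_vm: charged when a region is
 * pinned (incr == true), uncharged when it is released.  RLIMIT_MEMLOCK is
 * enforced unless the caller has CAP_IPC_LOCK.
 */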
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
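
/*
 * Expected call flow, as a rough sketch only: the consumers (such as the
 * VFIO SPAPR TCE driver) live outside this file, and the snippet below is
 * illustrative rather than real caller code.
 *
 *	mm_iommu_get(mm, ua, entries, &mem);      // pin and preregister
 *	...
 *	if (!mm_iommu_mapped_inc(mem)) {          // take a per-mapping reference
 *		mm_iommu_ua_to_hpa(mem, ua, &hpa);
 *		...                               // program the TCE with hpa
 *		mm_iommu_mapped_dec(mem);         // when the mapping is torn down
 *	}
 *	...
 *	mm_iommu_put(mm, mem);                    // unpin and unregister
 */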
/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}
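
/*
 * Migrate a page out of the CMA area before it gets pinned long term: a
 * pinned page cannot be migrated, so leaving it in CMA would permanently
 * fragment the contiguous region the allocator is trying to preserve.
 */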
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);
	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}

	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}
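
	/*
	 * Pin each page of the region with get_user_pages_fast() and record
	 * its host physical address in hpas[]; the translation helpers below
	 * return these values.
	 */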
	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
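
/*
 * Teardown helpers: mm_iommu_put() unlinks the descriptor via
 * mm_iommu_release(), then after an RCU grace period mm_iommu_free() calls
 * mm_iommu_do_free(), which drops the page references and frees the
 * descriptor and its hpas[] array.
 */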
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}
static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
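
/*
 * Real-mode variant of mm_iommu_lookup(): intended for contexts where RCU
 * read-side primitives cannot be used (e.g. real-mode TCE hypercall
 * handling), hence the lockless list walk.
 */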
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
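
/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): with the MMU off the vmalloc'ed
 * hpas[] array cannot be dereferenced through its virtual address, so the
 * element's physical address is obtained with vmalloc_to_phys() and read
 * directly.
 */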
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
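
/*
 * @mapped starts at 1 in mm_iommu_get().  mm_iommu_mapped_inc() takes an
 * extra reference per active mapping and fails once the final mm_iommu_put()
 * has dropped the counter to zero; mm_iommu_mapped_dec() never drops the
 * initial reference (add_unless stops at 1).
 */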
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
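
/*
 * Initialise the per-mm list of preregistered regions; expected to be called
 * once when a new MMU context is set up for the mm.
 */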
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}