// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

17 | /* |
18 | * Ensure that shared mappings are correctly aligned to | |
19 | * avoid aliasing issues with VIPT caches. | |
20 | * We need to ensure that | |
21 | * a specific page of an object is always mapped at a multiple of | |
22 | * SHMLBA bytes. | |
23 | */ | |
24 | unsigned long | |
25 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
26 | unsigned long len, unsigned long pgoff, unsigned long flags) | |
27 | { | |
28 | struct mm_struct *mm = current->mm; | |
29 | struct vm_area_struct *vma; | |
b80fa3cb | 30 | struct vm_unmapped_area_info info = {}; |
5bba49f5 | 31 | |
5bba49f5 VG |
32 | /* |
33 | * We enforce the MAP_FIXED case. | |
34 | */ | |
35 | if (flags & MAP_FIXED) { | |
6732c0e4 | 36 | if (flags & MAP_SHARED && |
5bba49f5 VG |
37 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) |
38 | return -EINVAL; | |
39 | return addr; | |
40 | } | |
41 | ||
42 | if (len > TASK_SIZE) | |
43 | return -ENOMEM; | |
44 | ||
45 | if (addr) { | |
6732c0e4 | 46 | addr = PAGE_ALIGN(addr); |
5bba49f5 VG |
47 | |
48 | vma = find_vma(mm, addr); | |
49 | if (TASK_SIZE - len >= addr && | |
1be7107f | 50 | (!vma || addr + len <= vm_start_gap(vma))) |
5bba49f5 VG |
51 | return addr; |
52 | } | |
53 | ||
5bba49f5 VG |
54 | info.length = len; |
55 | info.low_limit = mm->mmap_base; | |
56 | info.high_limit = TASK_SIZE; | |
5bba49f5 VG |
57 | info.align_offset = pgoff << PAGE_SHIFT; |
58 | return vm_unmapped_area(&info); | |
59 | } | |
61 | static const pgprot_t protection_map[16] = { | |
62 | [VM_NONE] = PAGE_U_NONE, | |
63 | [VM_READ] = PAGE_U_R, | |
64 | [VM_WRITE] = PAGE_U_R, | |
65 | [VM_WRITE | VM_READ] = PAGE_U_R, | |
66 | [VM_EXEC] = PAGE_U_X_R, | |
67 | [VM_EXEC | VM_READ] = PAGE_U_X_R, | |
68 | [VM_EXEC | VM_WRITE] = PAGE_U_X_R, | |
69 | [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R, | |
70 | [VM_SHARED] = PAGE_U_NONE, | |
71 | [VM_SHARED | VM_READ] = PAGE_U_R, | |
72 | [VM_SHARED | VM_WRITE] = PAGE_U_W_R, | |
73 | [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R, | |
74 | [VM_SHARED | VM_EXEC] = PAGE_U_X_R, | |
75 | [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R, | |
76 | [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R, | |
77 | [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R | |
78 | }; | |
79 | DECLARE_VM_GET_PAGE_PROT |