/* extracted from kernel history, commit e7cc9a73 */
1 | /* |
2 | * Trapped io support | |
3 | * | |
4 | * Copyright (C) 2008 Magnus Damm | |
5 | * | |
6 | * Intercept io operations by trapping. | |
7 | * | |
8 | * This file is subject to the terms and conditions of the GNU General Public | |
9 | * License. See the file "COPYING" in the main directory of this archive | |
10 | * for more details. | |
11 | */ | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/mm.h> | |
14 | #include <linux/bitops.h> | |
15 | #include <linux/vmalloc.h> | |
16 | #include <asm/system.h> | |
17 | #include <asm/mmu_context.h> | |
18 | #include <asm/uaccess.h> | |
19 | #include <asm/io.h> | |
20 | #include <asm/io_trapped.h> | |
21 | ||
/* upper bound on the number of pages one trapped window may span */
#define TRAPPED_PAGES_MAX 16
/* NOTE(review): classic double-evaluation hazard; all current call sites
 * pass side-effect-free arguments, so it is safe here. */
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))

#ifdef CONFIG_HAS_IOPORT
/* registered trapped port-I/O windows (IORESOURCE_IO) */
LIST_HEAD(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
/* registered trapped memory-mapped-I/O windows (IORESOURCE_MEM) */
LIST_HEAD(trapped_mem);
#endif
/* serializes additions to and walks of both lists above */
static DEFINE_SPINLOCK(trapped_lock);
32 | ||
33 | int __init register_trapped_io(struct trapped_io *tiop) | |
34 | { | |
35 | struct resource *res; | |
36 | unsigned long len = 0, flags = 0; | |
37 | struct page *pages[TRAPPED_PAGES_MAX]; | |
38 | int k, n; | |
39 | ||
40 | /* structure must be page aligned */ | |
41 | if ((unsigned long)tiop & (PAGE_SIZE - 1)) | |
42 | goto bad; | |
43 | ||
44 | for (k = 0; k < tiop->num_resources; k++) { | |
45 | res = tiop->resource + k; | |
46 | len += roundup((res->end - res->start) + 1, PAGE_SIZE); | |
47 | flags |= res->flags; | |
48 | } | |
49 | ||
50 | /* support IORESOURCE_IO _or_ MEM, not both */ | |
51 | if (hweight_long(flags) != 1) | |
52 | goto bad; | |
53 | ||
54 | n = len >> PAGE_SHIFT; | |
55 | ||
56 | if (n >= TRAPPED_PAGES_MAX) | |
57 | goto bad; | |
58 | ||
59 | for (k = 0; k < n; k++) | |
60 | pages[k] = virt_to_page(tiop); | |
61 | ||
62 | tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE); | |
63 | if (!tiop->virt_base) | |
64 | goto bad; | |
65 | ||
66 | len = 0; | |
67 | for (k = 0; k < tiop->num_resources; k++) { | |
68 | res = tiop->resource + k; | |
69 | pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n", | |
70 | (unsigned long)(tiop->virt_base + len), | |
71 | res->flags & IORESOURCE_IO ? "io" : "mmio", | |
72 | (unsigned long)res->start); | |
73 | len += roundup((res->end - res->start) + 1, PAGE_SIZE); | |
74 | } | |
75 | ||
76 | tiop->magic = IO_TRAPPED_MAGIC; | |
77 | INIT_LIST_HEAD(&tiop->list); | |
78 | spin_lock_irq(&trapped_lock); | |
79 | if (flags & IORESOURCE_IO) | |
80 | list_add(&tiop->list, &trapped_io); | |
81 | if (flags & IORESOURCE_MEM) | |
82 | list_add(&tiop->list, &trapped_mem); | |
83 | spin_unlock_irq(&trapped_lock); | |
84 | ||
85 | return 0; | |
86 | bad: | |
87 | pr_warning("unable to install trapped io filter\n"); | |
88 | return -1; | |
89 | } | |
90 | ||
91 | void __iomem *match_trapped_io_handler(struct list_head *list, | |
92 | unsigned long offset, | |
93 | unsigned long size) | |
94 | { | |
95 | unsigned long voffs; | |
96 | struct trapped_io *tiop; | |
97 | struct resource *res; | |
98 | int k, len; | |
99 | ||
100 | spin_lock_irq(&trapped_lock); | |
101 | list_for_each_entry(tiop, list, list) { | |
102 | voffs = 0; | |
103 | for (k = 0; k < tiop->num_resources; k++) { | |
104 | res = tiop->resource + k; | |
105 | if (res->start == offset) { | |
106 | spin_unlock_irq(&trapped_lock); | |
107 | return tiop->virt_base + voffs; | |
108 | } | |
109 | ||
110 | len = (res->end - res->start) + 1; | |
111 | voffs += roundup(len, PAGE_SIZE); | |
112 | } | |
113 | } | |
114 | spin_unlock_irq(&trapped_lock); | |
115 | return NULL; | |
116 | } | |
117 | ||
/*
 * Walk the kernel page tables (swapper_pg_dir) for @address and return
 * the trapped_io descriptor backing it, or NULL if any level of the
 * walk is not present.
 *
 * This works because register_trapped_io() vmaps the descriptor's own
 * page at every page of the trapped window, so the pfn found in the PTE
 * points back at the descriptor itself.
 *
 * NOTE(review): no pte_present() check on the final entry, and
 * pfn_to_kaddr() is returned as struct trapped_io * without a cast --
 * relies on pfn_to_kaddr yielding a pointer-compatible value on this
 * arch; confirm before reuse elsewhere.
 */
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}
143 | ||
144 | static unsigned long lookup_address(struct trapped_io *tiop, | |
145 | unsigned long address) | |
146 | { | |
147 | struct resource *res; | |
148 | unsigned long vaddr = (unsigned long)tiop->virt_base; | |
149 | unsigned long len; | |
150 | int k; | |
151 | ||
152 | for (k = 0; k < tiop->num_resources; k++) { | |
153 | res = tiop->resource + k; | |
154 | len = roundup((res->end - res->start) + 1, PAGE_SIZE); | |
155 | if (address < (vaddr + len)) | |
156 | return res->start + (address - vaddr); | |
157 | vaddr += len; | |
158 | } | |
159 | return 0; | |
160 | } | |
161 | ||
162 | static unsigned long long copy_word(unsigned long src_addr, int src_len, | |
163 | unsigned long dst_addr, int dst_len) | |
164 | { | |
165 | unsigned long long tmp = 0; | |
166 | ||
167 | switch (src_len) { | |
168 | case 1: | |
169 | tmp = ctrl_inb(src_addr); | |
170 | break; | |
171 | case 2: | |
172 | tmp = ctrl_inw(src_addr); | |
173 | break; | |
174 | case 4: | |
175 | tmp = ctrl_inl(src_addr); | |
176 | break; | |
177 | case 8: | |
178 | tmp = ctrl_inq(src_addr); | |
179 | break; | |
180 | } | |
181 | ||
182 | switch (dst_len) { | |
183 | case 1: | |
184 | ctrl_outb(tmp, dst_addr); | |
185 | break; | |
186 | case 2: | |
187 | ctrl_outw(tmp, dst_addr); | |
188 | break; | |
189 | case 4: | |
190 | ctrl_outl(tmp, dst_addr); | |
191 | break; | |
192 | case 8: | |
193 | ctrl_outq(tmp, dst_addr); | |
194 | break; | |
195 | } | |
196 | ||
197 | return tmp; | |
198 | } | |
199 | ||
200 | static unsigned long from_device(void *dst, const void *src, unsigned long cnt) | |
201 | { | |
202 | struct trapped_io *tiop; | |
203 | unsigned long src_addr = (unsigned long)src; | |
204 | unsigned long long tmp; | |
205 | ||
206 | pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); | |
207 | tiop = lookup_tiop(src_addr); | |
208 | WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); | |
209 | ||
210 | src_addr = lookup_address(tiop, src_addr); | |
211 | if (!src_addr) | |
212 | return cnt; | |
213 | ||
214 | tmp = copy_word(src_addr, MAX(cnt, (tiop->minimum_bus_width / 8)), | |
215 | (unsigned long)dst, cnt); | |
216 | ||
217 | pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); | |
218 | return 0; | |
219 | } | |
220 | ||
221 | static unsigned long to_device(void *dst, const void *src, unsigned long cnt) | |
222 | { | |
223 | struct trapped_io *tiop; | |
224 | unsigned long dst_addr = (unsigned long)dst; | |
225 | unsigned long long tmp; | |
226 | ||
227 | pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt); | |
228 | tiop = lookup_tiop(dst_addr); | |
229 | WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); | |
230 | ||
231 | dst_addr = lookup_address(tiop, dst_addr); | |
232 | if (!dst_addr) | |
233 | return cnt; | |
234 | ||
235 | tmp = copy_word((unsigned long)src, cnt, | |
236 | dst_addr, MAX(cnt, (tiop->minimum_bus_width / 8))); | |
237 | ||
238 | pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); | |
239 | return 0; | |
240 | } | |
241 | ||
/*
 * Access methods handed to handle_unaligned_access(): positional
 * initialization, so the first member reads from the device and the
 * second writes to it.
 * NOTE(review): struct mem_access is declared elsewhere; designated
 * initializers would be safer -- confirm field names before changing.
 */
static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};
246 | ||
/*
 * Fault fixup entry point for trapped io.
 *
 * Called on a fault at @address; returns nonzero when the fault was a
 * trapped-io access that has been emulated (so the faulting instruction
 * can be skipped), 0 when the fault is not ours or emulation failed.
 *
 * The faulting instruction is fetched from regs->pc under KERNEL_DS so
 * copy_from_user() may read kernel text, then re-executed through the
 * unaligned-access emulator with our device accessors substituted.
 */
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	opcode_t instruction;
	int tmp;

	/* only addresses inside a registered trapped window qualify */
	if (!lookup_tiop(address))
		return 0;

	/* trapped windows are kernel mappings; user mode is unexpected */
	WARN_ON(user_mode(regs));

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	/* emulate the access; handle_unaligned_access() returns 0 on
	 * success, hence the inversion */
	tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
	set_fs(oldfs);
	return tmp == 0;
}