/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

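/*
 * Worked example (illustrative, not part of the original driver): the
 * short-descriptor format above splits a device virtual address into a
 * 1MB section index, a 4KB page index within that section, and a byte
 * offset.  For iova = 0x12345678:
 *
 *	lv1ent_offset(iova) = 0x12345678 >> 20              = 0x123
 *	lv2ent_offset(iova) = (0x12345678 & 0xFF000) >> 12   = 0x45
 *	section_offs(iova)  = 0x12345678 & 0xFFFFF           = 0x45678
 *	lpage_offs(iova)    = 0x12345678 & 0xFFFF            = 0x5678
 *	spage_offs(iova)    = 0x12345678 & 0xFFF             = 0x678
 *
 * so a section mapping resolves to section_phys(sent) + 0x45678, while a
 * small-page mapping resolves to spage_phys(pent) + 0x678.
 */
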
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of the page table base. This is 0 if
 *                @itype is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr);

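/*
 * Illustrative sketch (not part of the original driver): a client driver can
 * replace default_fault_handler() below with its own callback via
 * exynos_sysmmu_set_fault_handler().  Returning 0 tells exynos_sysmmu_irq()
 * that the fault was handled, so the interrupt is cleared and translation is
 * unblocked; any other value leaves the fault reported as unhandled.  The
 * callback and device below are hypothetical:
 *
 *	static int mydev_sysmmu_fault(enum exynos_sysmmu_inttype itype,
 *			unsigned long pgtable_base, unsigned long fault_addr)
 *	{
 *		pr_warn("mydev: fault %d at %#lx\n", itype, fault_addr);
 *		return 0;
 *	}
 *
 *	exynos_sysmmu_set_fault_handler(mydev, mydev_sysmmu_fault);
 */
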
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.node */
	unsigned long *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node;	/* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	char *dbgname;
	int nsfrs;
	void __iomem **sfrbases;
	struct clk *clk[2];
	int activations;
	rwlock_t lock;
	struct iommu_domain *domain;
	sysmmu_fault_handler_t fault_handler;
	unsigned long pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
						unsigned long iova)
{
	__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG);	/* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
					unsigned long size, int idx)
{
	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}

void exynos_sysmmu_set_prefbuf(struct device *dev,
				unsigned long base0, unsigned long size0,
				unsigned long base1, unsigned long size1)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	unsigned long flags;
	int i;

	BUG_ON((base0 + size0) <= base0);
	BUG_ON((size1 > 0) && ((base1 + size1) <= base1));

	read_lock_irqsave(&data->lock, flags);
	if (!is_sysmmu_active(data))
		goto finish;

	for (i = 0; i < data->nsfrs; i++) {
		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
			if (!sysmmu_block(data->sfrbases[i]))
				continue;

			if (size1 == 0) {
				if (size0 <= SZ_128K) {
					base1 = base0;
					size1 = size0;
				} else {
					size1 = size0 -
						ALIGN(size0 / 2, SZ_64K);
					size0 = size0 - size1;
					base1 = base0 + size0;
				}
			}

			__sysmmu_set_prefbuf(
					data->sfrbases[i], base0, size0, 0);
			__sysmmu_set_prefbuf(
					data->sfrbases[i], base1, size1, 1);

			sysmmu_unblock(data->sfrbases[i]);
		}
	}
finish:
	read_unlock_irqrestore(&data->lock, flags);
}

static void __set_fault_handler(struct sysmmu_drvdata *data,
					sysmmu_fault_handler_t handler)
{
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
					sysmmu_fault_handler_t handler)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	__set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
			sysmmu_fault_name[itype], fault_addr, pgtable_base);

	ent = section_entry(__va(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}

	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

	BUG();

	return 0;
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked while this interrupt is being handled. */
	struct sysmmu_drvdata *data = dev_id;
	struct resource *irqres;
	struct platform_device *pdev;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;

	int i, ret = -ENOSYS;

	read_lock(&data->lock);

	WARN_ON(!is_sysmmu_active(data));

	pdev = to_platform_device(data->sysmmu);
	for (i = 0; i < (pdev->num_resources / 2); i++) {
		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (irqres && ((int)irqres->start == irq))
			break;
	}

	if (i == pdev->num_resources / 2) {
		itype = SYSMMU_FAULT_UNKNOWN;
	} else {
		itype = (enum exynos_sysmmu_inttype)
			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
			itype = SYSMMU_FAULT_UNKNOWN;
		else
			addr = __raw_readl(
				data->sfrbases[i] + fault_reg_offset[itype]);
	}

	if (data->domain)
		ret = report_iommu_fault(data->domain, data->dev,
				addr, itype);

	if ((ret == -ENOSYS) && data->fault_handler) {
		unsigned long base = data->pgtable;
		if (itype != SYSMMU_FAULT_UNKNOWN)
			base = __raw_readl(
					data->sfrbases[i] + REG_PT_BASE_ADDR);
		ret = data->fault_handler(itype, base, addr);
	}

	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
	else
		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
				data->dbgname, sysmmu_fault_name[itype]);

	if (itype != SYSMMU_FAULT_UNKNOWN)
		sysmmu_unblock(data->sfrbases[i]);

	read_unlock(&data->lock);

	return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;
	int i;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	for (i = 0; i < data->nsfrs; i++)
		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

	if (data->clk[1])
		clk_disable(data->clk[1]);
	if (data->clk[0])
		clk_disable(data->clk[0]);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	write_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
	else
		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
					data->dbgname, data->activations);

	return disabled;
}

/*
 * __exynos_sysmmu_enable: Enables the System MMU
 *
 * Returns -error if an error occurred and the System MMU was not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled before.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int i, ret = 0;
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
		goto finish;
	}

	if (data->clk[0])
		clk_enable(data->clk[0]);
	if (data->clk[1])
		clk_enable(data->clk[1]);

	data->pgtable = pgtable;

	for (i = 0; i < data->nsfrs; i++) {
		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);

		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
			/* System MMU version is 3.x */
			__raw_writel((1 << 12) | (2 << 28),
					data->sfrbases[i] + REG_MMU_CFG);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
		}

		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
	}

	data->domain = domain;

	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
	write_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu,
			"(%s) Already enabled with page table %#lx\n",
			data->dbgname, data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}

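/*
 * Illustrative call sequence (not part of the original driver): a device
 * driver that manages its own page table, instead of attaching through the
 * IOMMU API below, brackets DMA activity with these calls.  "mydev" and
 * "my_pgtable_pa" are hypothetical:
 *
 *	ret = exynos_sysmmu_enable(mydev, my_pgtable_pa);
 *	if (ret < 0)
 *		return ret;
 *	... issue DMA through the System MMU ...
 *	exynos_sysmmu_disable(mydev);
 *
 * The calls nest: enable increments data->activations, and the hardware is
 * only switched off when the matching number of disable calls brings the
 * count back to zero.
 */
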
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate_entry(
						data->sfrbases[i], iova);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate(data->sfrbases[i]);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int i, ret;
	struct device *dev;
	struct sysmmu_drvdata *data;

	dev = &pdev->dev;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = dev_set_drvdata(dev, data);
	if (ret) {
		dev_dbg(dev, "Unable to initialize driver data\n");
		goto err_init;
	}

	data->nsfrs = pdev->num_resources / 2;
	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
								GFP_KERNEL);
	if (data->sfrbases == NULL) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_init;
	}

	for (i = 0; i < data->nsfrs; i++) {
		struct resource *res;
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_dbg(dev, "Unable to find IOMEM region\n");
			ret = -ENOENT;
			goto err_res;
		}

		data->sfrbases[i] = ioremap(res->start, resource_size(res));
		if (!data->sfrbases[i]) {
			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
							res->start);
			ret = -ENOENT;
			goto err_res;
		}
	}

	for (i = 0; i < data->nsfrs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_dbg(dev, "Unable to find IRQ resource\n");
			goto err_irq;
		}

		ret = request_irq(ret, exynos_sysmmu_irq, 0,
					dev_name(dev), data);
		if (ret) {
			dev_dbg(dev, "Unable to register interrupt handler\n");
			goto err_irq;
		}
	}

	if (dev_get_platdata(dev)) {
		char *deli, *beg;
		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

		beg = platdata->clockname;

		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
			/* NOTHING */;

		if (*deli == '\0')
			deli = NULL;
		else
			*deli = '\0';

		data->clk[0] = clk_get(dev, beg);
		if (IS_ERR(data->clk[0])) {
			data->clk[0] = NULL;
			dev_dbg(dev, "No clock descriptor registered\n");
		}

		if (data->clk[0] && deli) {
			*deli = ',';
			data->clk[1] = clk_get(dev, deli + 1);
			if (IS_ERR(data->clk[1]))
				data->clk[1] = NULL;
		}

		data->dbgname = platdata->dbgname;
	}

	data->sysmmu = dev;
	rwlock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	__set_fault_handler(data, &default_fault_handler);

	if (dev->parent)
		pm_runtime_enable(dev);

	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
	return 0;
err_irq:
	while (i-- > 0) {
		int irq;

		irq = platform_get_irq(pdev, i);
		free_irq(irq, data);
	}
err_res:
	while (data->nsfrs-- > 0)
		iounmap(data->sfrbases[data->nsfrs]);
	kfree(data->sfrbases);
err_init:
	kfree(data);
err_alloc:
	dev_err(dev, "Failed to initialize\n");
	return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
	.probe		= exynos_sysmmu_probe,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "exynos-sysmmu",
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

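/*
 * Sizing note (illustrative, assuming 4KB pages and 4-byte longs on 32-bit
 * ARM): the order-2 allocation above is 4 pages = 16KB, which holds exactly
 * NUM_LV1ENTRIES (4096) first-level entries and satisfies the hardware's
 * 16KB alignment requirement for the lv1 table.  The order-1 allocation is
 * 2 pages = 8KB, one 2-byte free-entry counter per section
 * (4096 * sizeof(short)).
 */
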
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kfree(__va(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

	if (ret == 0) {
		/* 'data->node' must not already be on priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
				__func__, __pa(priv->pgtable));
		pm_runtime_put(data->sysmmu);
	} else if (ret > 0) {
		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
					__func__, __pa(priv->pgtable));
	} else {
		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
					__func__, __pa(priv->pgtable));
	}

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
					__func__, __pa(priv->pgtable));
		list_del(&data->node);
		INIT_LIST_HEAD(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
					__func__, __pa(priv->pgtable));
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return NULL;

		*sent = mk_lv1ent_page(__pa(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent))
		return -EADDRINUSE;

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES)
			return -EADDRINUSE;

		kfree(page_entry(sent, 0));

		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				/* roll back the entries written so far */
				memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (!pent)
			ret = -ENOMEM;
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret) {
		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
							__func__, iova, size);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

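/*
 * Illustrative sketch (not part of the original driver): a client normally
 * reaches exynos_iommu_map() through the generic IOMMU API rather than
 * calling it directly.  The device and addresses below are hypothetical;
 * only 4KB, 64KB and 1MB sizes are accepted (see pgsize_bitmap at the end
 * of this file):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, mydev);
 *	iommu_map(dom, 0x20000000, phys, SZ_64K, 0);
 *	...
 *	iommu_unmap(dom, 0x20000000, SZ_64K);
 *	iommu_detach_device(dom, mydev);
 *	iommu_domain_free(dom);
 */
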
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		BUG_ON(size < SECT_SIZE);

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	BUG_ON(size < LPAGE_SIZE);

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&exynos_sysmmu_driver);

	if (ret == 0)
		bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

	return ret;
}
subsys_initcall(exynos_iommu_init);