Commit | Line | Data |
---|---|---|
740a01ee MS |
1 | /* |
2 | * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd. | |
2a96536e KC |
3 | * http://www.samsung.com |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | */ | |
9 | ||
10 | #ifdef CONFIG_EXYNOS_IOMMU_DEBUG | |
11 | #define DEBUG | |
12 | #endif | |
13 | ||
2a96536e | 14 | #include <linux/clk.h> |
8ed55c81 | 15 | #include <linux/dma-mapping.h> |
2a96536e | 16 | #include <linux/err.h> |
312900c6 | 17 | #include <linux/io.h> |
2a96536e | 18 | #include <linux/iommu.h> |
312900c6 | 19 | #include <linux/interrupt.h> |
2a96536e | 20 | #include <linux/list.h> |
8ed55c81 MS |
21 | #include <linux/of.h> |
22 | #include <linux/of_iommu.h> | |
23 | #include <linux/of_platform.h> | |
312900c6 MS |
24 | #include <linux/platform_device.h> |
25 | #include <linux/pm_runtime.h> | |
26 | #include <linux/slab.h> | |
58c6f6a3 | 27 | #include <linux/dma-iommu.h> |
2a96536e | 28 | |
d09d78fc CK |
29 | typedef u32 sysmmu_iova_t; |
30 | typedef u32 sysmmu_pte_t; | |
31 | ||
f171abab | 32 | /* We do not consider super section mapping (16MB) */ |
2a96536e KC |
33 | #define SECT_ORDER 20 |
34 | #define LPAGE_ORDER 16 | |
35 | #define SPAGE_ORDER 12 | |
36 | ||
37 | #define SECT_SIZE (1 << SECT_ORDER) | |
38 | #define LPAGE_SIZE (1 << LPAGE_ORDER) | |
39 | #define SPAGE_SIZE (1 << SPAGE_ORDER) | |
40 | ||
41 | #define SECT_MASK (~(SECT_SIZE - 1)) | |
42 | #define LPAGE_MASK (~(LPAGE_SIZE - 1)) | |
43 | #define SPAGE_MASK (~(SPAGE_SIZE - 1)) | |
44 | ||
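The three granularities above are 1 MiB sections, 64 KiB large pages and 4 KiB small pages (orders 20, 16 and 12). A minimal sketch, not part of the driver, of how the *_MASK macros round an IOVA down to the base of its containing mapping:

```c
/*
 * Illustrative sketch only: rounding an IOVA down with the mask macros
 * defined above (SECT = 1 MiB, LPAGE = 64 KiB, SPAGE = 4 KiB).
 */
static inline sysmmu_iova_t example_sect_base(sysmmu_iova_t iova)
{
	return iova & SECT_MASK;	/* base of the 1 MiB section */
}

static inline sysmmu_iova_t example_spage_base(sysmmu_iova_t iova)
{
	return iova & SPAGE_MASK;	/* base of the 4 KiB small page */
}
```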
66a7ed84 CK |
45 | #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \ |
46 | ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) | |
47 | #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK) | |
48 | #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1) | |
49 | #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \ | |
50 | ((*(sent) & 3) == 1)) | |
2a96536e KC |
51 | #define lv1ent_section(sent) ((*(sent) & 3) == 2) |
52 | ||
53 | #define lv2ent_fault(pent) ((*(pent) & 3) == 0) | |
54 | #define lv2ent_small(pent) ((*(pent) & 2) == 2) | |
55 | #define lv2ent_large(pent) ((*(pent) & 3) == 1) | |
56 | ||
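The low two bits of an entry encode its type. As a hedged illustration (not driver code), a second-level entry can be classified with the predicates above:

```c
/* Sketch: mapping size encoded by a second-level PTE's low type bits. */
static inline size_t example_lv2_mapping_size(const sysmmu_pte_t *pent)
{
	if (lv2ent_small(pent))
		return SPAGE_SIZE;	/* 4 KiB small page */
	if (lv2ent_large(pent))
		return LPAGE_SIZE;	/* 64 KiB large page */
	return 0;			/* fault entry: nothing mapped */
}
```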
6ae5343c BD |
57 | #ifdef CONFIG_BIG_ENDIAN |
58 | #warning "revisit driver if we can enable big-endian ptes" | |
59 | #endif | |
60 | ||
740a01ee MS |
61 | /* |
62 | * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address spaces. |
63 | * v5.0 introduced support for a 36-bit physical address space by shifting |
64 | * all page entry values by 4 bits. |
65 | * All SYSMMU controllers in the system support address spaces of the same |
66 | * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the |
67 | * proper value (0 or 4). |
68 | */ | |
69 | static short PG_ENT_SHIFT = -1; | |
70 | #define SYSMMU_PG_ENT_SHIFT 0 | |
71 | #define SYSMMU_V5_PG_ENT_SHIFT 4 | |
72 | ||
1a0d8dac MS |
73 | static const sysmmu_pte_t *LV1_PROT; |
74 | static const sysmmu_pte_t SYSMMU_LV1_PROT[] = { | |
75 | ((0 << 15) | (0 << 10)), /* no access */ | |
76 | ((1 << 15) | (1 << 10)), /* IOMMU_READ only */ | |
77 | ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */ | |
78 | ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */ | |
79 | }; | |
80 | static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = { | |
81 | (0 << 4), /* no access */ | |
82 | (1 << 4), /* IOMMU_READ only */ | |
83 | (2 << 4), /* IOMMU_WRITE only */ | |
84 | (3 << 4), /* IOMMU_READ | IOMMU_WRITE */ | |
85 | }; | |
86 | ||
87 | static const sysmmu_pte_t *LV2_PROT; | |
88 | static const sysmmu_pte_t SYSMMU_LV2_PROT[] = { | |
89 | ((0 << 9) | (0 << 4)), /* no access */ | |
90 | ((1 << 9) | (1 << 4)), /* IOMMU_READ only */ | |
91 | ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */ | |
92 | ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */ | |
93 | }; | |
94 | static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = { | |
95 | (0 << 2), /* no access */ | |
96 | (1 << 2), /* IOMMU_READ only */ | |
97 | (2 << 2), /* IOMMU_WRITE only */ | |
98 | (3 << 2), /* IOMMU_READ | IOMMU_WRITE */ | |
99 | }; | |
100 | ||
101 | #define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE) | |
102 | ||
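Since IOMMU_READ and IOMMU_WRITE occupy bits 0 and 1 of the generic prot value, the masked value doubles as a direct index into the four-entry tables above. A small illustrative sketch of the lookup the map path performs later:

```c
/*
 * Sketch only: a generic IOMMU prot value, masked to the supported bits,
 * indexes the LV2_PROT table selected at probe time.
 */
static inline sysmmu_pte_t example_lv2_prot_bits(int prot)
{
	return LV2_PROT[prot & SYSMMU_SUPPORTED_PROT_BITS];
}
```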
740a01ee MS |
103 | #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT) |
104 | #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK) | |
105 | #define section_offs(iova) (iova & (SECT_SIZE - 1)) | |
106 | #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK) | |
107 | #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1)) | |
108 | #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK) | |
109 | #define spage_offs(iova) (iova & (SPAGE_SIZE - 1)) | |
2a96536e KC |
110 | |
111 | #define NUM_LV1ENTRIES 4096 | |
d09d78fc | 112 | #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) |
2a96536e | 113 | |
d09d78fc CK |
114 | static u32 lv1ent_offset(sysmmu_iova_t iova) |
115 | { | |
116 | return iova >> SECT_ORDER; | |
117 | } | |
118 | ||
119 | static u32 lv2ent_offset(sysmmu_iova_t iova) | |
120 | { | |
121 | return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); | |
122 | } | |
123 | ||
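A 32-bit IOVA therefore decomposes into a first-level index (one of 4096 one-megabyte sections) and a second-level index (one of 256 four-kilobyte pages inside that section). A short illustrative sketch:

```c
/* Sketch: splitting an IOVA into its two page table indices. */
static inline void example_split_iova(sysmmu_iova_t iova, u32 *lv1, u32 *lv2)
{
	*lv1 = lv1ent_offset(iova);	/* iova >> 20, range 0..4095   */
	*lv2 = lv2ent_offset(iova);	/* (iova >> 12) & 0xFF, 0..255 */
}
```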
5e3435eb | 124 | #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t)) |
d09d78fc | 125 | #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) |
2a96536e KC |
126 | |
127 | #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) | |
740a01ee | 128 | #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0)) |
2a96536e | 129 | |
1a0d8dac | 130 | #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2) |
740a01ee | 131 | #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1) |
1a0d8dac MS |
132 | #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1) |
133 | #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2) | |
2a96536e KC |
134 | |
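A hedged sketch (not driver code) of what these helpers produce: the physical address is stored right-shifted by PG_ENT_SHIFT (0 on v1.x-v3.x, 4 on v5, per the comment above) with the type and protection bits OR-ed into the low bits, and section_phys() recovers it:

```c
/*
 * Sketch: encode/decode symmetry of a 1 MiB section entry. Assumes
 * 'pa' is section aligned and 'prot' is already masked to
 * SYSMMU_SUPPORTED_PROT_BITS, as exynos_iommu_map() does below.
 */
static inline bool example_sect_roundtrip(phys_addr_t pa, int prot)
{
	sysmmu_pte_t ent = mk_lv1ent_sect(pa, prot);

	return section_phys(&ent) == pa;
}
```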
135 | #define CTRL_ENABLE 0x5 | |
136 | #define CTRL_BLOCK 0x7 | |
137 | #define CTRL_DISABLE 0x0 | |
138 | ||
eeb5184b | 139 | #define CFG_LRU 0x1 |
1a0d8dac | 140 | #define CFG_EAP (1 << 2) |
eeb5184b | 141 | #define CFG_QOS(n) ((n & 0xF) << 7) |
eeb5184b CK |
142 | #define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */ |
143 | #define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */ | |
144 | #define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */ | |
145 | ||
740a01ee | 146 | /* common registers */ |
2a96536e KC |
147 | #define REG_MMU_CTRL 0x000 |
148 | #define REG_MMU_CFG 0x004 | |
149 | #define REG_MMU_STATUS 0x008 | |
740a01ee MS |
150 | #define REG_MMU_VERSION 0x034 |
151 | ||
152 | #define MMU_MAJ_VER(val) ((val) >> 7) | |
153 | #define MMU_MIN_VER(val) ((val) & 0x7F) | |
154 | #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */ | |
155 | ||
156 | #define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F)) | |
157 | ||
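The version register keeps the hardware revision in its top 11 bits; the raw value is then unpacked as a 4-bit major and 7-bit minor. A small sketch of the decoding done in __sysmmu_get_version() below:

```c
/* Sketch: decode REG_MMU_VERSION into major/minor parts. */
static inline void example_decode_version(u32 reg, u32 *maj, u32 *min)
{
	u32 ver = MMU_RAW_VER(reg);	/* top 11 bits of the register */

	*maj = MMU_MAJ_VER(ver);	/* e.g. 3 for a v3.3 SYSMMU */
	*min = MMU_MIN_VER(ver);	/* e.g. 3 */
}
```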
158 | /* v1.x - v3.x registers */ | |
2a96536e KC |
159 | #define REG_MMU_FLUSH 0x00C |
160 | #define REG_MMU_FLUSH_ENTRY 0x010 | |
161 | #define REG_PT_BASE_ADDR 0x014 | |
162 | #define REG_INT_STATUS 0x018 | |
163 | #define REG_INT_CLEAR 0x01C | |
164 | ||
165 | #define REG_PAGE_FAULT_ADDR 0x024 | |
166 | #define REG_AW_FAULT_ADDR 0x028 | |
167 | #define REG_AR_FAULT_ADDR 0x02C | |
168 | #define REG_DEFAULT_SLAVE_ADDR 0x030 | |
169 | ||
740a01ee MS |
170 | /* v5.x registers */ |
171 | #define REG_V5_PT_BASE_PFN 0x00C | |
172 | #define REG_V5_MMU_FLUSH_ALL 0x010 | |
173 | #define REG_V5_MMU_FLUSH_ENTRY 0x014 | |
174 | #define REG_V5_INT_STATUS 0x060 | |
175 | #define REG_V5_INT_CLEAR 0x064 | |
176 | #define REG_V5_FAULT_AR_VA 0x070 | |
177 | #define REG_V5_FAULT_AW_VA 0x080 | |
2a96536e | 178 | |
6b21a5db CK |
179 | #define has_sysmmu(dev) (dev->archdata.iommu != NULL) |
180 | ||
5e3435eb | 181 | static struct device *dma_dev; |
734c3c73 | 182 | static struct kmem_cache *lv2table_kmem_cache; |
66a7ed84 CK |
183 | static sysmmu_pte_t *zero_lv2_table; |
184 | #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table)) | |
734c3c73 | 185 | |
d09d78fc | 186 | static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) |
2a96536e KC |
187 | { |
188 | return pgtable + lv1ent_offset(iova); | |
189 | } | |
190 | ||
d09d78fc | 191 | static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) |
2a96536e | 192 | { |
d09d78fc | 193 | return (sysmmu_pte_t *)phys_to_virt( |
7222e8db | 194 | lv2table_base(sent)) + lv2ent_offset(iova); |
2a96536e KC |
195 | } |
196 | ||
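With these two accessors the translation is a plain two-level walk, the same one exynos_iommu_iova_to_phys() performs further below. A hedged sketch:

```c
/* Sketch: software walk of the two-level page table (cf. iova_to_phys). */
static inline phys_addr_t example_walk(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	sysmmu_pte_t *ent = section_entry(pgtable, iova);

	if (lv1ent_section(ent))
		return section_phys(ent) + section_offs(iova);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, iova);
		if (lv2ent_large(ent))
			return lpage_phys(ent) + lpage_offs(iova);
		if (lv2ent_small(ent))
			return spage_phys(ent) + spage_offs(iova);
	}

	return 0;	/* fault entry: nothing mapped at this IOVA */
}
```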
d093fc7e MS |
197 | /* |
198 | * IOMMU fault information register | |
199 | */ | |
200 | struct sysmmu_fault_info { | |
201 | unsigned int bit; /* bit number in STATUS register */ | |
202 | unsigned short addr_reg; /* register to read VA fault address */ | |
203 | const char *name; /* human readable fault name */ | |
204 | unsigned int type; /* fault type for report_iommu_fault */ | |
2a96536e KC |
205 | }; |
206 | ||
d093fc7e MS |
207 | static const struct sysmmu_fault_info sysmmu_faults[] = { |
208 | { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ }, | |
209 | { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ }, | |
210 | { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE }, | |
211 | { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ }, | |
212 | { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ }, | |
213 | { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ }, | |
214 | { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE }, | |
215 | { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE }, | |
2a96536e KC |
216 | }; |
217 | ||
740a01ee MS |
218 | static const struct sysmmu_fault_info sysmmu_v5_faults[] = { |
219 | { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ }, | |
220 | { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ }, | |
221 | { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ }, | |
222 | { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ }, | |
223 | { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ }, | |
224 | { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE }, | |
225 | { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE }, | |
226 | { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE }, | |
227 | { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE }, | |
228 | { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE }, | |
229 | }; | |
230 | ||
2860af3c MS |
231 | /* |
232 | * This structure is attached to dev.archdata.iommu of the master device |
233 | * on device add. It contains a list of the SYSMMU controllers defined by the |
234 | * device tree that are bound to the given master device. It is usually |
235 | * referenced by the 'owner' pointer. |
236 | */ | |
6b21a5db | 237 | struct exynos_iommu_owner { |
1b092054 | 238 | struct list_head controllers; /* list of sysmmu_drvdata.owner_node */ |
5fa61cbf | 239 | struct iommu_domain *domain; /* domain this device is attached to */ |
9b265536 | 240 | struct mutex rpm_lock; /* for runtime pm of all sysmmus */ |
6b21a5db CK |
241 | }; |
242 | ||
2860af3c MS |
243 | /* |
244 | * This structure is an Exynos-specific generalization of struct iommu_domain. |
245 | * It contains a list of the SYSMMU controllers from all master devices that |
246 | * have been attached to this domain, and the page tables of the I/O address |
247 | * space defined by it. It is usually referenced by the 'domain' pointer. |
248 | */ | |
2a96536e | 249 | struct exynos_iommu_domain { |
2860af3c MS |
250 | struct list_head clients; /* list of sysmmu_drvdata.domain_node */ |
251 | sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ | |
252 | short *lv2entcnt; /* free lv2 entry counter for each section */ | |
253 | spinlock_t lock; /* lock for modifying list of clients */ |
254 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ | |
e1fd1eaa | 255 | struct iommu_domain domain; /* generic domain data structure */ |
2a96536e KC |
256 | }; |
257 | ||
2860af3c MS |
258 | /* |
259 | * This structure holds all data of a single SYSMMU controller; this includes |
260 | * hw resources like registers and clocks, pointers and list nodes to connect |
261 | * it to all other structures, internal state and parameters read from the |
262 | * device tree. It is usually referenced by the 'data' pointer. |
263 | */ | |
2a96536e | 264 | struct sysmmu_drvdata { |
2860af3c MS |
265 | struct device *sysmmu; /* SYSMMU controller device */ |
266 | struct device *master; /* master device (owner) */ | |
267 | void __iomem *sfrbase; /* our registers */ | |
268 | struct clk *clk; /* SYSMMU's clock */ | |
740a01ee MS |
269 | struct clk *aclk; /* SYSMMU's aclk clock */ |
270 | struct clk *pclk; /* SYSMMU's pclk clock */ | |
2860af3c | 271 | struct clk *clk_master; /* master's device clock */ |
2860af3c | 272 | spinlock_t lock; /* lock for modifying state */ |
47a574ff | 273 | bool active; /* current status */ |
2860af3c MS |
274 | struct exynos_iommu_domain *domain; /* domain we belong to */ |
275 | struct list_head domain_node; /* node for domain clients list */ | |
1b092054 | 276 | struct list_head owner_node; /* node for owner controllers list */ |
2860af3c MS |
277 | phys_addr_t pgtable; /* assigned page table structure */ |
278 | unsigned int version; /* our version */ | |
d2c302b6 JR |
279 | |
280 | struct iommu_device iommu; /* IOMMU core handle */ | |
2a96536e KC |
281 | }; |
282 | ||
e1fd1eaa JR |
283 | static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) |
284 | { | |
285 | return container_of(dom, struct exynos_iommu_domain, domain); | |
286 | } | |
287 | ||
02cdc365 | 288 | static void sysmmu_unblock(struct sysmmu_drvdata *data) |
2a96536e | 289 | { |
84bd0428 | 290 | writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); |
2a96536e KC |
291 | } |
292 | ||
02cdc365 | 293 | static bool sysmmu_block(struct sysmmu_drvdata *data) |
2a96536e KC |
294 | { |
295 | int i = 120; | |
296 | ||
84bd0428 MS |
297 | writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); |
298 | while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) | |
2a96536e KC |
299 | --i; |
300 | ||
84bd0428 | 301 | if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { |
02cdc365 | 302 | sysmmu_unblock(data); |
2a96536e KC |
303 | return false; |
304 | } | |
305 | ||
306 | return true; | |
307 | } | |
308 | ||
02cdc365 | 309 | static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data) |
2a96536e | 310 | { |
740a01ee | 311 | if (MMU_MAJ_VER(data->version) < 5) |
84bd0428 | 312 | writel(0x1, data->sfrbase + REG_MMU_FLUSH); |
740a01ee | 313 | else |
84bd0428 | 314 | writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL); |
2a96536e KC |
315 | } |
316 | ||
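Register-level TLB maintenance is always bracketed by sysmmu_block()/sysmmu_unblock(); a minimal sketch of the pattern the invalidation helpers below follow:

```c
/* Sketch: the block -> flush -> unblock sequence used by the driver. */
static inline void example_blocked_flush(struct sysmmu_drvdata *data)
{
	if (sysmmu_block(data)) {
		__sysmmu_tlb_invalidate(data);
		sysmmu_unblock(data);
	}
}
```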
02cdc365 | 317 | static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, |
d09d78fc | 318 | sysmmu_iova_t iova, unsigned int num_inv) |
2a96536e | 319 | { |
3ad6b7f3 | 320 | unsigned int i; |
365409db | 321 | |
3ad6b7f3 | 322 | for (i = 0; i < num_inv; i++) { |
740a01ee | 323 | if (MMU_MAJ_VER(data->version) < 5) |
84bd0428 | 324 | writel((iova & SPAGE_MASK) | 1, |
740a01ee MS |
325 | data->sfrbase + REG_MMU_FLUSH_ENTRY); |
326 | else | |
84bd0428 | 327 | writel((iova & SPAGE_MASK) | 1, |
740a01ee | 328 | data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); |
3ad6b7f3 CK |
329 | iova += SPAGE_SIZE; |
330 | } | |
2a96536e KC |
331 | } |
332 | ||
02cdc365 | 333 | static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd) |
2a96536e | 334 | { |
740a01ee | 335 | if (MMU_MAJ_VER(data->version) < 5) |
84bd0428 | 336 | writel(pgd, data->sfrbase + REG_PT_BASE_ADDR); |
740a01ee | 337 | else |
84bd0428 | 338 | writel(pgd >> PAGE_SHIFT, |
740a01ee | 339 | data->sfrbase + REG_V5_PT_BASE_PFN); |
2a96536e | 340 | |
02cdc365 | 341 | __sysmmu_tlb_invalidate(data); |
2a96536e KC |
342 | } |
343 | ||
fecc49db MS |
344 | static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data) |
345 | { | |
346 | BUG_ON(clk_prepare_enable(data->clk_master)); | |
347 | BUG_ON(clk_prepare_enable(data->clk)); | |
348 | BUG_ON(clk_prepare_enable(data->pclk)); | |
349 | BUG_ON(clk_prepare_enable(data->aclk)); | |
350 | } | |
351 | ||
352 | static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data) | |
353 | { | |
354 | clk_disable_unprepare(data->aclk); | |
355 | clk_disable_unprepare(data->pclk); | |
356 | clk_disable_unprepare(data->clk); | |
357 | clk_disable_unprepare(data->clk_master); | |
358 | } | |
359 | ||
850d313e MS |
360 | static void __sysmmu_get_version(struct sysmmu_drvdata *data) |
361 | { | |
362 | u32 ver; | |
363 | ||
fecc49db | 364 | __sysmmu_enable_clocks(data); |
850d313e | 365 | |
84bd0428 | 366 | ver = readl(data->sfrbase + REG_MMU_VERSION); |
850d313e MS |
367 | |
368 | /* controllers on some SoCs don't report proper version */ | |
369 | if (ver == 0x80000001u) | |
370 | data->version = MAKE_MMU_VER(1, 0); | |
371 | else | |
372 | data->version = MMU_RAW_VER(ver); | |
373 | ||
374 | dev_dbg(data->sysmmu, "hardware version: %d.%d\n", | |
375 | MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); | |
376 | ||
fecc49db | 377 | __sysmmu_disable_clocks(data); |
850d313e MS |
378 | } |
379 | ||
d093fc7e MS |
380 | static void show_fault_information(struct sysmmu_drvdata *data, |
381 | const struct sysmmu_fault_info *finfo, | |
382 | sysmmu_iova_t fault_addr) | |
2a96536e | 383 | { |
d09d78fc | 384 | sysmmu_pte_t *ent; |
2a96536e | 385 | |
ec5d241b MS |
386 | dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n", |
387 | dev_name(data->master), finfo->name, fault_addr); | |
388 | dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); | |
d093fc7e | 389 | ent = section_entry(phys_to_virt(data->pgtable), fault_addr); |
ec5d241b | 390 | dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); |
2a96536e KC |
391 | if (lv1ent_page(ent)) { |
392 | ent = page_entry(ent, fault_addr); | |
ec5d241b | 393 | dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); |
2a96536e | 394 | } |
2a96536e KC |
395 | } |
396 | ||
397 | static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) | |
398 | { | |
f171abab | 399 | /* SYSMMU is in a blocked state when an interrupt occurs. */ |
2a96536e | 400 | struct sysmmu_drvdata *data = dev_id; |
740a01ee MS |
401 | const struct sysmmu_fault_info *finfo; |
402 | unsigned int i, n, itype; | |
d093fc7e | 403 | sysmmu_iova_t fault_addr = -1; |
740a01ee | 404 | unsigned short reg_status, reg_clear; |
7222e8db | 405 | int ret = -ENOSYS; |
2a96536e | 406 | |
47a574ff | 407 | WARN_ON(!data->active); |
2a96536e | 408 | |
740a01ee MS |
409 | if (MMU_MAJ_VER(data->version) < 5) { |
410 | reg_status = REG_INT_STATUS; | |
411 | reg_clear = REG_INT_CLEAR; | |
412 | finfo = sysmmu_faults; | |
413 | n = ARRAY_SIZE(sysmmu_faults); | |
414 | } else { | |
415 | reg_status = REG_V5_INT_STATUS; | |
416 | reg_clear = REG_V5_INT_CLEAR; | |
417 | finfo = sysmmu_v5_faults; | |
418 | n = ARRAY_SIZE(sysmmu_v5_faults); | |
419 | } | |
420 | ||
9d4e7a24 CK |
421 | spin_lock(&data->lock); |
422 | ||
b398af21 | 423 | clk_enable(data->clk_master); |
9d4e7a24 | 424 | |
84bd0428 | 425 | itype = __ffs(readl(data->sfrbase + reg_status)); |
d093fc7e MS |
426 | for (i = 0; i < n; i++, finfo++) |
427 | if (finfo->bit == itype) | |
428 | break; | |
429 | /* unknown/unsupported fault */ | |
430 | BUG_ON(i == n); | |
431 | ||
432 | /* print debug message */ | |
84bd0428 | 433 | fault_addr = readl(data->sfrbase + finfo->addr_reg); |
d093fc7e | 434 | show_fault_information(data, finfo, fault_addr); |
2a96536e | 435 | |
d093fc7e MS |
436 | if (data->domain) |
437 | ret = report_iommu_fault(&data->domain->domain, | |
438 | data->master, fault_addr, finfo->type); | |
1fab7fa7 CK |
439 | /* fault is not recovered by fault handler */ |
440 | BUG_ON(ret != 0); | |
2a96536e | 441 | |
84bd0428 | 442 | writel(1 << itype, data->sfrbase + reg_clear); |
1fab7fa7 | 443 | |
02cdc365 | 444 | sysmmu_unblock(data); |
2a96536e | 445 | |
b398af21 | 446 | clk_disable(data->clk_master); |
70605870 | 447 | |
9d4e7a24 | 448 | spin_unlock(&data->lock); |
2a96536e KC |
449 | |
450 | return IRQ_HANDLED; | |
451 | } | |
452 | ||
47a574ff | 453 | static void __sysmmu_disable(struct sysmmu_drvdata *data) |
2a96536e | 454 | { |
47a574ff MS |
455 | unsigned long flags; |
456 | ||
b398af21 | 457 | clk_enable(data->clk_master); |
70605870 | 458 | |
47a574ff | 459 | spin_lock_irqsave(&data->lock, flags); |
84bd0428 MS |
460 | writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); |
461 | writel(0, data->sfrbase + REG_MMU_CFG); | |
47a574ff | 462 | data->active = false; |
6b21a5db CK |
463 | spin_unlock_irqrestore(&data->lock, flags); |
464 | ||
47a574ff | 465 | __sysmmu_disable_clocks(data); |
6b21a5db | 466 | } |
2a96536e | 467 | |
6b21a5db CK |
468 | static void __sysmmu_init_config(struct sysmmu_drvdata *data) |
469 | { | |
83addecd MS |
470 | unsigned int cfg; |
471 | ||
83addecd MS |
472 | if (data->version <= MAKE_MMU_VER(3, 1)) |
473 | cfg = CFG_LRU | CFG_QOS(15); | |
474 | else if (data->version <= MAKE_MMU_VER(3, 2)) | |
475 | cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL; | |
476 | else | |
477 | cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN; | |
6b21a5db | 478 | |
1a0d8dac MS |
479 | cfg |= CFG_EAP; /* enable access protection bits check */ |
480 | ||
84bd0428 | 481 | writel(cfg, data->sfrbase + REG_MMU_CFG); |
6b21a5db CK |
482 | } |
483 | ||
47a574ff | 484 | static void __sysmmu_enable(struct sysmmu_drvdata *data) |
6b21a5db | 485 | { |
47a574ff MS |
486 | unsigned long flags; |
487 | ||
fecc49db | 488 | __sysmmu_enable_clocks(data); |
70605870 | 489 | |
47a574ff | 490 | spin_lock_irqsave(&data->lock, flags); |
84bd0428 | 491 | writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); |
6b21a5db | 492 | __sysmmu_init_config(data); |
02cdc365 | 493 | __sysmmu_set_ptbase(data, data->pgtable); |
84bd0428 | 494 | writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); |
47a574ff MS |
495 | data->active = true; |
496 | spin_unlock_irqrestore(&data->lock, flags); | |
7222e8db | 497 | |
fecc49db MS |
498 | /* |
499 | * The SYSMMU driver keeps the master's clock enabled only for the short |
500 | * time needed to access the registers. For performing address |
501 | * translation during a DMA transaction it relies on the client |
502 | * driver to enable it. |
503 | */ | |
b398af21 | 504 | clk_disable(data->clk_master); |
6b21a5db | 505 | } |
70605870 | 506 | |
469acebe | 507 | static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, |
66a7ed84 CK |
508 | sysmmu_iova_t iova) |
509 | { | |
510 | unsigned long flags; | |
66a7ed84 | 511 | |
66a7ed84 | 512 | spin_lock_irqsave(&data->lock, flags); |
47a574ff | 513 | if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { |
01324ab2 | 514 | clk_enable(data->clk_master); |
7d2aa6b8 | 515 | if (sysmmu_block(data)) { |
cd37a296 MS |
516 | if (data->version >= MAKE_MMU_VER(5, 0)) |
517 | __sysmmu_tlb_invalidate(data); | |
518 | else | |
519 | __sysmmu_tlb_invalidate_entry(data, iova, 1); | |
7d2aa6b8 MS |
520 | sysmmu_unblock(data); |
521 | } | |
01324ab2 | 522 | clk_disable(data->clk_master); |
d631ea98 | 523 | } |
66a7ed84 | 524 | spin_unlock_irqrestore(&data->lock, flags); |
66a7ed84 CK |
525 | } |
526 | ||
469acebe MS |
527 | static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, |
528 | sysmmu_iova_t iova, size_t size) | |
2a96536e KC |
529 | { |
530 | unsigned long flags; | |
2a96536e | 531 | |
6b21a5db | 532 | spin_lock_irqsave(&data->lock, flags); |
47a574ff | 533 | if (data->active) { |
3ad6b7f3 | 534 | unsigned int num_inv = 1; |
70605870 | 535 | |
b398af21 | 536 | clk_enable(data->clk_master); |
70605870 | 537 | |
3ad6b7f3 CK |
538 | /* |
539 | * Required number of L2TLB invalidations: |
540 | * 4KB page: 1 invalidation |
f171abab SK |
541 | * 64KB page: 16 invalidations |
542 | * 1MB page: 64 invalidations |
3ad6b7f3 CK |
543 | * because the TLB is 8-way set-associative |
544 | * with 64 sets. |
545 | * A 1MB page can be cached in any of the sets, while |
546 | * a 64KB page can sit in one of 16 consecutive sets. |
547 | */ | |
512bd0c6 | 548 | if (MMU_MAJ_VER(data->version) == 2) |
3ad6b7f3 CK |
549 | num_inv = min_t(unsigned int, size / PAGE_SIZE, 64); |
550 | ||
02cdc365 MS |
551 | if (sysmmu_block(data)) { |
552 | __sysmmu_tlb_invalidate_entry(data, iova, num_inv); | |
553 | sysmmu_unblock(data); | |
2a96536e | 554 | } |
b398af21 | 555 | clk_disable(data->clk_master); |
2a96536e | 556 | } |
9d4e7a24 | 557 | spin_unlock_irqrestore(&data->lock, flags); |
2a96536e KC |
558 | } |
559 | ||
96f66557 MS |
560 | static struct iommu_ops exynos_iommu_ops; |
561 | ||
6b21a5db | 562 | static int __init exynos_sysmmu_probe(struct platform_device *pdev) |
2a96536e | 563 | { |
46c16d1e | 564 | int irq, ret; |
7222e8db | 565 | struct device *dev = &pdev->dev; |
2a96536e | 566 | struct sysmmu_drvdata *data; |
7222e8db | 567 | struct resource *res; |
2a96536e | 568 | |
46c16d1e CK |
569 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
570 | if (!data) | |
571 | return -ENOMEM; | |
2a96536e | 572 | |
7222e8db | 573 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
46c16d1e CK |
574 | data->sfrbase = devm_ioremap_resource(dev, res); |
575 | if (IS_ERR(data->sfrbase)) | |
576 | return PTR_ERR(data->sfrbase); | |
2a96536e | 577 | |
46c16d1e CK |
578 | irq = platform_get_irq(pdev, 0); |
579 | if (irq <= 0) { | |
0bf4e54d | 580 | dev_err(dev, "Unable to find IRQ resource\n"); |
46c16d1e | 581 | return irq; |
2a96536e KC |
582 | } |
583 | ||
46c16d1e | 584 | ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, |
7222e8db CK |
585 | dev_name(dev), data); |
586 | if (ret) { | |
46c16d1e CK |
587 | dev_err(dev, "Unabled to register handler of irq %d\n", irq); |
588 | return ret; | |
2a96536e KC |
589 | } |
590 | ||
46c16d1e | 591 | data->clk = devm_clk_get(dev, "sysmmu"); |
0c2b063f | 592 | if (PTR_ERR(data->clk) == -ENOENT) |
740a01ee | 593 | data->clk = NULL; |
0c2b063f MS |
594 | else if (IS_ERR(data->clk)) |
595 | return PTR_ERR(data->clk); | |
740a01ee MS |
596 | |
597 | data->aclk = devm_clk_get(dev, "aclk"); | |
0c2b063f | 598 | if (PTR_ERR(data->aclk) == -ENOENT) |
740a01ee | 599 | data->aclk = NULL; |
0c2b063f MS |
600 | else if (IS_ERR(data->aclk)) |
601 | return PTR_ERR(data->aclk); | |
740a01ee MS |
602 | |
603 | data->pclk = devm_clk_get(dev, "pclk"); | |
0c2b063f | 604 | if (PTR_ERR(data->pclk) == -ENOENT) |
740a01ee | 605 | data->pclk = NULL; |
0c2b063f MS |
606 | else if (IS_ERR(data->pclk)) |
607 | return PTR_ERR(data->pclk); | |
740a01ee MS |
608 | |
609 | if (!data->clk && (!data->aclk || !data->pclk)) { | |
610 | dev_err(dev, "Failed to get device clock(s)!\n"); | |
611 | return -ENOSYS; | |
2a96536e KC |
612 | } |
613 | ||
70605870 | 614 | data->clk_master = devm_clk_get(dev, "master"); |
0c2b063f | 615 | if (PTR_ERR(data->clk_master) == -ENOENT) |
b398af21 | 616 | data->clk_master = NULL; |
0c2b063f MS |
617 | else if (IS_ERR(data->clk_master)) |
618 | return PTR_ERR(data->clk_master); | |
70605870 | 619 | |
2a96536e | 620 | data->sysmmu = dev; |
9d4e7a24 | 621 | spin_lock_init(&data->lock); |
2a96536e | 622 | |
d2c302b6 JR |
623 | ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, |
624 | dev_name(data->sysmmu)); | |
625 | if (ret) | |
626 | return ret; | |
627 | ||
628 | iommu_device_set_ops(&data->iommu, &exynos_iommu_ops); | |
629 | iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode); | |
630 | ||
631 | ret = iommu_device_register(&data->iommu); | |
632 | if (ret) | |
633 | return ret; | |
634 | ||
7222e8db CK |
635 | platform_set_drvdata(pdev, data); |
636 | ||
850d313e | 637 | __sysmmu_get_version(data); |
740a01ee | 638 | if (PG_ENT_SHIFT < 0) { |
1a0d8dac | 639 | if (MMU_MAJ_VER(data->version) < 5) { |
740a01ee | 640 | PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT; |
1a0d8dac MS |
641 | LV1_PROT = SYSMMU_LV1_PROT; |
642 | LV2_PROT = SYSMMU_LV2_PROT; | |
643 | } else { | |
740a01ee | 644 | PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT; |
1a0d8dac MS |
645 | LV1_PROT = SYSMMU_V5_LV1_PROT; |
646 | LV2_PROT = SYSMMU_V5_LV2_PROT; | |
647 | } | |
740a01ee MS |
648 | } |
649 | ||
f4723ec1 | 650 | pm_runtime_enable(dev); |
2a96536e | 651 | |
2a96536e | 652 | return 0; |
2a96536e KC |
653 | } |
654 | ||
9b265536 | 655 | static int __maybe_unused exynos_sysmmu_suspend(struct device *dev) |
622015e4 MS |
656 | { |
657 | struct sysmmu_drvdata *data = dev_get_drvdata(dev); | |
47a574ff | 658 | struct device *master = data->master; |
622015e4 | 659 | |
47a574ff | 660 | if (master) { |
9b265536 MS |
661 | struct exynos_iommu_owner *owner = master->archdata.iommu; |
662 | ||
663 | mutex_lock(&owner->rpm_lock); | |
92798b45 MS |
664 | if (data->domain) { |
665 | dev_dbg(data->sysmmu, "saving state\n"); | |
666 | __sysmmu_disable(data); | |
667 | } | |
9b265536 | 668 | mutex_unlock(&owner->rpm_lock); |
622015e4 MS |
669 | } |
670 | return 0; | |
671 | } | |
672 | ||
9b265536 | 673 | static int __maybe_unused exynos_sysmmu_resume(struct device *dev) |
622015e4 MS |
674 | { |
675 | struct sysmmu_drvdata *data = dev_get_drvdata(dev); | |
47a574ff | 676 | struct device *master = data->master; |
622015e4 | 677 | |
47a574ff | 678 | if (master) { |
9b265536 MS |
679 | struct exynos_iommu_owner *owner = master->archdata.iommu; |
680 | ||
681 | mutex_lock(&owner->rpm_lock); | |
92798b45 MS |
682 | if (data->domain) { |
683 | dev_dbg(data->sysmmu, "restoring state\n"); | |
684 | __sysmmu_enable(data); | |
685 | } | |
9b265536 | 686 | mutex_unlock(&owner->rpm_lock); |
622015e4 MS |
687 | } |
688 | return 0; | |
689 | } | |
622015e4 MS |
690 | |
691 | static const struct dev_pm_ops sysmmu_pm_ops = { | |
9b265536 | 692 | SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL) |
2f5f44f2 MS |
693 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
694 | pm_runtime_force_resume) | |
622015e4 MS |
695 | }; |
696 | ||
6b21a5db CK |
697 | static const struct of_device_id sysmmu_of_match[] __initconst = { |
698 | { .compatible = "samsung,exynos-sysmmu", }, | |
699 | { }, | |
700 | }; | |
701 | ||
702 | static struct platform_driver exynos_sysmmu_driver __refdata = { | |
703 | .probe = exynos_sysmmu_probe, | |
704 | .driver = { | |
2a96536e | 705 | .name = "exynos-sysmmu", |
6b21a5db | 706 | .of_match_table = sysmmu_of_match, |
622015e4 | 707 | .pm = &sysmmu_pm_ops, |
b54b874f | 708 | .suppress_bind_attrs = true, |
2a96536e KC |
709 | } |
710 | }; | |
711 | ||
5e3435eb | 712 | static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) |
2a96536e | 713 | { |
5e3435eb MS |
714 | dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent), |
715 | DMA_TO_DEVICE); | |
6ae5343c | 716 | *ent = cpu_to_le32(val); |
5e3435eb MS |
717 | dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent), |
718 | DMA_TO_DEVICE); | |
2a96536e KC |
719 | } |
720 | ||
e1fd1eaa | 721 | static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) |
2a96536e | 722 | { |
bfa00489 | 723 | struct exynos_iommu_domain *domain; |
5e3435eb | 724 | dma_addr_t handle; |
66a7ed84 | 725 | int i; |
2a96536e | 726 | |
740a01ee MS |
727 | /* Check if correct PTE offsets are initialized */ |
728 | BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev); | |
e1fd1eaa | 729 | |
bfa00489 MS |
730 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
731 | if (!domain) | |
e1fd1eaa | 732 | return NULL; |
2a96536e | 733 | |
58c6f6a3 MS |
734 | if (type == IOMMU_DOMAIN_DMA) { |
735 | if (iommu_get_dma_cookie(&domain->domain) != 0) | |
736 | goto err_pgtable; | |
737 | } else if (type != IOMMU_DOMAIN_UNMANAGED) { | |
738 | goto err_pgtable; | |
739 | } | |
740 | ||
bfa00489 MS |
741 | domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); |
742 | if (!domain->pgtable) | |
58c6f6a3 | 743 | goto err_dma_cookie; |
2a96536e | 744 | |
bfa00489 MS |
745 | domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); |
746 | if (!domain->lv2entcnt) | |
2a96536e KC |
747 | goto err_counter; |
748 | ||
f171abab | 749 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ |
66a7ed84 | 750 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { |
bfa00489 MS |
751 | domain->pgtable[i + 0] = ZERO_LV2LINK; |
752 | domain->pgtable[i + 1] = ZERO_LV2LINK; | |
753 | domain->pgtable[i + 2] = ZERO_LV2LINK; | |
754 | domain->pgtable[i + 3] = ZERO_LV2LINK; | |
755 | domain->pgtable[i + 4] = ZERO_LV2LINK; | |
756 | domain->pgtable[i + 5] = ZERO_LV2LINK; | |
757 | domain->pgtable[i + 6] = ZERO_LV2LINK; | |
758 | domain->pgtable[i + 7] = ZERO_LV2LINK; | |
66a7ed84 CK |
759 | } |
760 | ||
5e3435eb MS |
761 | handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, |
762 | DMA_TO_DEVICE); | |
763 | /* For mapping page table entries we rely on dma == phys */ | |
764 | BUG_ON(handle != virt_to_phys(domain->pgtable)); | |
0d6d3da4 MS |
765 | if (dma_mapping_error(dma_dev, handle)) |
766 | goto err_lv2ent; | |
2a96536e | 767 | |
bfa00489 MS |
768 | spin_lock_init(&domain->lock); |
769 | spin_lock_init(&domain->pgtablelock); | |
770 | INIT_LIST_HEAD(&domain->clients); | |
2a96536e | 771 | |
bfa00489 MS |
772 | domain->domain.geometry.aperture_start = 0; |
773 | domain->domain.geometry.aperture_end = ~0UL; | |
774 | domain->domain.geometry.force_aperture = true; | |
3177bb76 | 775 | |
bfa00489 | 776 | return &domain->domain; |
2a96536e | 777 | |
0d6d3da4 MS |
778 | err_lv2ent: |
779 | free_pages((unsigned long)domain->lv2entcnt, 1); | |
2a96536e | 780 | err_counter: |
bfa00489 | 781 | free_pages((unsigned long)domain->pgtable, 2); |
58c6f6a3 MS |
782 | err_dma_cookie: |
783 | if (type == IOMMU_DOMAIN_DMA) | |
784 | iommu_put_dma_cookie(&domain->domain); | |
2a96536e | 785 | err_pgtable: |
bfa00489 | 786 | kfree(domain); |
e1fd1eaa | 787 | return NULL; |
2a96536e KC |
788 | } |
789 | ||
bfa00489 | 790 | static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) |
2a96536e | 791 | { |
bfa00489 | 792 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
469acebe | 793 | struct sysmmu_drvdata *data, *next; |
2a96536e KC |
794 | unsigned long flags; |
795 | int i; | |
796 | ||
bfa00489 | 797 | WARN_ON(!list_empty(&domain->clients)); |
2a96536e | 798 | |
bfa00489 | 799 | spin_lock_irqsave(&domain->lock, flags); |
2a96536e | 800 | |
bfa00489 | 801 | list_for_each_entry_safe(data, next, &domain->clients, domain_node) { |
e1172300 | 802 | spin_lock(&data->lock); |
b0d4c861 | 803 | __sysmmu_disable(data); |
47a574ff MS |
804 | data->pgtable = 0; |
805 | data->domain = NULL; | |
469acebe | 806 | list_del_init(&data->domain_node); |
e1172300 | 807 | spin_unlock(&data->lock); |
2a96536e KC |
808 | } |
809 | ||
bfa00489 | 810 | spin_unlock_irqrestore(&domain->lock, flags); |
2a96536e | 811 | |
58c6f6a3 MS |
812 | if (iommu_domain->type == IOMMU_DOMAIN_DMA) |
813 | iommu_put_dma_cookie(iommu_domain); | |
814 | ||
5e3435eb MS |
815 | dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, |
816 | DMA_TO_DEVICE); | |
817 | ||
2a96536e | 818 | for (i = 0; i < NUM_LV1ENTRIES; i++) |
5e3435eb MS |
819 | if (lv1ent_page(domain->pgtable + i)) { |
820 | phys_addr_t base = lv2table_base(domain->pgtable + i); | |
821 | ||
822 | dma_unmap_single(dma_dev, base, LV2TABLE_SIZE, | |
823 | DMA_TO_DEVICE); | |
734c3c73 | 824 | kmem_cache_free(lv2table_kmem_cache, |
5e3435eb MS |
825 | phys_to_virt(base)); |
826 | } | |
2a96536e | 827 | |
bfa00489 MS |
828 | free_pages((unsigned long)domain->pgtable, 2); |
829 | free_pages((unsigned long)domain->lv2entcnt, 1); | |
830 | kfree(domain); | |
2a96536e KC |
831 | } |
832 | ||
5fa61cbf MS |
833 | static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, |
834 | struct device *dev) | |
835 | { | |
836 | struct exynos_iommu_owner *owner = dev->archdata.iommu; | |
837 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); | |
838 | phys_addr_t pagetable = virt_to_phys(domain->pgtable); | |
839 | struct sysmmu_drvdata *data, *next; | |
840 | unsigned long flags; | |
5fa61cbf MS |
841 | |
842 | if (!has_sysmmu(dev) || owner->domain != iommu_domain) | |
843 | return; | |
844 | ||
9b265536 MS |
845 | mutex_lock(&owner->rpm_lock); |
846 | ||
847 | list_for_each_entry(data, &owner->controllers, owner_node) { | |
848 | pm_runtime_get_noresume(data->sysmmu); | |
849 | if (pm_runtime_active(data->sysmmu)) | |
850 | __sysmmu_disable(data); | |
e1172300 MS |
851 | pm_runtime_put(data->sysmmu); |
852 | } | |
853 | ||
5fa61cbf MS |
854 | spin_lock_irqsave(&domain->lock, flags); |
855 | list_for_each_entry_safe(data, next, &domain->clients, domain_node) { | |
e1172300 | 856 | spin_lock(&data->lock); |
47a574ff MS |
857 | data->pgtable = 0; |
858 | data->domain = NULL; | |
b0d4c861 | 859 | list_del_init(&data->domain_node); |
e1172300 | 860 | spin_unlock(&data->lock); |
5fa61cbf | 861 | } |
e1172300 | 862 | owner->domain = NULL; |
5fa61cbf MS |
863 | spin_unlock_irqrestore(&domain->lock, flags); |
864 | ||
9b265536 | 865 | mutex_unlock(&owner->rpm_lock); |
5fa61cbf | 866 | |
b0d4c861 MS |
867 | dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__, |
868 | &pagetable); | |
5fa61cbf MS |
869 | } |
870 | ||
bfa00489 | 871 | static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, |
2a96536e KC |
872 | struct device *dev) |
873 | { | |
6b21a5db | 874 | struct exynos_iommu_owner *owner = dev->archdata.iommu; |
bfa00489 | 875 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
469acebe | 876 | struct sysmmu_drvdata *data; |
bfa00489 | 877 | phys_addr_t pagetable = virt_to_phys(domain->pgtable); |
2a96536e | 878 | unsigned long flags; |
2a96536e | 879 | |
469acebe MS |
880 | if (!has_sysmmu(dev)) |
881 | return -ENODEV; | |
2a96536e | 882 | |
5fa61cbf MS |
883 | if (owner->domain) |
884 | exynos_iommu_detach_device(owner->domain, dev); | |
885 | ||
9b265536 MS |
886 | mutex_lock(&owner->rpm_lock); |
887 | ||
e1172300 | 888 | spin_lock_irqsave(&domain->lock, flags); |
1b092054 | 889 | list_for_each_entry(data, &owner->controllers, owner_node) { |
e1172300 | 890 | spin_lock(&data->lock); |
47a574ff MS |
891 | data->pgtable = pagetable; |
892 | data->domain = domain; | |
e1172300 MS |
893 | list_add_tail(&data->domain_node, &domain->clients); |
894 | spin_unlock(&data->lock); | |
895 | } | |
896 | owner->domain = iommu_domain; | |
897 | spin_unlock_irqrestore(&domain->lock, flags); | |
898 | ||
9b265536 MS |
899 | list_for_each_entry(data, &owner->controllers, owner_node) { |
900 | pm_runtime_get_noresume(data->sysmmu); | |
901 | if (pm_runtime_active(data->sysmmu)) | |
902 | __sysmmu_enable(data); | |
903 | pm_runtime_put(data->sysmmu); | |
904 | } | |
905 | ||
906 | mutex_unlock(&owner->rpm_lock); | |
907 | ||
b0d4c861 MS |
908 | dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__, |
909 | &pagetable); | |
7222e8db | 910 | |
b0d4c861 | 911 | return 0; |
2a96536e KC |
912 | } |
913 | ||
bfa00489 | 914 | static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, |
66a7ed84 | 915 | sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter) |
2a96536e | 916 | { |
61128f08 | 917 | if (lv1ent_section(sent)) { |
d09d78fc | 918 | WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); |
61128f08 CK |
919 | return ERR_PTR(-EADDRINUSE); |
920 | } | |
921 | ||
2a96536e | 922 | if (lv1ent_fault(sent)) { |
0d6d3da4 | 923 | dma_addr_t handle; |
d09d78fc | 924 | sysmmu_pte_t *pent; |
66a7ed84 | 925 | bool need_flush_flpd_cache = lv1ent_zero(sent); |
2a96536e | 926 | |
734c3c73 | 927 | pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); |
dbf6c6ef | 928 | BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); |
2a96536e | 929 | if (!pent) |
61128f08 | 930 | return ERR_PTR(-ENOMEM); |
2a96536e | 931 | |
5e3435eb | 932 | update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); |
dc3814f4 | 933 | kmemleak_ignore(pent); |
2a96536e | 934 | *pgcounter = NUM_LV2ENTRIES; |
0d6d3da4 MS |
935 | handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE, |
936 | DMA_TO_DEVICE); | |
937 | if (dma_mapping_error(dma_dev, handle)) { | |
938 | kmem_cache_free(lv2table_kmem_cache, pent); | |
939 | return ERR_PTR(-EADDRINUSE); | |
940 | } | |
66a7ed84 CK |
941 | |
942 | /* | |
f171abab SK |
943 | * If the pre-fetched SLPD is a faulty SLPD in zero_l2_table, the |
944 | * FLPD cache may cache the address of zero_l2_table. This |
945 | * function replaces the zero_l2_table with a new L2 page table |
946 | * to write valid mappings. |
66a7ed84 | 947 | * Accessing the valid area may cause a page fault since the FLPD |
f171abab SK |
948 | * cache may still cache zero_l2_table for the valid area |
949 | * instead of the new L2 page table that has the mapping |
950 | * information of the valid area. |
66a7ed84 CK |
951 | * Thus any replacement of zero_l2_table with another valid L2 |
952 | * page table must involve FLPD cache invalidation for System |
953 | * MMU v3.3. |
954 | * FLPD cache invalidation is performed with TLB invalidation |
955 | * by VPN without blocking. It is safe to invalidate the TLB without |
956 | * blocking because the target address of the TLB invalidation is |
957 | * not currently mapped. |
958 | */ | |
959 | if (need_flush_flpd_cache) { | |
469acebe | 960 | struct sysmmu_drvdata *data; |
365409db | 961 | |
bfa00489 MS |
962 | spin_lock(&domain->lock); |
963 | list_for_each_entry(data, &domain->clients, domain_node) | |
469acebe | 964 | sysmmu_tlb_invalidate_flpdcache(data, iova); |
bfa00489 | 965 | spin_unlock(&domain->lock); |
66a7ed84 | 966 | } |
2a96536e KC |
967 | } |
968 | ||
969 | return page_entry(sent, iova); | |
970 | } | |
971 | ||
bfa00489 | 972 | static int lv1set_section(struct exynos_iommu_domain *domain, |
66a7ed84 | 973 | sysmmu_pte_t *sent, sysmmu_iova_t iova, |
1a0d8dac | 974 | phys_addr_t paddr, int prot, short *pgcnt) |
2a96536e | 975 | { |
61128f08 | 976 | if (lv1ent_section(sent)) { |
d09d78fc | 977 | WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", |
61128f08 | 978 | iova); |
2a96536e | 979 | return -EADDRINUSE; |
61128f08 | 980 | } |
2a96536e KC |
981 | |
982 | if (lv1ent_page(sent)) { | |
61128f08 | 983 | if (*pgcnt != NUM_LV2ENTRIES) { |
d09d78fc | 984 | WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", |
61128f08 | 985 | iova); |
2a96536e | 986 | return -EADDRINUSE; |
61128f08 | 987 | } |
2a96536e | 988 | |
734c3c73 | 989 | kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0)); |
2a96536e KC |
990 | *pgcnt = 0; |
991 | } | |
992 | ||
1a0d8dac | 993 | update_pte(sent, mk_lv1ent_sect(paddr, prot)); |
2a96536e | 994 | |
bfa00489 | 995 | spin_lock(&domain->lock); |
66a7ed84 | 996 | if (lv1ent_page_zero(sent)) { |
469acebe | 997 | struct sysmmu_drvdata *data; |
66a7ed84 CK |
998 | /* |
999 | * Flush the FLPD cache in System MMU v3.3, which may have cached a FLPD |
1000 | * entry via speculative prefetch of an SLPD that has no mapping. |
1001 | */ | |
bfa00489 | 1002 | list_for_each_entry(data, &domain->clients, domain_node) |
469acebe | 1003 | sysmmu_tlb_invalidate_flpdcache(data, iova); |
66a7ed84 | 1004 | } |
bfa00489 | 1005 | spin_unlock(&domain->lock); |
66a7ed84 | 1006 | |
2a96536e KC |
1007 | return 0; |
1008 | } | |
1009 | ||
d09d78fc | 1010 | static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, |
1a0d8dac | 1011 | int prot, short *pgcnt) |
2a96536e KC |
1012 | { |
1013 | if (size == SPAGE_SIZE) { | |
0bf4e54d | 1014 | if (WARN_ON(!lv2ent_fault(pent))) |
2a96536e KC |
1015 | return -EADDRINUSE; |
1016 | ||
1a0d8dac | 1017 | update_pte(pent, mk_lv2ent_spage(paddr, prot)); |
2a96536e KC |
1018 | *pgcnt -= 1; |
1019 | } else { /* size == LPAGE_SIZE */ | |
1020 | int i; | |
5e3435eb | 1021 | dma_addr_t pent_base = virt_to_phys(pent); |
365409db | 1022 | |
5e3435eb MS |
1023 | dma_sync_single_for_cpu(dma_dev, pent_base, |
1024 | sizeof(*pent) * SPAGES_PER_LPAGE, | |
1025 | DMA_TO_DEVICE); | |
2a96536e | 1026 | for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { |
0bf4e54d | 1027 | if (WARN_ON(!lv2ent_fault(pent))) { |
61128f08 CK |
1028 | if (i > 0) |
1029 | memset(pent - i, 0, sizeof(*pent) * i); | |
2a96536e KC |
1030 | return -EADDRINUSE; |
1031 | } | |
1032 | ||
1a0d8dac | 1033 | *pent = mk_lv2ent_lpage(paddr, prot); |
2a96536e | 1034 | } |
5e3435eb MS |
1035 | dma_sync_single_for_device(dma_dev, pent_base, |
1036 | sizeof(*pent) * SPAGES_PER_LPAGE, | |
1037 | DMA_TO_DEVICE); | |
2a96536e KC |
1038 | *pgcnt -= SPAGES_PER_LPAGE; |
1039 | } | |
1040 | ||
1041 | return 0; | |
1042 | } | |
1043 | ||
66a7ed84 CK |
1044 | /* |
1045 | * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: | |
1046 | * | |
f171abab | 1047 | * System MMU v3.x has advanced logic to improve address translation |
66a7ed84 | 1048 | * performance with caching more page table entries by a page table walk. |
f171abab SK |
1049 | * However, the logic has a bug: while caching faulty page table entries, the |
1050 | * System MMU reports a page fault if the cached faulty entry is hit, even |
1051 | * though the entry has been updated to a valid one after it was cached. |
1052 | * To prevent caching faulty page table entries which may be updated to valid |
1053 | * entries later, the virtual memory manager should apply the workaround |
1054 | * described below. |
66a7ed84 CK |
1055 | * |
1056 | * Any two consecutive I/O virtual address regions must have a hole of 128KiB | |
f171abab | 1057 | * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug). |
66a7ed84 | 1058 | * |
f171abab | 1059 | * Precisely, any start address of an I/O virtual region must be aligned to |
66a7ed84 CK |
1060 | * the following sizes for System MMU v3.1 and v3.2. |
1061 | * System MMU v3.1: 128KiB | |
1062 | * System MMU v3.2: 256KiB | |
1063 | * | |
1064 | * Because System MMU v3.3 caches page table entries more aggressively, it needs | |
f171abab SK |
1065 | * more workarounds. |
1066 | * - Any two consecutive I/O virtual regions must have a hole of size larger | |
1067 | * than or equal to 128KiB. | |
66a7ed84 CK |
1068 | * - Start address of an I/O virtual region must be aligned by 128KiB. |
1069 | */ | |
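For an IOVA allocator sitting on top of this driver, the constraints above translate into rounding region starts (128 KiB for v3.1/v3.3, 256 KiB for v3.2) and keeping a guard hole between consecutive regions. A hedged sketch only, assuming the generic ALIGN() helper is visible here:

```c
/* Sketch: round a candidate IOVA region start for System MMU v3.3. */
static inline unsigned long example_align_iova_start(unsigned long start)
{
	return ALIGN(start, 0x20000);	/* 128 KiB, per the note above */
}
```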
bfa00489 MS |
1070 | static int exynos_iommu_map(struct iommu_domain *iommu_domain, |
1071 | unsigned long l_iova, phys_addr_t paddr, size_t size, | |
1072 | int prot) | |
2a96536e | 1073 | { |
bfa00489 | 1074 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc CK |
1075 | sysmmu_pte_t *entry; |
1076 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; | |
2a96536e KC |
1077 | unsigned long flags; |
1078 | int ret = -ENOMEM; | |
1079 | ||
bfa00489 | 1080 | BUG_ON(domain->pgtable == NULL); |
1a0d8dac | 1081 | prot &= SYSMMU_SUPPORTED_PROT_BITS; |
2a96536e | 1082 | |
bfa00489 | 1083 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 1084 | |
bfa00489 | 1085 | entry = section_entry(domain->pgtable, iova); |
2a96536e KC |
1086 | |
1087 | if (size == SECT_SIZE) { | |
1a0d8dac | 1088 | ret = lv1set_section(domain, entry, iova, paddr, prot, |
bfa00489 | 1089 | &domain->lv2entcnt[lv1ent_offset(iova)]); |
2a96536e | 1090 | } else { |
d09d78fc | 1091 | sysmmu_pte_t *pent; |
2a96536e | 1092 | |
bfa00489 MS |
1093 | pent = alloc_lv2entry(domain, entry, iova, |
1094 | &domain->lv2entcnt[lv1ent_offset(iova)]); | |
2a96536e | 1095 | |
61128f08 CK |
1096 | if (IS_ERR(pent)) |
1097 | ret = PTR_ERR(pent); | |
2a96536e | 1098 | else |
1a0d8dac | 1099 | ret = lv2set_page(pent, paddr, size, prot, |
bfa00489 | 1100 | &domain->lv2entcnt[lv1ent_offset(iova)]); |
2a96536e KC |
1101 | } |
1102 | ||
61128f08 | 1103 | if (ret) |
0bf4e54d CK |
1104 | pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n", |
1105 | __func__, ret, size, iova); | |
2a96536e | 1106 | |
bfa00489 | 1107 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e KC |
1108 | |
1109 | return ret; | |
1110 | } | |
1111 | ||
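From a client's point of view the callback above is reached through the generic IOMMU API, e.g. iommu_map() with one of the three supported sizes. An illustrative sketch with made-up addresses:

```c
/* Sketch: mapping one 1 MiB section through the generic IOMMU API. */
static inline int example_map_one_section(struct iommu_domain *dom)
{
	/* IOVA 0x100000 -> PA 0x40000000, read/write; values are examples */
	return iommu_map(dom, 0x100000, 0x40000000, SECT_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
}
```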
bfa00489 MS |
1112 | static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, |
1113 | sysmmu_iova_t iova, size_t size) | |
66a7ed84 | 1114 | { |
469acebe | 1115 | struct sysmmu_drvdata *data; |
66a7ed84 CK |
1116 | unsigned long flags; |
1117 | ||
bfa00489 | 1118 | spin_lock_irqsave(&domain->lock, flags); |
66a7ed84 | 1119 | |
bfa00489 | 1120 | list_for_each_entry(data, &domain->clients, domain_node) |
469acebe | 1121 | sysmmu_tlb_invalidate_entry(data, iova, size); |
66a7ed84 | 1122 | |
bfa00489 | 1123 | spin_unlock_irqrestore(&domain->lock, flags); |
66a7ed84 CK |
1124 | } |
1125 | ||
bfa00489 MS |
1126 | static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, |
1127 | unsigned long l_iova, size_t size) | |
2a96536e | 1128 | { |
bfa00489 | 1129 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc CK |
1130 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; |
1131 | sysmmu_pte_t *ent; | |
61128f08 | 1132 | size_t err_pgsize; |
d09d78fc | 1133 | unsigned long flags; |
2a96536e | 1134 | |
bfa00489 | 1135 | BUG_ON(domain->pgtable == NULL); |
2a96536e | 1136 | |
bfa00489 | 1137 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 1138 | |
bfa00489 | 1139 | ent = section_entry(domain->pgtable, iova); |
2a96536e KC |
1140 | |
1141 | if (lv1ent_section(ent)) { | |
0bf4e54d | 1142 | if (WARN_ON(size < SECT_SIZE)) { |
61128f08 CK |
1143 | err_pgsize = SECT_SIZE; |
1144 | goto err; | |
1145 | } | |
2a96536e | 1146 | |
f171abab | 1147 | /* workaround for h/w bug in System MMU v3.3 */ |
5e3435eb | 1148 | update_pte(ent, ZERO_LV2LINK); |
2a96536e KC |
1149 | size = SECT_SIZE; |
1150 | goto done; | |
1151 | } | |
1152 | ||
1153 | if (unlikely(lv1ent_fault(ent))) { | |
1154 | if (size > SECT_SIZE) | |
1155 | size = SECT_SIZE; | |
1156 | goto done; | |
1157 | } | |
1158 | ||
1159 | /* lv1ent_page(sent) == true here */ | |
1160 | ||
1161 | ent = page_entry(ent, iova); | |
1162 | ||
1163 | if (unlikely(lv2ent_fault(ent))) { | |
1164 | size = SPAGE_SIZE; | |
1165 | goto done; | |
1166 | } | |
1167 | ||
1168 | if (lv2ent_small(ent)) { | |
5e3435eb | 1169 | update_pte(ent, 0); |
2a96536e | 1170 | size = SPAGE_SIZE; |
bfa00489 | 1171 | domain->lv2entcnt[lv1ent_offset(iova)] += 1; |
2a96536e KC |
1172 | goto done; |
1173 | } | |
1174 | ||
1175 | /* lv1ent_large(ent) == true here */ | |
0bf4e54d | 1176 | if (WARN_ON(size < LPAGE_SIZE)) { |
61128f08 CK |
1177 | err_pgsize = LPAGE_SIZE; |
1178 | goto err; | |
1179 | } | |
2a96536e | 1180 | |
5e3435eb MS |
1181 | dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), |
1182 | sizeof(*ent) * SPAGES_PER_LPAGE, | |
1183 | DMA_TO_DEVICE); | |
2a96536e | 1184 | memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); |
5e3435eb MS |
1185 | dma_sync_single_for_device(dma_dev, virt_to_phys(ent), |
1186 | sizeof(*ent) * SPAGES_PER_LPAGE, | |
1187 | DMA_TO_DEVICE); | |
2a96536e | 1188 | size = LPAGE_SIZE; |
bfa00489 | 1189 | domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; |
2a96536e | 1190 | done: |
bfa00489 | 1191 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e | 1192 | |
bfa00489 | 1193 | exynos_iommu_tlb_invalidate_entry(domain, iova, size); |
2a96536e | 1194 | |
2a96536e | 1195 | return size; |
61128f08 | 1196 | err: |
bfa00489 | 1197 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
61128f08 | 1198 | |
0bf4e54d CK |
1199 | pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n", |
1200 | __func__, size, iova, err_pgsize); | |
61128f08 CK |
1201 | |
1202 | return 0; | |
2a96536e KC |
1203 | } |
1204 | ||
bfa00489 | 1205 | static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain, |
bb5547ac | 1206 | dma_addr_t iova) |
2a96536e | 1207 | { |
bfa00489 | 1208 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc | 1209 | sysmmu_pte_t *entry; |
2a96536e KC |
1210 | unsigned long flags; |
1211 | phys_addr_t phys = 0; | |
1212 | ||
bfa00489 | 1213 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 1214 | |
bfa00489 | 1215 | entry = section_entry(domain->pgtable, iova); |
2a96536e KC |
1216 | |
1217 | if (lv1ent_section(entry)) { | |
1218 | phys = section_phys(entry) + section_offs(iova); | |
1219 | } else if (lv1ent_page(entry)) { | |
1220 | entry = page_entry(entry, iova); | |
1221 | ||
1222 | if (lv2ent_large(entry)) | |
1223 | phys = lpage_phys(entry) + lpage_offs(iova); | |
1224 | else if (lv2ent_small(entry)) | |
1225 | phys = spage_phys(entry) + spage_offs(iova); | |
1226 | } | |
1227 | ||
bfa00489 | 1228 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e KC |
1229 | |
1230 | return phys; | |
1231 | } | |
1232 | ||
6c2ae7e2 MS |
1233 | static struct iommu_group *get_device_iommu_group(struct device *dev) |
1234 | { | |
1235 | struct iommu_group *group; | |
1236 | ||
1237 | group = iommu_group_get(dev); | |
1238 | if (!group) | |
1239 | group = iommu_group_alloc(); | |
1240 | ||
1241 | return group; | |
1242 | } | |
1243 | ||
bf4a1c92 AM |
1244 | static int exynos_iommu_add_device(struct device *dev) |
1245 | { | |
1246 | struct iommu_group *group; | |
bf4a1c92 | 1247 | |
06801db0 MS |
1248 | if (!has_sysmmu(dev)) |
1249 | return -ENODEV; | |
1250 | ||
6c2ae7e2 | 1251 | group = iommu_group_get_for_dev(dev); |
bf4a1c92 | 1252 | |
6c2ae7e2 MS |
1253 | if (IS_ERR(group)) |
1254 | return PTR_ERR(group); | |
bf4a1c92 | 1255 | |
bf4a1c92 AM |
1256 | iommu_group_put(group); |
1257 | ||
6c2ae7e2 | 1258 | return 0; |
bf4a1c92 AM |
1259 | } |
1260 | ||
1261 | static void exynos_iommu_remove_device(struct device *dev) | |
1262 | { | |
fff2fd1a MS |
1263 | struct exynos_iommu_owner *owner = dev->archdata.iommu; |
1264 | ||
06801db0 MS |
1265 | if (!has_sysmmu(dev)) |
1266 | return; | |
1267 | ||
fff2fd1a MS |
1268 | if (owner->domain) { |
1269 | struct iommu_group *group = iommu_group_get(dev); | |
1270 | ||
1271 | if (group) { | |
1272 | WARN_ON(owner->domain != | |
1273 | iommu_group_default_domain(group)); | |
1274 | exynos_iommu_detach_device(owner->domain, dev); | |
1275 | iommu_group_put(group); | |
1276 | } | |
1277 | } | |
bf4a1c92 AM |
1278 | iommu_group_remove_device(dev); |
1279 | } | |
1280 | ||
aa759fd3 MS |
1281 | static int exynos_iommu_of_xlate(struct device *dev, |
1282 | struct of_phandle_args *spec) | |
1283 | { | |
1284 | struct exynos_iommu_owner *owner = dev->archdata.iommu; | |
1285 | struct platform_device *sysmmu = of_find_device_by_node(spec->np); | |
0bd5a0c7 | 1286 | struct sysmmu_drvdata *data, *entry; |
aa759fd3 MS |
1287 | |
1288 | if (!sysmmu) | |
1289 | return -ENODEV; | |
1290 | ||
1291 | data = platform_get_drvdata(sysmmu); | |
1292 | if (!data) | |
1293 | return -ENODEV; | |
1294 | ||
1295 | if (!owner) { | |
1296 | owner = kzalloc(sizeof(*owner), GFP_KERNEL); | |
1297 | if (!owner) | |
1298 | return -ENOMEM; | |
1299 | ||
1300 | INIT_LIST_HEAD(&owner->controllers); | |
9b265536 | 1301 | mutex_init(&owner->rpm_lock); |
aa759fd3 MS |
1302 | dev->archdata.iommu = owner; |
1303 | } | |
1304 | ||
0bd5a0c7 MS |
1305 | list_for_each_entry(entry, &owner->controllers, owner_node) |
1306 | if (entry == data) | |
1307 | return 0; | |
1308 | ||
aa759fd3 | 1309 | list_add_tail(&data->owner_node, &owner->controllers); |
92798b45 | 1310 | data->master = dev; |
2f5f44f2 MS |
1311 | |
1312 | /* | |
1313 | * SYSMMU will be runtime activated via device link (dependency) to its | |
1314 | * master device, so there are no direct calls to pm_runtime_get/put | |
1315 | * in this driver. | |
1316 | */ | |
1317 | device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME); | |
1318 | ||
aa759fd3 MS |
1319 | return 0; |
1320 | } | |
1321 | ||
8ed55c81 | 1322 | static struct iommu_ops exynos_iommu_ops = { |
e1fd1eaa JR |
1323 | .domain_alloc = exynos_iommu_domain_alloc, |
1324 | .domain_free = exynos_iommu_domain_free, | |
ba5fa6f6 BH |
1325 | .attach_dev = exynos_iommu_attach_device, |
1326 | .detach_dev = exynos_iommu_detach_device, | |
1327 | .map = exynos_iommu_map, | |
1328 | .unmap = exynos_iommu_unmap, | |
315786eb | 1329 | .map_sg = default_iommu_map_sg, |
ba5fa6f6 | 1330 | .iova_to_phys = exynos_iommu_iova_to_phys, |
6c2ae7e2 | 1331 | .device_group = get_device_iommu_group, |
ba5fa6f6 BH |
1332 | .add_device = exynos_iommu_add_device, |
1333 | .remove_device = exynos_iommu_remove_device, | |
2a96536e | 1334 | .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, |
aa759fd3 | 1335 | .of_xlate = exynos_iommu_of_xlate, |
2a96536e KC |
1336 | }; |
1337 | ||
8ed55c81 MS |
1338 | static bool init_done; |
1339 | ||
2a96536e KC |
1340 | static int __init exynos_iommu_init(void) |
1341 | { | |
1342 | int ret; | |
1343 | ||
734c3c73 CK |
1344 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1345 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | |
1346 | if (!lv2table_kmem_cache) { | |
1347 | pr_err("%s: Failed to create kmem cache\n", __func__); | |
1348 | return -ENOMEM; | |
1349 | } | |
1350 | ||
2a96536e | 1351 | ret = platform_driver_register(&exynos_sysmmu_driver); |
734c3c73 CK |
1352 | if (ret) { |
1353 | pr_err("%s: Failed to register driver\n", __func__); | |
1354 | goto err_reg_driver; | |
1355 | } | |
2a96536e | 1356 | |
66a7ed84 CK |
1357 | zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); |
1358 | if (zero_lv2_table == NULL) { | |
1359 | pr_err("%s: Failed to allocate zero level2 page table\n", | |
1360 | __func__); | |
1361 | ret = -ENOMEM; | |
1362 | goto err_zero_lv2; | |
1363 | } | |
1364 | ||
734c3c73 CK |
1365 | ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); |
1366 | if (ret) { | |
1367 | pr_err("%s: Failed to register exynos-iommu driver.\n", | |
1368 | __func__); | |
1369 | goto err_set_iommu; | |
1370 | } | |
2a96536e | 1371 | |
8ed55c81 MS |
1372 | init_done = true; |
1373 | ||
734c3c73 CK |
1374 | return 0; |
1375 | err_set_iommu: | |
66a7ed84 CK |
1376 | kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); |
1377 | err_zero_lv2: | |
734c3c73 CK |
1378 | platform_driver_unregister(&exynos_sysmmu_driver); |
1379 | err_reg_driver: | |
1380 | kmem_cache_destroy(lv2table_kmem_cache); | |
2a96536e KC |
1381 | return ret; |
1382 | } | |
8ed55c81 MS |
1383 | |
1384 | static int __init exynos_iommu_of_setup(struct device_node *np) | |
1385 | { | |
1386 | struct platform_device *pdev; | |
1387 | ||
1388 | if (!init_done) | |
1389 | exynos_iommu_init(); | |
1390 | ||
1391 | pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); | |
423595e8 AKC |
1392 | if (!pdev) |
1393 | return -ENODEV; | |
8ed55c81 | 1394 | |
5e3435eb MS |
1395 | /* |
1396 | * use the first registered sysmmu device for performing | |
1397 | * dma mapping operations on iommu page tables (cpu cache flush) | |
1398 | */ | |
1399 | if (!dma_dev) | |
1400 | dma_dev = &pdev->dev; | |
1401 | ||
8ed55c81 MS |
1402 | return 0; |
1403 | } | |
1404 | ||
1405 | IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", | |
1406 | exynos_iommu_of_setup); |