// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <linux/init.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR		0x000

#define F_ALL_INVLD			0x2
#define F_MMU_INV_RANGE			0x1
#define F_INVLD_EN0			BIT(0)
#define F_INVLD_EN1			BIT(1)

#define F_MMU_FAULT_VA_MSK		0xfffff000
#define MTK_PROTECT_PA_ALIGN		128

#define REG_MMU_CTRL_REG		0x210
#define F_MMU_CTRL_COHERENT_EN		BIT(8)
#define REG_MMU_IVRP_PADDR		0x214
#define REG_MMU_INT_CONTROL		0x220
#define F_INT_TRANSLATION_FAULT		BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT	BIT(1)
#define F_INT_INVALID_PA_FAULT		BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT	BIT(3)
#define F_INT_TABLE_WALK_FAULT		BIT(4)
#define F_INT_TLB_MISS_FAULT		BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW	BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW	BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)	(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT			BIT(12)

#define REG_MMU_FAULT_ST		0x224
#define REG_MMU_FAULT_VA		0x228
#define REG_MMU_INVLD_PA		0x22C
#define REG_MMU_INT_ID			0x388
#define REG_MMU_INVALIDATE		0x5c0
#define REG_MMU_INVLD_START_A		0x5c4
#define REG_MMU_INVLD_END_A		0x5c8

#define REG_MMU_INV_SEL			0x5d8
#define REG_MMU_STANDARD_AXI_MODE	0x5e8

#define REG_MMU_DCM			0x5f0
#define F_MMU_DCM_ON			BIT(1)
#define REG_MMU_CPE_DONE		0x60c
#define F_DESC_VALID			0x2
#define F_DESC_NONSEC			BIT(3)
#define MT2701_M4U_TF_LARB(TF)		(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)		(((TF) >> 8) & 0xF)
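/*
 * Illustrative decode (derived from the two macros above): a fault ID of
 * 0xa300 has bits [15:13] = 5 and bits [11:8] = 3, so it reports
 * larb 6 - 5 = 1, port 3.
 */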
/* MTK generation one IOMMU HW only supports 4KB page mappings. */
#define MT2701_IOMMU_PAGE_SHIFT		12
#define MT2701_IOMMU_PAGE_SIZE		(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * The MTK m4u supports a 4GB IOVA address space and only 4KB page
 * mappings, so the page table size must be exactly 4MB.
 */
#define MT2701_IOMMU_PGT_SIZE		SZ_4M
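/*
 * Derivation: 4GB / 4KB = 2^20 entries, each a 4-byte u32 descriptor,
 * so the flat one-level table occupies 2^20 * 4 bytes = 4MB.
 */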

struct mtk_iommu_domain {
	spinlock_t		pgtlock; /* lock for page table */
	struct iommu_domain	domain;
	u32			*pgt_va;
	dma_addr_t		pgt_pa;
	struct mtk_iommu_data	*data;
};

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}
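/*
 * Illustrative example (hypothetical offsets; the real values come from
 * mt2701-larb-port.h): if LARB1_PORT_OFFSET were 11, a global port id of
 * 13 would decode to larb 1, port 13 - 11 = 2.
 */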

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		       data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
				      unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		       data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		       data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		       data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

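	/* Poll every 10us, for at most 100ms, for the invalidate-done flag. */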
	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
					tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 IOMMU HW cannot determine whether a fault was a read
	 * or a write, so report every fault as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, MT2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dom->data;

	dma_free_coherent(data->dev, MT2701_IOMMU_PGT_SIZE,
			  dom->pgt_va, dom->pgt_pa);
	kfree(dom);
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
	int ret;

	if (!data)
		return -ENODEV;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

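/*
 * Map @size bytes as consecutive 4KB descriptors in the flat, one-level
 * table. If any target entry is already valid, the entries written so
 * far are rolled back and -EEXIST is returned.
 */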
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & ~(MT2701_IOMMU_PAGE_SIZE - 1);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_ops;

/*
 * MTK generation one IOMMU HW supports only one iommu domain; all the
 * clients share the same iova address space.
 */
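/*
 * Client devices therefore reference the m4u with a single cell per
 * port, e.g. (illustrative binding; the port macro is hypothetical):
 *
 *	iommus = <&iommu M4U_PORT_FOO>;
 */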
static int mtk_iommu_create_mapping(struct device *dev,
				    struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	struct device *m4udev;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (fwspec->ops != &mtk_iommu_ops) {
		return -EINVAL;
	}

	if (!fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = fwspec->iommu_priv;
	m4udev = data->dev;
	mtk_mapping = m4udev->archdata.iommu;
	if (!mtk_mapping) {
		/* The MTK iommu supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						       0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		m4udev->archdata.iommu = mtk_mapping;
	}

	return 0;
}

static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct dma_iommu_mapping *mtk_mapping;
	struct of_phandle_args iommu_spec;
	struct of_phandle_iterator it;
	struct mtk_iommu_data *data;
	struct iommu_group *group;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "iommus",
			    "#iommu-cells", -1) {
		int count = of_phandle_iterator_args(&it, iommu_spec.args,
						     MAX_PHANDLE_ARGS);
		iommu_spec.np = of_node_get(it.node);
		iommu_spec.args_count = count;

		mtk_iommu_create_mapping(dev, &iommu_spec);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);

		of_node_put(iommu_spec.np);
	}

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	/*
	 * This is a short-term bodge because the ARM DMA code doesn't
	 * understand multi-device groups, but we have to call into it
	 * successfully (and not just rely on a normal IOMMU API attach
	 * here) in order to set the correct DMA API ops on @dev.
	 */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	err = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	if (err)
		return err;

	data = fwspec->iommu_priv;
	mtk_mapping = data->dev->archdata.iommu;
	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err) {
		iommu_group_remove_device(dev);
		return err;
	}

	return iommu_device_link(&data->iommu, dev);
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	data = fwspec->iommu_priv;
	iommu_device_unlink(&data->iommu, dev);

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		 F_INT_MAIN_MULTI_HIT_FAULT |
		 F_INT_INVALID_PA_FAULT |
		 F_INT_ENTRY_REPLACEMENT_FAULT |
		 F_INT_TABLE_WALK_FAULT |
		 F_INT_TLB_MISS_FAULT |
		 F_INT_PFH_DMA_FIFO_OVERFLOW |
		 F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: the HW will write here on a translation fault. */
	writel_relaxed(data->protect_base,
		       data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

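/*
 * pgsize_bitmap below advertises every power-of-two size >= 4KB;
 * mtk_iommu_map() realizes larger mappings as runs of consecutive
 * 4KB descriptors.
 */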
static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc = mtk_iommu_domain_alloc,
	.domain_free = mtk_iommu_domain_free,
	.attach_dev = mtk_iommu_attach_device,
	.detach_dev = mtk_iommu_detach_device,
	.map = mtk_iommu_map,
	.unmap = mtk_iommu_unmap,
	.iova_to_phys = mtk_iommu_iova_to_phys,
	.add_device = mtk_iommu_add_device,
	.remove_device = mtk_iommu_remove_device,
	.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind = mtk_iommu_bind,
	.unbind = mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	struct of_phandle_args larb_spec;
	struct of_phandle_iterator it;
	void *protect;
	int larb_nr, ret, err;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory: the HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			       GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
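	/* Allocating 2x the alignment guarantees an aligned 128-byte window. */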
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = 0;
	of_for_each_phandle(&it, err, dev->of_node,
			    "mediatek,larbs", NULL, 0) {
		struct platform_device *plarbdev;
		int count = of_phandle_iterator_args(&it, larb_spec.args,
						     MAX_PHANDLE_ARGS);

		if (count)
			continue;

		larb_spec.np = of_node_get(it.node);
		if (!of_device_is_available(larb_spec.np))
			continue;

		plarbdev = of_find_device_by_node(larb_spec.np);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larb_spec.np, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev) {
				of_node_put(larb_spec.np);
				return -EPROBE_DEFER;
			}
		}

		data->larb_imu[larb_nr].dev = &plarbdev->dev;
		component_match_add_release(dev, &match, release_of,
					    compare_of, larb_spec.np);
		larb_nr++;
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

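/*
 * Register state may be lost across suspend, so the page-table base is
 * restored along with the saved registers.
 */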
static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}
688 | ||
131bc8eb | 689 | static const struct dev_pm_ops mtk_iommu_pm_ops = { |
b17336c5 HZ |
690 | SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) |
691 | }; | |
692 | ||
693 | static struct platform_driver mtk_iommu_driver = { | |
694 | .probe = mtk_iommu_probe, | |
695 | .remove = mtk_iommu_remove, | |
696 | .driver = { | |
395df08d | 697 | .name = "mtk-iommu-v1", |
b17336c5 HZ |
698 | .of_match_table = mtk_iommu_of_ids, |
699 | .pm = &mtk_iommu_pm_ops, | |
700 | } | |
701 | }; | |
702 | ||
703 | static int __init m4u_init(void) | |
704 | { | |
705 | return platform_driver_register(&mtk_iommu_driver); | |
706 | } | |
b17336c5 | 707 | subsys_initcall(m4u_init); |