/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR 0x000

#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
#define F_MMU_INV_RANGE 0x1

#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028

#define REG_MMU_INV_SEL 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)

#define REG_MMU_STANDARD_AXI_MODE 0x048
#define REG_MMU_DCM_DIS 0x050

#define REG_MMU_CTRL_REG 0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
        ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
/* This field is named F_MMU_TF_PROT_SEL on mt2712. */
#define F_MMU_TF_PROTECT_SEL(prot, data) \
        (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))

#define REG_MMU_IVRP_PADDR 0x114

#define REG_MMU_VLD_PA_RNG 0x118
#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0 0x120
#define F_L2_MULIT_HIT_EN BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
#define F_MISS_FIFO_ERR_INT_EN BIT(6)
#define F_INT_CLR_BIT BIT(12)

#define REG_MMU_INT_MAIN_CONTROL 0x124
#define F_INT_TRANSLATION_FAULT BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
#define F_INT_INVALID_PA_FAULT BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
#define F_INT_TLB_MISS_FAULT BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6)

#define REG_MMU_CPE_DONE 0x12C

#define REG_MMU_FAULT_ST1 0x134

#define REG_MMU_FAULT_VA 0x13c
#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)

#define REG_MMU_INVLD_PA 0x140
#define REG_MMU_INT_ID 0x150
#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN 128

/*
 * Get the local arbiter ID and the port ID within that larb from the
 * mtk_m4u_id, which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
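
/*
 * Worked example (assuming MTK_M4U_ID packs the ID as ((larb) << 5) | (port),
 * which is what the two decode macros above imply): an ID of 0x45
 * (0b10_00101) decodes to larb 2, port 5.
 */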

struct mtk_iommu_domain {
        spinlock_t pgtlock; /* lock for page table */

        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;

        struct iommu_domain domain;
};

static struct iommu_ops mtk_iommu_ops;

static LIST_HEAD(m4ulist); /* List all the M4U HWs */

#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance reasons.
 *
 * Always return the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

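/*
 * TLB maintenance callbacks used by the io-pgtable code (see
 * mtk_iommu_gather_ops below). A range invalidate writes the start/end iova
 * and F_MMU_INV_RANGE, and is only complete once mtk_iommu_tlb_sync() has
 * polled REG_MMU_CPE_DONE; if that poll times out we fall back to a full
 * flush. Every access first selects both invalidate engines via
 * REG_MMU_INV_SEL (F_INVLD_EN0/1).
 */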
static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the TLB flush-all is done */
        }
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
                                           size_t granule, bool leaf,
                                           void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);

                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);
                data->tlb_flush_active = true;
        }
}

static void mtk_iommu_tlb_sync(void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                /* Avoid timing out if there's nothing to wait for */
                if (!data->tlb_flush_active)
                        return;

                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 100000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(cookie);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                data->tlb_flush_active = false;
        }
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
        .tlb_sync = mtk_iommu_tlb_sync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
        regval = readl_relaxed(data->base + REG_MMU_INT_ID);
        fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
        fault_port = F_MMU0_INT_ID_PORT_ID(regval);

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu *larb_mmu;
        unsigned int larbid, portid;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->smi_imu.larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

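/*
 * The page table uses the ARM short-descriptor (v7s) format with 32-bit
 * input and output addresses; the supported page sizes come from
 * mtk_iommu_ops.pgsize_bitmap (4K/64K/1M/16M). The ARM_MTK_4GB quirk is
 * only requested when the DRAM extends above 4GB ("4GB mode"); see also
 * mtk_iommu_map()/mtk_iommu_iova_to_phys(), which mask off and restore
 * PA bit 32 in that mode.
 */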
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        spin_lock_init(&dom->pgtlock);

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &mtk_iommu_gather_ops,
                .iommu_dev = data->dev,
        };

        if (data->enable_4GB)
                dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain))
                goto free_dom;

        if (mtk_iommu_domain_finalise(dom))
                goto put_dma_cookie;

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;

put_dma_cookie:
        iommu_put_dma_cookie(&dom->domain);
free_dom:
        kfree(dom);
        return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        free_io_pgtable_ops(dom->iop);
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return -ENODEV;

        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr[0],
                       data->base + REG_MMU_PT_BASE_ADDR);
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

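/*
 * In 4GB mode the usable physical addresses sit above the 32-bit boundary
 * (bit 32 set). Only the low 32 bits are written into the v7s page table
 * here; mtk_iommu_iova_to_phys() ORs bit 32 back in when enable_4GB is set.
 */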
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dom->pgtlock, flags);
        ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
                            size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        size_t unmapsz;

        spin_lock_irqsave(&dom->pgtlock, flags);
        unmapsz = dom->iop->unmap(dom->iop, iova, size);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return unmapsz;
}

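/*
 * Both .flush_iotlb_all and .iotlb_sync point here: the range invalidate has
 * already been queued by the nosync callback above, so all that remains is
 * to wait for it to complete (mtk_iommu_tlb_sync() walks every M4U on
 * m4ulist).
 */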
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
        mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        unsigned long flags;
        phys_addr_t pa;

        spin_lock_irqsave(&dom->pgtlock, flags);
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        if (data->enable_4GB)
                pa |= BIT_ULL(32);

        return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
        struct mtk_iommu_data *data;
        struct iommu_group *group;

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct mtk_iommu_data *data;

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return;

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

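/*
 * A client device references the M4U with a single-cell "iommus" specifier
 * carrying the packed larb/port ID, e.g. (illustrative only; the actual port
 * macro names come from the dt-binding headers):
 *
 *     iommus = <&iommu M4U_PORT_XXX>;
 *
 * The first translation for a device also stashes the M4U's driver data in
 * dev->iommu_fwspec->iommu_priv for later use by attach/config.
 */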
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!dev->iommu_fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops mtk_iommu_ops = {
        .domain_alloc = mtk_iommu_domain_alloc,
        .domain_free = mtk_iommu_domain_free,
        .attach_dev = mtk_iommu_attach_device,
        .detach_dev = mtk_iommu_detach_device,
        .map = mtk_iommu_map,
        .unmap = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_iotlb_sync,
        .iotlb_sync = mtk_iommu_iotlb_sync,
        .iova_to_phys = mtk_iommu_iova_to_phys,
        .add_device = mtk_iommu_add_device,
        .remove_device = mtk_iommu_remove_device,
        .device_group = mtk_iommu_device_group,
        .of_xlate = mtk_iommu_of_xlate,
        .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

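/*
 * One-time hardware setup: enable the bus clock, program translation-fault
 * protection and interrupt settings, write the address of the protect buffer
 * (REG_MMU_IVRP_PADDR) that the HW accesses on a translation fault, and
 * finally request the fault IRQ.
 */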
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        regval = F_MMU_TF_PROTECT_SEL(2, data);
        if (data->m4u_plat == M4U_MT8173)
                regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        if (data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Record bits[32:30] here.
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

        /* This is a MISC control register whose default value is fine except on mt8173. */
        if (data->m4u_plat == M4U_MT8173)
                writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind = mtk_iommu_bind,
        .unbind = mtk_iommu_unbind,
};

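/*
 * The M4U only becomes useful once every SMI local arbiter (larb) listed in
 * the "mediatek,larbs" phandle array has probed, so each larb is added to a
 * component match list and the component framework calls mtk_iommu_bind()
 * only after all of them are available.
 */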
static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data *data;
        struct device *dev = &pdev->dev;
        struct resource *res;
        resource_size_t ioaddr;
        struct component_match *match = NULL;
        void *protect;
        int i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;
        data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);

        /* Protect memory. HW will access it when a translation fault occurs. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        /* Whether the current DRAM extends beyond 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        data->bclk = devm_clk_get(dev, "bclk");
        if (IS_ERR(data->bclk))
                return PTR_ERR(data->bclk);

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;
        data->smi_imu.larb_nr = larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;
                u32 id;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode))
                        continue;

                ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
                if (ret) /* The ids are consecutive if this property is absent */
                        id = i;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev)
                        return -EPROBE_DEFER;
                data->smi_imu.larb_imu[id].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        list_add_tail(&data->list, &m4ulist);

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

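/*
 * Suspend saves the runtime-configured registers and gates the bus clock;
 * resume re-enables the clock and writes them back. The page table base is
 * only restored once a domain has actually been attached (m4u_dom is set).
 */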
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        clk_disable_unprepare(data->bclk);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        if (data->m4u_dom)
                writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
        { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = of_match_ptr(mtk_iommu_of_ids),
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init mtk_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register MTK IOMMU driver\n");

        return ret;
}

subsys_initcall(mtk_iommu_init)