// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR                    0x000

#define REG_MMU_INVALIDATE                      0x020
#define F_ALL_INVLD                             0x2
#define F_MMU_INV_RANGE                         0x1

#define REG_MMU_INVLD_START_A                   0x024
#define REG_MMU_INVLD_END_A                     0x028

#define REG_MMU_INV_SEL                         0x038
#define F_INVLD_EN0                             BIT(0)
#define F_INVLD_EN1                             BIT(1)

#define REG_MMU_STANDARD_AXI_MODE               0x048
#define REG_MMU_DCM_DIS                         0x050

#define REG_MMU_CTRL_REG                        0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD           BIT(4)
#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
        ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
/* It is named F_MMU_TF_PROT_SEL on mt2712. */
#define F_MMU_TF_PROTECT_SEL(prot, data) \
        (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))

#define REG_MMU_IVRP_PADDR                      0x114

#define REG_MMU_VLD_PA_RNG                      0x118
#define F_MMU_VLD_PA_RNG(EA, SA)                (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0                    0x120
#define F_L2_MULIT_HIT_EN                       BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN               BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN          BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN             BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN              BIT(5)
#define F_MISS_FIFO_ERR_INT_EN                  BIT(6)
#define F_INT_CLR_BIT                           BIT(12)

#define REG_MMU_INT_MAIN_CONTROL                0x124
#define F_INT_TRANSLATION_FAULT                 BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT              BIT(1)
#define F_INT_INVALID_PA_FAULT                  BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT           BIT(3)
#define F_INT_TLB_MISS_FAULT                    BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT       BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT    BIT(6)

#define REG_MMU_CPE_DONE                        0x12C

#define REG_MMU_FAULT_ST1                       0x134

#define REG_MMU_FAULT_VA                        0x13c
#define F_MMU_FAULT_VA_WRITE_BIT                BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT                BIT(0)

#define REG_MMU_INVLD_PA                        0x140
#define REG_MMU_INT_ID                          0x150
#define F_MMU0_INT_ID_LARB_ID(a)                (((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)                (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN                    128

/*
 * Get the local arbiter ID and the port ID within that larb from the
 * mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)             (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)             ((id) & 0x1f)

struct mtk_iommu_domain {
        spinlock_t                      pgtlock; /* lock for page table */

        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;

        struct iommu_domain             domain;
};

static const struct iommu_ops mtk_iommu_ops;

static LIST_HEAD(m4ulist);      /* List all the M4U HWs */

#define for_each_m4u(data)      list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance reasons.
 *
 * Always return the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the tlb flush all done */
        }
}

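/*
 * Post a range invalidation to each M4U; completion is not awaited here.
 * The matching wait for REG_MMU_CPE_DONE happens later in
 * mtk_iommu_tlb_sync().
 */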
static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
                                           size_t granule, bool leaf,
                                           void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);

                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);
                data->tlb_flush_active = true;
        }
}

static void mtk_iommu_tlb_sync(void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                /* Avoid timing out if there's nothing to wait for */
                if (!data->tlb_flush_active)
                        return;

                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 100000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(cookie);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                data->tlb_flush_active = false;
        }
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
        .tlb_sync = mtk_iommu_tlb_sync,
};

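/*
 * Translation fault interrupt handler: read the faulting IOVA/PA and the
 * larb/port that raised it, report it through report_iommu_fault(), then
 * clear the interrupt and flush the whole TLB.
 */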
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
        regval = readl_relaxed(data->base + REG_MMU_INT_ID);
        fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
        fault_port = F_MMU0_INT_ID_PORT_ID(regval);

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

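/*
 * Enable or disable the IOMMU path for every port of this master device:
 * decode each fwspec id into its larb/port and update the MMU enable bit
 * in the shared SMI larb configuration.
 */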
static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu *larb_mmu;
        unsigned int larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->smi_imu.larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

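/*
 * Set up the ARM short-descriptor (v7s) io-pgtable for this domain: 32-bit
 * input/output address space, TLB maintenance through mtk_iommu_gather_ops,
 * and the MTK 4GB quirk when DRAM extends above 4GB.
 */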
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        spin_lock_init(&dom->pgtlock);

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &mtk_iommu_gather_ops,
                .iommu_dev = data->dev,
        };

        if (data->enable_4GB)
                dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain))
                goto free_dom;

        if (mtk_iommu_domain_finalise(dom))
                goto put_dma_cookie;

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;

put_dma_cookie:
        iommu_put_dma_cookie(&dom->domain);
free_dom:
        kfree(dom);
        return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        free_io_pgtable_ops(dom->iop);
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

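/*
 * All masters share a single m4u domain: the first attach writes the
 * pgtable base into the M4U HW, later attaches only enable the ports of
 * the newly attached device.
 */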
static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return -ENODEV;

        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr[0],
                       data->base + REG_MMU_PT_BASE_ADDR);
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dom->pgtlock, flags);
        ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
                            size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        size_t unmapsz;

        spin_lock_irqsave(&dom->pgtlock, flags);
        unmapsz = dom->iop->unmap(dom->iop, iova, size);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return unmapsz;
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
        mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

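/*
 * Walk the v7s pgtable to translate an IOVA. In 4GB mode the physical
 * memory lives above 4GB, so bit 32 is set in the returned PA.
 */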
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        unsigned long flags;
        phys_addr_t pa;

        spin_lock_irqsave(&dom->pgtlock, flags);
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        if (data->enable_4GB)
                pa |= BIT_ULL(32);

        return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;
        struct iommu_group *group;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */

        data = fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;

        data = fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

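/*
 * Parse one "iommus" specifier of a client device. Each specifier is a
 * single cell carrying the MTK_M4U_ID-encoded larb/port; the m4u driver
 * data is stashed in the fwspec the first time it is seen.
 */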
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}

static const struct iommu_ops mtk_iommu_ops = {
        .domain_alloc = mtk_iommu_domain_alloc,
        .domain_free = mtk_iommu_domain_free,
        .attach_dev = mtk_iommu_attach_device,
        .detach_dev = mtk_iommu_detach_device,
        .map = mtk_iommu_map,
        .unmap = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_iotlb_sync,
        .iotlb_sync = mtk_iommu_iotlb_sync,
        .iova_to_phys = mtk_iommu_iova_to_phys,
        .add_device = mtk_iommu_add_device,
        .remove_device = mtk_iommu_remove_device,
        .device_group = mtk_iommu_device_group,
        .of_xlate = mtk_iommu_of_xlate,
        .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

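/*
 * Enable the bus clock and program the M4U control, interrupt-enable,
 * protect-buffer (IVRP) and, in 4GB mode, valid-PA-range registers, then
 * request the translation fault interrupt.
 */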
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        regval = F_MMU_TF_PROTECT_SEL(2, data);
        if (data->m4u_plat == M4U_MT8173)
                regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        if (data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Here record bit[32:30].
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

        /* The MISC control register's default value is fine except on mt8173. */
        if (data->m4u_plat == M4U_MT8173)
                writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind = mtk_iommu_bind,
        .unbind = mtk_iommu_unbind,
};

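/*
 * Probe: allocate the translation-fault protect buffer, map the M4U
 * registers, get the IRQ and bus clock, build a component match from the
 * "mediatek,larbs" phandles, initialise the HW and register with the
 * IOMMU core before binding the SMI larbs.
 */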
static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data *data;
        struct device *dev = &pdev->dev;
        struct resource *res;
        resource_size_t ioaddr;
        struct component_match *match = NULL;
        void *protect;
        int i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;
        data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);

        /* Protect memory. The HW accesses this buffer on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        /* Whether the current DRAM is over 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        data->bclk = devm_clk_get(dev, "bclk");
        if (IS_ERR(data->bclk))
                return PTR_ERR(data->bclk);

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;
        data->smi_imu.larb_nr = larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;
                u32 id;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode)) {
                        of_node_put(larbnode);
                        continue;
                }

                ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
                if (ret) /* The id is consecutive if this property is absent */
                        id = i;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev) {
                        of_node_put(larbnode);
                        return -EPROBE_DEFER;
                }
                data->smi_imu.larb_imu[id].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        list_add_tail(&data->list, &m4ulist);

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

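/*
 * Suspend saves the M4U register state and gates the bus clock; resume
 * re-enables the clock, restores the registers and rewrites the pgtable
 * base if a domain is already attached.
 */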
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        clk_disable_unprepare(data->bclk);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        if (data->m4u_dom)
                writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
        { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = of_match_ptr(mtk_iommu_of_ids),
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init mtk_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register MTK IOMMU driver\n");

        return ret;
}

subsys_initcall(mtk_iommu_init)