// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TLB_MISS_FAULT			BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134

#define REG_MMU_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU_INVLD_PA			0x140
#define REG_MMU_INT_ID				0x150
#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)

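/*
 * For example, a hypothetical mtk_m4u_id of 0x43 would decode to
 * larb 2 ((0x43 >> 5) & 0xf) and port 3 (0x43 & 0x1f).
 */
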
struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G       3G       4G       5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 *                                 4G      5G       6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by M4U; for Regions 'B'/'C'/'D', bit32
 * of the CPU physical address always needs to be set, and for Region 'E',
 * the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL

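/*
 * For example, with 4GB mode enabled, a CPU PA of 0x40000000 (Region 'B')
 * is written into the pagetable with bit32 set, i.e. 0x140000000, while a
 * CPU PA of 0x100000000 (Region 'E') is used as is. mtk_iommu_iova_to_phys()
 * below undoes the remap by clearing bit32 for output addresses at or above
 * MTK_IOMMU_4GB_MODE_REMAP_BASE.
 */
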
static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance reasons.
 *
 * This always returns the mtk_iommu_data of the first probed M4U where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */
	}
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);
		data->tlb_flush_active = true;
	}
}

static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		/* Avoid timing out if there's nothing to wait for */
		if (!data->tlb_flush_active)
			return;

		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 100000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		data->tlb_flush_active = false;
	}
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
	fault_port = F_MMU0_INT_ID_PORT_ID(regval);

	fault_larb = data->plat_data->larbid_remap[fault_larb];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr[0],
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	int ret;

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	data = fwspec->iommu_priv;
	iommu_device_link(&data->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	data = fwspec->iommu_priv;
	iommu_device_unlink(&data->iommu, dev);

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}

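/*
 * The single cell comes from the consumer's "iommus" property, e.g. a
 * (hypothetical) "iommus = <&iommu M4U_PORT_FOO>;", where the port macro
 * encodes both the larb and the port as described above for MTK_M4U_ID.
 */
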
static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_iotlb_sync,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB && data->plat_data->m4u_plat != M4U_MT8173) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30],
		 * i.e. a start of 4 (0b100) and an end of 7 (0b111).
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	/* It's a MISC control register whose default value is ok except for mt8173. */
	if (data->plat_data->m4u_plat == M4U_MT8173)
		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data   *data;
	struct device           *dev = &pdev->dev;
	struct resource         *res;
	resource_size_t		ioaddr;
	struct component_match  *match = NULL;
	void                    *protect;
	int                     i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory. HW will access here when a translation fault occurs. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current dram is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!data->plat_data->has_4gb_mode)
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (data->plat_data->has_bclk) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret)/* The id is consecutive if this property doesn't exist */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->smi_imu.larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	if (data->m4u_dom)
		writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat     = M4U_MT2712,
	.has_4gb_mode = true,
	.has_bclk     = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat     = M4U_MT8173,
	.has_4gb_mode = true,
	.has_bclk     = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)