Commit | Line | Data |
---|---|---|
4100b8c2 MR |
1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) |
2 | // Copyright (C) 2016-2018, Allwinner Technology CO., LTD. | |
3 | // Copyright (C) 2019-2020, Cerno | |
4 | ||
5 | #include <linux/bitfield.h> | |
6 | #include <linux/bug.h> | |
7 | #include <linux/clk.h> | |
8 | #include <linux/device.h> | |
9 | #include <linux/dma-direction.h> | |
4100b8c2 MR |
10 | #include <linux/dma-mapping.h> |
11 | #include <linux/err.h> | |
12 | #include <linux/errno.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/iommu.h> | |
15 | #include <linux/iopoll.h> | |
16 | #include <linux/ioport.h> | |
17 | #include <linux/log2.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/of_platform.h> | |
20 | #include <linux/platform_device.h> | |
21 | #include <linux/pm.h> | |
22 | #include <linux/pm_runtime.h> | |
23 | #include <linux/reset.h> | |
24 | #include <linux/sizes.h> | |
25 | #include <linux/slab.h> | |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/types.h> | |
28 | ||
29 | #define IOMMU_RESET_REG 0x010 | |
9ad0c125 | 30 | #define IOMMU_RESET_RELEASE_ALL 0xffffffff |
4100b8c2 MR |
31 | #define IOMMU_ENABLE_REG 0x020 |
32 | #define IOMMU_ENABLE_ENABLE BIT(0) | |
33 | ||
34 | #define IOMMU_BYPASS_REG 0x030 | |
35 | #define IOMMU_AUTO_GATING_REG 0x040 | |
36 | #define IOMMU_AUTO_GATING_ENABLE BIT(0) | |
37 | ||
38 | #define IOMMU_WBUF_CTRL_REG 0x044 | |
39 | #define IOMMU_OOO_CTRL_REG 0x048 | |
40 | #define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c | |
41 | #define IOMMU_TTB_REG 0x050 | |
42 | #define IOMMU_TLB_ENABLE_REG 0x060 | |
43 | #define IOMMU_TLB_PREFETCH_REG 0x070 | |
44 | #define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m) | |
45 | ||
46 | #define IOMMU_TLB_FLUSH_REG 0x080 | |
47 | #define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17) | |
48 | #define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16) | |
49 | #define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0)) | |
50 | ||
51 | #define IOMMU_TLB_IVLD_ADDR_REG 0x090 | |
52 | #define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094 | |
53 | #define IOMMU_TLB_IVLD_ENABLE_REG 0x098 | |
54 | #define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0) | |
55 | ||
56 | #define IOMMU_PC_IVLD_ADDR_REG 0x0a0 | |
57 | #define IOMMU_PC_IVLD_ENABLE_REG 0x0a8 | |
58 | #define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0) | |
59 | ||
60 | #define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4) | |
61 | #define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2))) | |
62 | #define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2) + 1)) | |
63 | ||
64 | #define IOMMU_DM_AUT_OVWT_REG 0x0d0 | |
65 | #define IOMMU_INT_ENABLE_REG 0x100 | |
66 | #define IOMMU_INT_CLR_REG 0x104 | |
67 | #define IOMMU_INT_STA_REG 0x108 | |
68 | #define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4) | |
69 | #define IOMMU_INT_ERR_ADDR_L1_REG 0x130 | |
70 | #define IOMMU_INT_ERR_ADDR_L2_REG 0x134 | |
71 | #define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4) | |
72 | #define IOMMU_L1PG_INT_REG 0x0180 | |
73 | #define IOMMU_L2PG_INT_REG 0x0184 | |
74 | ||
75 | #define IOMMU_INT_INVALID_L2PG BIT(17) | |
76 | #define IOMMU_INT_INVALID_L1PG BIT(16) | |
77 | #define IOMMU_INT_MASTER_PERMISSION(m) BIT(m) | |
78 | #define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \ | |
79 | IOMMU_INT_MASTER_PERMISSION(1) | \ | |
80 | IOMMU_INT_MASTER_PERMISSION(2) | \ | |
81 | IOMMU_INT_MASTER_PERMISSION(3) | \ | |
82 | IOMMU_INT_MASTER_PERMISSION(4) | \ | |
83 | IOMMU_INT_MASTER_PERMISSION(5)) | |
84 | #define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \ | |
85 | IOMMU_INT_INVALID_L2PG | \ | |
86 | IOMMU_INT_MASTER_MASK) | |
87 | ||
88 | #define PT_ENTRY_SIZE sizeof(u32) | |
89 | ||
90 | #define NUM_DT_ENTRIES 4096 | |
91 | #define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE) | |
92 | ||
93 | #define NUM_PT_ENTRIES 256 | |
94 | #define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE) | |
95 | ||
e563cc0c JS |
96 | #define SPAGE_SIZE 4096 |
97 | ||
4100b8c2 MR |
98 | struct sun50i_iommu { |
99 | struct iommu_device iommu; | |
100 | ||
101 | /* Lock to modify the IOMMU registers */ | |
102 | spinlock_t iommu_lock; | |
103 | ||
104 | struct device *dev; | |
105 | void __iomem *base; | |
106 | struct reset_control *reset; | |
107 | struct clk *clk; | |
108 | ||
109 | struct iommu_domain *domain; | |
110 | struct iommu_group *group; | |
111 | struct kmem_cache *pt_pool; | |
112 | }; | |
113 | ||
114 | struct sun50i_iommu_domain { | |
115 | struct iommu_domain domain; | |
116 | ||
117 | /* Number of devices attached to the domain */ | |
118 | refcount_t refcnt; | |
119 | ||
120 | /* L1 Page Table */ | |
121 | u32 *dt; | |
122 | dma_addr_t dt_dma; | |
123 | ||
124 | struct sun50i_iommu *iommu; | |
125 | }; | |
126 | ||
/* Upcast a generic iommu_domain to the driver's wrapping structure. */
static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}
131 | ||
/*
 * Retrieve the IOMMU instance stored in the device's iommu private data
 * (set in sun50i_iommu_of_xlate()). May be NULL if .of_xlate never ran.
 */
static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}
136 | ||
/* Read a 32-bit IOMMU register at the given byte offset. */
static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}
141 | ||
/* Write a 32-bit IOMMU register at the given byte offset. */
static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}
146 | ||
147 | /* | |
148 | * The Allwinner H6 IOMMU uses a 2-level page table. | |
149 | * | |
150 | * The first level is the usual Directory Table (DT), that consists of | |
151 | * 4096 4-bytes Directory Table Entries (DTE), each pointing to a Page | |
152 | * Table (PT). | |
153 | * | |
 * Each PT consists of 256 4-bytes Page Table Entries (PTE), each
155 | * pointing to a 4kB page of physical memory. | |
156 | * | |
157 | * The IOMMU supports a single DT, pointed by the IOMMU_TTB_REG | |
158 | * register that contains its physical address. | |
159 | */ | |
160 | ||
161 | #define SUN50I_IOVA_DTE_MASK GENMASK(31, 20) | |
162 | #define SUN50I_IOVA_PTE_MASK GENMASK(19, 12) | |
163 | #define SUN50I_IOVA_PAGE_MASK GENMASK(11, 0) | |
164 | ||
/* Index into the Directory Table: bits 31:20 of the IOVA. */
static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}
169 | ||
/* Index into a Page Table: bits 19:12 of the IOVA. */
static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}
174 | ||
/* Byte offset within the 4kB page: bits 11:0 of the IOVA. */
static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
179 | ||
180 | /* | |
181 | * Each Directory Table Entry has a Page Table address and a valid | |
182 | * bit: | |
183 | ||
184 | * +---------------------+-----------+-+ | |
185 | * | PT address | Reserved |V| | |
186 | * +---------------------+-----------+-+ | |
187 | * 31:10 - Page Table address | |
188 | * 9:2 - Reserved | |
189 | * 1:0 - 1 if the entry is valid | |
190 | */ | |
191 | ||
192 | #define SUN50I_DTE_PT_ADDRESS_MASK GENMASK(31, 10) | |
193 | #define SUN50I_DTE_PT_ATTRS GENMASK(1, 0) | |
194 | #define SUN50I_DTE_PT_VALID 1 | |
195 | ||
/* Extract the physical address of the L2 Page Table from a DTE. */
static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}
200 | ||
/* A DTE is valid only when its attribute field equals the VALID encoding. */
static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}
205 | ||
/*
 * Build a Directory Table Entry pointing at the given (1kB-aligned)
 * page table DMA address, with the valid bit set.
 */
static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
210 | ||
211 | /* | |
212 | * Each PTE has a Page address, an authority index and a valid bit: | |
213 | * | |
214 | * +----------------+-----+-----+-----+---+-----+ | |
215 | * | Page address | Rsv | ACI | Rsv | V | Rsv | | |
216 | * +----------------+-----+-----+-----+---+-----+ | |
217 | * 31:12 - Page address | |
218 | * 11:8 - Reserved | |
219 | * 7:4 - Authority Control Index | |
220 | * 3:2 - Reserved | |
221 | * 1 - 1 if the entry is valid | |
222 | * 0 - Reserved | |
223 | * | |
224 | * The way permissions work is that the IOMMU has 16 "domains" that | |
225 | * can be configured to give each masters either read or write | |
226 | * permissions through the IOMMU_DM_AUT_CTRL_REG registers. The domain | |
227 | * 0 seems like the default domain, and its permissions in the | |
228 | * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really | |
229 | * useful to enforce any particular permission. | |
230 | * | |
231 | * Each page entry will then have a reference to the domain they are | |
232 | * affected to, so that we can actually enforce them on a per-page | |
233 | * basis. | |
234 | * | |
235 | * In order to make it work with the IOMMU framework, we will be using | |
236 | * 4 different domains, starting at 1: RD_WR, RD, WR and NONE | |
237 | * depending on the permission we want to enforce. Each domain will | |
238 | * have each master setup in the same way, since the IOMMU framework | |
239 | * doesn't seem to restrict page access on a per-device basis. And | |
240 | * then we will use the relevant domain index when generating the page | |
241 | * table entry depending on the permissions we want to be enforced. | |
242 | */ | |
243 | ||
244 | enum sun50i_iommu_aci { | |
245 | SUN50I_IOMMU_ACI_DO_NOT_USE = 0, | |
246 | SUN50I_IOMMU_ACI_NONE, | |
247 | SUN50I_IOMMU_ACI_RD, | |
248 | SUN50I_IOMMU_ACI_WR, | |
249 | SUN50I_IOMMU_ACI_RD_WR, | |
250 | }; | |
251 | ||
252 | #define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12) | |
253 | #define SUN50I_PTE_ACI_MASK GENMASK(7, 4) | |
254 | #define SUN50I_PTE_PAGE_VALID BIT(1) | |
255 | ||
/* Extract the 4kB-aligned physical page address from a PTE. */
static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}
260 | ||
/* Extract the Authority Control Index (permission domain) from a PTE. */
static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}
265 | ||
/* Check the PTE valid bit (bit 1). */
static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}
270 | ||
271 | static u32 sun50i_mk_pte(phys_addr_t page, int prot) | |
272 | { | |
273 | enum sun50i_iommu_aci aci; | |
274 | u32 flags = 0; | |
275 | ||
eac0104d | 276 | if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE)) |
4100b8c2 MR |
277 | aci = SUN50I_IOMMU_ACI_RD_WR; |
278 | else if (prot & IOMMU_READ) | |
279 | aci = SUN50I_IOMMU_ACI_RD; | |
280 | else if (prot & IOMMU_WRITE) | |
281 | aci = SUN50I_IOMMU_ACI_WR; | |
282 | else | |
283 | aci = SUN50I_IOMMU_ACI_NONE; | |
284 | ||
285 | flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci); | |
286 | page &= SUN50I_PTE_PAGE_ADDRESS_MASK; | |
287 | return page | flags | SUN50I_PTE_PAGE_VALID; | |
288 | } | |
289 | ||
/*
 * Make @count CPU-written page-table entries starting at @vaddr visible
 * to the IOMMU by syncing the cache lines for device access.
 *
 * Uses virt_to_phys() as the DMA handle: the driver relies on the DMA
 * address and physical address being identical (this is checked with a
 * WARN_ON in sun50i_iommu_alloc_page_table()).
 */
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}
299 | ||
e563cc0c JS |
/*
 * Invalidate the TLB entry for a single IOVA page.
 *
 * Programs the invalidation address and a page-granular mask
 * (GENMASK(31, 12)), then kicks the engine; the hardware clears the
 * enable bit when the invalidation completes, which we poll for.
 */
static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}
316 | ||
/*
 * Invalidate the Page Table Walk cache entry covering @iova.
 *
 * Same handshake as sun50i_iommu_zap_iova(): write the address, set the
 * enable bit, and poll until the hardware clears it.
 */
static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}
332 | ||
/*
 * Invalidate the TLB and PTW cache entries covering [iova, iova + size).
 * Caller must hold iommu->iommu_lock.
 *
 * Auto gating is disabled around the invalidation sequence and restored
 * afterwards. Each boundary is zapped together with the next page (or
 * next 1MB for the PTW cache) — presumably to cover hardware
 * prefetching of adjacent entries; TODO confirm against the datasheet.
 */
static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}
355 | ||
4100b8c2 MR |
/*
 * Flush the PTW cache, the macro TLB, and all six per-master micro
 * TLBs in one shot. Caller must hold iommu->iommu_lock.
 *
 * The hardware clears the flush register once done; poll for that.
 *
 * Returns 0 on success or the (negative) readl_poll_timeout_atomic()
 * error on timeout.
 */
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}
382 | ||
/* .flush_iotlb_all callback: flush every cached translation. */
static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
404 | ||
e563cc0c JS |
/*
 * .iotlb_sync_map callback: invalidate the zapped range right after new
 * mappings are installed.
 *
 * NOTE(review): unlike sun50i_iommu_flush_iotlb_all(), this does not
 * guard against sun50i_domain->iommu being NULL — presumably mappings
 * can only be created after attach has set it; confirm against callers.
 */
static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
416 | ||
4100b8c2 MR |
/*
 * .iotlb_sync callback: the gather range is ignored and the whole TLB
 * is flushed instead.
 */
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}
422 | ||
/*
 * Power up and program the IOMMU for the currently attached domain.
 *
 * De-asserts reset, enables the clock, then under the register lock:
 * points the hardware at the domain's L1 table, enables prefetch for
 * all six masters, unmasks interrupts, programs the per-ACI permission
 * domains (NONE: no access; RD: write denied; WR: read denied), flushes
 * the TLB, and finally enables auto gating and the IOMMU itself.
 *
 * Returns 0 on success or a negative errno; on failure the clock and
 * reset are rolled back.
 */
static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	/* Nothing to program until a domain has been attached. */
	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	/* ACI_NONE: deny both reads and writes for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	/* ACI_RD: read-only, deny writes for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	/* ACI_WR: write-only, deny reads for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}
504 | ||
/*
 * Disable the IOMMU hardware and power it down: clear the enable and
 * translation table registers under the lock, then stop the clock and
 * assert reset.
 */
static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}
519 | ||
/*
 * Allocate and DMA-map a zeroed L2 page table from the slab pool.
 *
 * Returns the CPU pointer to the table or ERR_PTR(-ENOMEM). The DMA
 * handle itself is not returned: the driver relies on the DMA address
 * being equal to the physical address (WARN_ON below), so callers
 * recover it with virt_to_phys().
 */
static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}
542 | ||
/*
 * Unmap and free an L2 page table allocated by
 * sun50i_iommu_alloc_page_table(). The DMA handle is reconstructed via
 * virt_to_phys(), matching the phys == dma assumption made at alloc.
 */
static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}
551 | ||
/*
 * Return the L2 page table covering @iova, allocating and installing
 * one if the DTE is empty.
 *
 * Installation uses cmpxchg() on the DTE so concurrent mappers race
 * safely: the loser frees its freshly allocated table and adopts the
 * winner's. Both the page table and the updated DTE are flushed for
 * device visibility before returning.
 *
 * Returns the CPU pointer to the page table, or an ERR_PTR on
 * allocation failure.
 */
static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		/* Fast path: a table is already installed. */
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		/* Lost the race: free ours, use the installed table. */
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}
590 | ||
591 | static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova, | |
592 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) | |
593 | { | |
594 | struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); | |
595 | struct sun50i_iommu *iommu = sun50i_domain->iommu; | |
596 | u32 pte_index; | |
597 | u32 *page_table, *pte_addr; | |
598 | int ret = 0; | |
599 | ||
600 | page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp); | |
601 | if (IS_ERR(page_table)) { | |
602 | ret = PTR_ERR(page_table); | |
603 | goto out; | |
604 | } | |
605 | ||
606 | pte_index = sun50i_iova_get_pte_index(iova); | |
607 | pte_addr = &page_table[pte_index]; | |
608 | if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) { | |
609 | phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr); | |
610 | dev_err(iommu->dev, | |
611 | "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n", | |
612 | &iova, &page_phys, &paddr, prot); | |
613 | ret = -EBUSY; | |
614 | goto out; | |
615 | } | |
616 | ||
617 | *pte_addr = sun50i_mk_pte(paddr, prot); | |
618 | sun50i_table_flush(sun50i_domain, pte_addr, 1); | |
619 | ||
620 | out: | |
621 | return ret; | |
622 | } | |
623 | ||
624 | static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova, | |
625 | size_t size, struct iommu_iotlb_gather *gather) | |
626 | { | |
627 | struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); | |
4100b8c2 | 628 | phys_addr_t pt_phys; |
4100b8c2 MR |
629 | u32 *pte_addr; |
630 | u32 dte; | |
631 | ||
632 | dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; | |
633 | if (!sun50i_dte_is_pt_valid(dte)) | |
634 | return 0; | |
635 | ||
636 | pt_phys = sun50i_dte_get_pt_address(dte); | |
637 | pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova); | |
4100b8c2 MR |
638 | |
639 | if (!sun50i_pte_is_page_valid(*pte_addr)) | |
640 | return 0; | |
641 | ||
642 | memset(pte_addr, 0, sizeof(*pte_addr)); | |
643 | sun50i_table_flush(sun50i_domain, pte_addr, 1); | |
644 | ||
645 | return SZ_4K; | |
646 | } | |
647 | ||
648 | static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain, | |
649 | dma_addr_t iova) | |
650 | { | |
651 | struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); | |
652 | phys_addr_t pt_phys; | |
653 | u32 *page_table; | |
654 | u32 dte, pte; | |
655 | ||
656 | dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; | |
657 | if (!sun50i_dte_is_pt_valid(dte)) | |
658 | return 0; | |
659 | ||
660 | pt_phys = sun50i_dte_get_pt_address(dte); | |
661 | page_table = (u32 *)phys_to_virt(pt_phys); | |
662 | pte = page_table[sun50i_iova_get_pte_index(iova)]; | |
663 | if (!sun50i_pte_is_page_valid(pte)) | |
664 | return 0; | |
665 | ||
666 | return sun50i_pte_get_page_address(pte) + | |
667 | sun50i_iova_get_page_offset(iova); | |
668 | } | |
669 | ||
/*
 * .domain_alloc callback: create a domain with a zeroed L1 Directory
 * Table. The DT is DMA-mapped later, at attach time.
 *
 * Returns the embedded iommu_domain, or NULL on unsupported type or
 * allocation failure.
 */
static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
	struct sun50i_iommu_domain *sun50i_domain;

	/* Only DMA and unmanaged domains are supported. */
	if (type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	/* The L1 table must be physically contiguous, hence whole pages. */
	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	/* Starts at one; each attached device takes another reference. */
	refcount_set(&sun50i_domain->refcnt, 1);

	/* The hardware translates the whole 32-bit IOVA space. */
	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}
700 | ||
/*
 * .free callback: release the L1 Directory Table pages and the domain
 * itself. L2 tables are torn down earlier, in
 * sun50i_iommu_detach_domain().
 */
static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}
710 | ||
711 | static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu, | |
712 | struct sun50i_iommu_domain *sun50i_domain) | |
713 | { | |
714 | iommu->domain = &sun50i_domain->domain; | |
715 | sun50i_domain->iommu = iommu; | |
716 | ||
717 | sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt, | |
718 | DT_SIZE, DMA_TO_DEVICE); | |
719 | if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) { | |
720 | dev_err(iommu->dev, "Couldn't map L1 Page Table\n"); | |
721 | return -ENOMEM; | |
722 | } | |
723 | ||
724 | return sun50i_iommu_enable(iommu); | |
725 | } | |
726 | ||
/*
 * Tear down the domain's translations and unlink it from the IOMMU:
 * clear and free every installed L2 page table, disable the hardware,
 * and unmap the L1 Directory Table.
 *
 * Called once the last attached device is gone (refcount hit zero).
 */
static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		/* Invalidate the DTE before freeing the table it points to. */
		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}


	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}
759 | ||
/*
 * Drop @dev's reference on @domain; the last reference triggers the
 * full domain teardown. A detach for a domain that is not the one
 * currently programmed is silently ignored.
 */
static void sun50i_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain != domain)
		return;

	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
}
774 | ||
775 | static int sun50i_iommu_attach_device(struct iommu_domain *domain, | |
776 | struct device *dev) | |
777 | { | |
778 | struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); | |
779 | struct sun50i_iommu *iommu; | |
780 | ||
781 | iommu = sun50i_iommu_from_dev(dev); | |
782 | if (!iommu) | |
783 | return -ENODEV; | |
784 | ||
785 | dev_dbg(dev, "Attaching to IOMMU domain\n"); | |
786 | ||
787 | refcount_inc(&sun50i_domain->refcnt); | |
788 | ||
789 | if (iommu->domain == domain) | |
790 | return 0; | |
791 | ||
792 | if (iommu->domain) | |
793 | sun50i_iommu_detach_device(iommu->domain, dev); | |
794 | ||
795 | sun50i_iommu_attach_domain(iommu, sun50i_domain); | |
796 | ||
797 | return 0; | |
798 | } | |
799 | ||
/*
 * .probe_device callback: hand the core the iommu_device for @dev, or
 * -ENODEV if .of_xlate never associated it with this IOMMU.
 */
static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}
810 | ||
4100b8c2 MR |
/*
 * .device_group callback: every master shares the IOMMU's single
 * group; return it with an extra reference.
 */
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}
817 | ||
818 | static int sun50i_iommu_of_xlate(struct device *dev, | |
819 | struct of_phandle_args *args) | |
820 | { | |
821 | struct platform_device *iommu_pdev = of_find_device_by_node(args->np); | |
822 | unsigned id = args->args[0]; | |
823 | ||
824 | dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev)); | |
825 | ||
826 | return iommu_fwspec_add_ids(dev, &id, 1); | |
827 | } | |
828 | ||
/* IOMMU core callbacks; only 4kB pages are supported by the hardware. */
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap	= SZ_4K,
	.device_group	= sun50i_iommu_device_group,
	.domain_alloc	= sun50i_iommu_domain_alloc,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sun50i_iommu_attach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
		.iotlb_sync	= sun50i_iommu_iotlb_sync,
		.iova_to_phys	= sun50i_iommu_iova_to_phys,
		.map		= sun50i_iommu_map,
		.unmap		= sun50i_iommu_unmap,
		.free		= sun50i_iommu_domain_free,
	}
};
846 | ||
/*
 * Log a page fault, forward it to the attached domain's fault handler
 * (if any), and zap the faulting page's TLB/PTW entries so the
 * hardware retries the walk rather than replaying a stale entry.
 */
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");

	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}
861 | ||
862 | static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu, | |
863 | unsigned addr_reg, | |
864 | unsigned blame_reg) | |
865 | { | |
866 | phys_addr_t iova; | |
867 | unsigned master; | |
868 | u32 blame; | |
869 | ||
870 | assert_spin_locked(&iommu->iommu_lock); | |
871 | ||
872 | iova = iommu_read(iommu, addr_reg); | |
873 | blame = iommu_read(iommu, blame_reg); | |
874 | master = ilog2(blame & IOMMU_INT_MASTER_MASK); | |
875 | ||
876 | /* | |
877 | * If the address is not in the page table, we can't get what | |
878 | * operation triggered the fault. Assume it's a read | |
879 | * operation. | |
880 | */ | |
881 | sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ); | |
882 | ||
883 | return iova; | |
884 | } | |
885 | ||
886 | static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) | |
887 | { | |
888 | enum sun50i_iommu_aci aci; | |
889 | phys_addr_t iova; | |
890 | unsigned master; | |
891 | unsigned dir; | |
892 | u32 blame; | |
893 | ||
894 | assert_spin_locked(&iommu->iommu_lock); | |
895 | ||
896 | blame = iommu_read(iommu, IOMMU_INT_STA_REG); | |
897 | master = ilog2(blame & IOMMU_INT_MASTER_MASK); | |
898 | iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master)); | |
899 | aci = sun50i_get_pte_aci(iommu_read(iommu, | |
900 | IOMMU_INT_ERR_DATA_REG(master))); | |
901 | ||
902 | switch (aci) { | |
903 | /* | |
904 | * If we are in the read-only domain, then it means we | |
905 | * tried to write. | |
906 | */ | |
907 | case SUN50I_IOMMU_ACI_RD: | |
908 | dir = IOMMU_FAULT_WRITE; | |
909 | break; | |
910 | ||
911 | /* | |
912 | * If we are in the write-only domain, then it means | |
913 | * we tried to read. | |
914 | */ | |
915 | case SUN50I_IOMMU_ACI_WR: | |
916 | ||
917 | /* | |
918 | * If we are in the domain without any permission, we | |
919 | * can't really tell. Let's default to a read | |
920 | * operation. | |
921 | */ | |
922 | case SUN50I_IOMMU_ACI_NONE: | |
923 | ||
924 | /* WTF? */ | |
925 | case SUN50I_IOMMU_ACI_RD_WR: | |
926 | default: | |
927 | dir = IOMMU_FAULT_READ; | |
928 | break; | |
929 | } | |
930 | ||
931 | /* | |
932 | * If the address is not in the page table, we can't get what | |
933 | * operation triggered the fault. Assume it's a read | |
934 | * operation. | |
935 | */ | |
936 | sun50i_iommu_report_fault(iommu, master, iova, dir); | |
937 | ||
938 | return iova; | |
939 | } | |
940 | ||
941 | static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) | |
942 | { | |
cef20703 | 943 | u32 status, l1_status, l2_status, resets; |
4100b8c2 | 944 | struct sun50i_iommu *iommu = dev_id; |
4100b8c2 MR |
945 | |
946 | spin_lock(&iommu->iommu_lock); | |
947 | ||
948 | status = iommu_read(iommu, IOMMU_INT_STA_REG); | |
949 | if (!(status & IOMMU_INT_MASK)) { | |
950 | spin_unlock(&iommu->iommu_lock); | |
951 | return IRQ_NONE; | |
952 | } | |
953 | ||
cef20703 JS |
954 | l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); |
955 | l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); | |
956 | ||
4100b8c2 | 957 | if (status & IOMMU_INT_INVALID_L2PG) |
03c7b78b JR |
958 | sun50i_iommu_handle_pt_irq(iommu, |
959 | IOMMU_INT_ERR_ADDR_L2_REG, | |
960 | IOMMU_L2PG_INT_REG); | |
4100b8c2 | 961 | else if (status & IOMMU_INT_INVALID_L1PG) |
03c7b78b JR |
962 | sun50i_iommu_handle_pt_irq(iommu, |
963 | IOMMU_INT_ERR_ADDR_L1_REG, | |
964 | IOMMU_L1PG_INT_REG); | |
4100b8c2 | 965 | else |
03c7b78b | 966 | sun50i_iommu_handle_perm_irq(iommu); |
4100b8c2 MR |
967 | |
968 | iommu_write(iommu, IOMMU_INT_CLR_REG, status); | |
969 | ||
cef20703 JS |
970 | resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK; |
971 | iommu_write(iommu, IOMMU_RESET_REG, ~resets); | |
9ad0c125 | 972 | iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); |
4100b8c2 MR |
973 | |
974 | spin_unlock(&iommu->iommu_lock); | |
975 | ||
976 | return IRQ_HANDLED; | |
977 | } | |
978 | ||
979 | static int sun50i_iommu_probe(struct platform_device *pdev) | |
980 | { | |
981 | struct sun50i_iommu *iommu; | |
982 | int ret, irq; | |
983 | ||
984 | iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); | |
985 | if (!iommu) | |
986 | return -ENOMEM; | |
987 | spin_lock_init(&iommu->iommu_lock); | |
988 | platform_set_drvdata(pdev, iommu); | |
989 | iommu->dev = &pdev->dev; | |
990 | ||
991 | iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev), | |
992 | PT_SIZE, PT_SIZE, | |
993 | SLAB_HWCACHE_ALIGN, | |
994 | NULL); | |
995 | if (!iommu->pt_pool) | |
996 | return -ENOMEM; | |
997 | ||
998 | iommu->group = iommu_group_alloc(); | |
999 | if (IS_ERR(iommu->group)) { | |
1000 | ret = PTR_ERR(iommu->group); | |
1001 | goto err_free_cache; | |
1002 | } | |
1003 | ||
1004 | iommu->base = devm_platform_ioremap_resource(pdev, 0); | |
ae7d2923 | 1005 | if (IS_ERR(iommu->base)) { |
4100b8c2 MR |
1006 | ret = PTR_ERR(iommu->base); |
1007 | goto err_free_group; | |
1008 | } | |
1009 | ||
1010 | irq = platform_get_irq(pdev, 0); | |
1011 | if (irq < 0) { | |
1012 | ret = irq; | |
1013 | goto err_free_group; | |
1014 | } | |
1015 | ||
1016 | iommu->clk = devm_clk_get(&pdev->dev, NULL); | |
1017 | if (IS_ERR(iommu->clk)) { | |
1018 | dev_err(&pdev->dev, "Couldn't get our clock.\n"); | |
1019 | ret = PTR_ERR(iommu->clk); | |
1020 | goto err_free_group; | |
1021 | } | |
1022 | ||
1023 | iommu->reset = devm_reset_control_get(&pdev->dev, NULL); | |
1024 | if (IS_ERR(iommu->reset)) { | |
1025 | dev_err(&pdev->dev, "Couldn't get our reset line.\n"); | |
1026 | ret = PTR_ERR(iommu->reset); | |
1027 | goto err_free_group; | |
1028 | } | |
1029 | ||
1030 | ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev, | |
1031 | NULL, dev_name(&pdev->dev)); | |
1032 | if (ret) | |
1033 | goto err_free_group; | |
1034 | ||
2d471b20 | 1035 | ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev); |
4100b8c2 MR |
1036 | if (ret) |
1037 | goto err_remove_sysfs; | |
1038 | ||
1039 | ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0, | |
1040 | dev_name(&pdev->dev), iommu); | |
1041 | if (ret < 0) | |
1042 | goto err_unregister; | |
1043 | ||
4100b8c2 MR |
1044 | return 0; |
1045 | ||
1046 | err_unregister: | |
1047 | iommu_device_unregister(&iommu->iommu); | |
1048 | ||
1049 | err_remove_sysfs: | |
1050 | iommu_device_sysfs_remove(&iommu->iommu); | |
1051 | ||
1052 | err_free_group: | |
1053 | iommu_group_put(iommu->group); | |
1054 | ||
1055 | err_free_cache: | |
1056 | kmem_cache_destroy(iommu->pt_pool); | |
1057 | ||
1058 | return ret; | |
1059 | } | |
1060 | ||
1061 | static const struct of_device_id sun50i_iommu_dt[] = { | |
1062 | { .compatible = "allwinner,sun50i-h6-iommu", }, | |
1063 | { /* sentinel */ }, | |
1064 | }; | |
1065 | MODULE_DEVICE_TABLE(of, sun50i_iommu_dt); | |
1066 | ||
1067 | static struct platform_driver sun50i_iommu_driver = { | |
1068 | .driver = { | |
1069 | .name = "sun50i-iommu", | |
1070 | .of_match_table = sun50i_iommu_dt, | |
1071 | .suppress_bind_attrs = true, | |
1072 | } | |
1073 | }; | |
1074 | builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe); | |
1075 | ||
1076 | MODULE_DESCRIPTION("Allwinner H6 IOMMU driver"); | |
1077 | MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); | |
1078 | MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>"); |