// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

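/*
 * Each set bit n in the bitmap advertises support for a power-of-two
 * mapping size of 2^n bytes: 0x007ff000 has bits 12..22 set, i.e.
 * 4 KiB, 8 KiB, ..., up to 4 MiB.
 */
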
struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu_ops {
	phys_addr_t (*pt_address)(u32 dte);
	u32 (*mk_dtentries)(dma_addr_t pt_dma);
	u32 (*mk_ptentries)(phys_addr_t page, int prot);
	phys_addr_t (*dte_addr_phys)(u32 addr);
	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
	u64 dma_bit_mask;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int num_irq;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

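/*
 * Putting the pieces together: for a 32-bit iova with all entries
 * valid, the walk the hardware performs (mirrored in software by
 * rk_iommu_iova_to_phys() below) is roughly, with the v1 masks shown:
 *
 *	dte  = dt[iova >> 22];                      ... DTE index, iova 31:22
 *	pt   = phys_to_virt(dte & 0xfffff000);
 *	pte  = pt[(iova >> 12) & 0x3ff];            ... PTE index, iova 21:12
 *	phys = (pte & 0xfffff000) | (iova & 0xfff); ... page + offset
 */
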
/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

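/*
 * For example (illustrative value): a v1 dte of 0x12345001 is valid
 * and yields pt_address 0x12345000; 0x12345000 itself would decode to
 * the same PT address but with the valid bit clear.
 */
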
/*
 * In v2:
 * 31:12 - PT address bit 31:0
 * 11: 8 - PT address bit 35:32
 *  7: 4 - PT address bit 39:36
 *  3: 1 - Reserved
 *     0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
#define DTE_HI_MASK1	GENMASK(11, 8)
#define DTE_HI_MASK2	GENMASK(7, 4)
#define DTE_HI_SHIFT1	24 /* shift bit 8 to bit 32 */
#define DTE_HI_SHIFT2	32 /* shift bit 4 to bit 36 */
#define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)

static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
	u64 dte_v2 = dte;

	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);

	return (phys_addr_t)dte_v2;
}

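/*
 * For example (illustrative value): a v2 dte of 0x12345ab1 is valid;
 * bits 11:8 (0xa) supply physical address bits 35:32 and bits 7:4 (0xb)
 * supply bits 39:36, so rk_dte_pt_address_v2() returns 0xba12345000.
 */
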
static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;

	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

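/*
 * For example (illustrative values): rk_mk_pte(0x12345000,
 * IOMMU_READ | IOMMU_WRITE) returns 0x12345000 | BIT(1) | BIT(2) |
 * BIT(0) = 0x12345007: a valid, readable, writable mapping of page
 * 0x12345000.
 */
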
/*
 * In v2:
 * 31:12 - Page address bit 31:0
 * 11: 9 - Page address bit 34:32
 *  8: 4 - Page address bit 39:35
 *     3 - Security
 *     2 - Writable
 *     1 - Readable
 *     0 - 1 if Page @ Page address is valid
 */

static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;

	return rk_mk_dte_v2(page) | flags;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

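/*
 * For example (illustrative value): iova 0x12345678 decomposes into
 * dte_index 0x048 (bits 31:22), pte_index 0x345 (bits 21:12) and
 * page_offset 0x678 (bits 11:0).
 */
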
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);

		if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

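/*
 * For the v1 ops, pt_address(DTE_ADDR_DUMMY) masks 0xCAFEBABE down to
 * 0xCAFEB000, so the read-back check above verifies that the address
 * bits (31:12) of MMU_DTE_ADDR are writable and stick.
 */
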
static inline phys_addr_t rk_dte_addr_phys(u32 addr)
{
	return (phys_addr_t)addr;
}

static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
{
	return dt_dma;
}

#define DT_HI_MASK	GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT	28

static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
	u64 addr64 = addr;

	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}

static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
{
	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
}

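/*
 * For example (illustrative value): a v2 directory table at physical
 * address 0xba12345000 is programmed into MMU_DTE_ADDR as 0x12345ba0
 * (address bits 39:32 packed into register bits 11:4 by the >> 28
 * shift); rk_dte_addr_phys_v2() undoes the packing on read-back.
 */
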
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (!err || WARN_ON_ONCE(err < 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_ops->pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_ops->mk_dtentries(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_ops->pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];

	pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_ops->pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_free_domain;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_free_domain:
	kfree(rk_domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_ops->pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	kfree(rk_domain);
}

static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
	struct rk_iommudata *data;
	struct rk_iommu *iommu;

	data = dev_iommu_priv_get(dev);
	if (!data)
		return ERR_PTR(-ENODEV);

	iommu = rk_iommu_from_dev(dev);

	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return &iommu->iommu;
}

static void rk_iommu_release_device(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	device_link_del(data->link);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev_iommu_priv_set(dev, data);

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.probe_device = rk_iommu_probe_device,
	.release_device = rk_iommu_release_device,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= rk_iommu_attach_device,
		.map		= rk_iommu_map,
		.unmap		= rk_iommu_unmap,
		.iova_to_phys	= rk_iommu_iova_to_phys,
		.free		= rk_iommu_domain_free,
	}
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	const struct rk_iommu_ops *ops;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	ops = of_device_get_match_data(dev);
	if (!rk_ops)
		rk_ops = ops;

	/*
	 * That should not happen unless different versions of the
	 * hardware block are embedded in the same SoC
	 */
	if (WARN_ON(rk_ops != ops))
		return -EINVAL;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		devm_free_irq(iommu->dev, irq, iommu);
	}

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static struct rk_iommu_ops iommu_data_ops_v1 = {
	.pt_address = &rk_dte_pt_address,
	.mk_dtentries = &rk_mk_dte,
	.mk_ptentries = &rk_mk_pte,
	.dte_addr_phys = &rk_dte_addr_phys,
	.dma_addr_dte = &rk_dma_addr_dte,
	.dma_bit_mask = DMA_BIT_MASK(32),
};

static struct rk_iommu_ops iommu_data_ops_v2 = {
	.pt_address = &rk_dte_pt_address_v2,
	.mk_dtentries = &rk_mk_dte_v2,
	.mk_ptentries = &rk_mk_pte_v2,
	.dte_addr_phys = &rk_dte_addr_phys_v2,
	.dma_addr_dte = &rk_dma_addr_dte_v2,
	.dma_bit_mask = DMA_BIT_MASK(40),
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu",
	  .data = &iommu_data_ops_v1,
	},
	{ .compatible = "rockchip,rk3568-iommu",
	  .data = &iommu_data_ops_v2,
	},
	{ /* sentinel */ }
};

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};
builtin_platform_driver(rk_iommu_driver);