// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation.
 * Copyright 2018 Google LLC.
 *
 * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 * Author: Samu Onkalo <samu.onkalo@intel.com>
 * Author: Tomasz Figa <tfiga@chromium.org>
 */

#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>

#include "ipu3-mmu.h"

#define IPU3_PT_BITS		10
#define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
#define IPU3_PT_ORDER		(IPU3_PT_SIZE >> PAGE_SHIFT)

#define IPU3_ADDR2PTE(addr)	((addr) >> IPU3_PAGE_SHIFT)
#define IPU3_PTE2ADDR(pte)	((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)

#define IPU3_L2PT_SHIFT		IPU3_PT_BITS
#define IPU3_L2PT_MASK		((1UL << IPU3_L2PT_SHIFT) - 1)

#define IPU3_L1PT_SHIFT		IPU3_PT_BITS
#define IPU3_L1PT_MASK		((1UL << IPU3_L1PT_SHIFT) - 1)

#define IPU3_MMU_ADDRESS_BITS	(IPU3_PAGE_SHIFT + \
				 IPU3_L2PT_SHIFT + \
				 IPU3_L1PT_SHIFT)

#define IMGU_REG_BASE		0x4000
#define REG_TLB_INVALIDATE	(IMGU_REG_BASE + 0x300)
#define TLB_INVALIDATE		1
#define REG_L1_PHYS		(IMGU_REG_BASE + 0x304) /* 27-bit pfn */
#define REG_GP_HALT		(IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED		(IMGU_REG_BASE + 0x5e0)
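
/*
 * The IPU3 MMU uses a two-level page table. Each table (the single L1
 * table and every L2 table) is one 4 KiB page holding 1024 32-bit
 * entries, so an IOVA decodes into a 10-bit L1 index, a 10-bit L2
 * index and an IPU3_PAGE_SHIFT-bit page offset; L2 entries hold page
 * frame numbers.
 */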
struct imgu_mmu {
	struct device *dev;
	void __iomem *base;
	/* protect access to l2pts, l1pt */
	spinlock_t lock;

	void *dummy_page;
	u32 dummy_page_pteval;

	u32 *dummy_l2pt;
	u32 dummy_l2pt_pteval;

	u32 **l2pts;
	u32 *l1pt;

	struct imgu_mmu_info geometry;
};

static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
	return container_of(info, struct imgu_mmu, geometry);
}

/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}
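
/*
 * Call @func only if the imgu hardware is currently powered on; if
 * runtime PM reports the device as not in use, the call is skipped.
 */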
static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
				    void (*func)(struct imgu_mmu *mmu))
{
	if (!pm_runtime_get_if_in_use(mmu->dev))
		return;

	func(mmu);
	pm_runtime_put(mmu->dev);
}

/**
 * imgu_mmu_set_halt - set CIO gate halt bit
 * @mmu: MMU to set the CIO gate bit in.
 * @halt: Desired state of the gate bit.
 *
 * This function sets the CIO gate bit that controls whether external memory
 * accesses are allowed. Must be called when the hardware is powered on.
 */
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
	int ret;
	u32 val;

	writel(halt, mmu->base + REG_GP_HALT);
	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
				 val, (val & 1) == halt, 1000, 100000);

	if (ret)
		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
			halt ? "set" : "clear");
}

/**
 * imgu_mmu_alloc_page_table - allocate a pre-filled page table
 * @pteval: Value to initialize the page table entries with.
 *
 * Return: Pointer to allocated page table or NULL on failure.
 */
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
	u32 *pt;
	int pte;

	pt = (u32 *)__get_free_page(GFP_KERNEL);
	if (!pt)
		return NULL;

	for (pte = 0; pte < IPU3_PT_PTES; pte++)
		pt[pte] = pteval;

	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);

	return pt;
}

/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
	free_page((unsigned long)pt);
}

/**
 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
 * @iova: IOVA to split.
 * @l1pt_idx: Output for the L1 page table index.
 * @l2pt_idx: Output for the L2 page table index.
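 *
 * For example, with 4 KiB pages, an IOVA of 0x12345000 splits into
 * l1pt_idx 0x48 and l2pt_idx 0x345; the low IPU3_PAGE_SHIFT offset
 * bits are discarded.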
 */
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
				      u32 *l2pt_idx)
{
	iova >>= IPU3_PAGE_SHIFT;

	if (l2pt_idx)
		*l2pt_idx = iova & IPU3_L2PT_MASK;

	iova >>= IPU3_L2PT_SHIFT;

	if (l1pt_idx)
		*l1pt_idx = iova & IPU3_L1PT_MASK;
}

static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt)
		goto done;

	spin_unlock_irqrestore(&mmu->lock, flags);
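	/*
	 * imgu_mmu_alloc_page_table() uses GFP_KERNEL and may sleep, so
	 * allocate with the lock dropped. If a concurrent caller installs
	 * an L2 page table first, the duplicate is freed once the lock is
	 * re-taken below.
	 */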
	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		imgu_mmu_free_page_table(new_l2pt);
		goto done;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

done:
	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}
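
/*
 * Install a single page-sized mapping: look up (or create) the L2 page
 * table covering @iova and replace the dummy PTE with the page frame
 * number of @paddr. Returns -EBUSY if the PTE is already mapped.
 */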
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);

	return 0;
}

/**
 * imgu_mmu_map - map a buffer to a physical address
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mappable area
 *
 * The function has been adapted from iommu_map() in
 * drivers/iommu/iommu.c.
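 *
 * A minimal usage sketch (with hypothetical @info, @pg and @iova, not
 * taken from this driver):
 *
 *	imgu_mmu_map(info, iova, page_to_phys(pg), IPU3_PAGE_SIZE);
 *
 * maps a single hardware page; @iova, @paddr and @size must all be
 * IPU3_PAGE_SIZE aligned, otherwise -EINVAL is returned.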
 */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	int ret = 0;

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
			iova, &paddr, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	while (size) {
		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

		ret = __imgu_mmu_map(mmu, iova, paddr);
		if (ret)
			break;

		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return ret;
}

/**
 * imgu_mmu_map_sg - Map a scatterlist
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * The function has been adapted from default_iommu_map_sg() in
 * drivers/iommu/iommu.c.
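 *
 * Returns the number of bytes mapped, or 0 on failure, in which case
 * any mappings already created here are torn down again.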
 */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	struct scatterlist *s;
	size_t s_length, mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		s_length = s->length;

		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
			goto out_err;

		/* must be IPU3_PAGE_SIZE aligned to be mapped singly */
		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
			s_length = PAGE_ALIGN(s->length);

		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
		if (ret)
			goto out_err;

		mapped += s_length;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return mapped;

out_err:
	/* undo mappings already done */
	imgu_mmu_unmap(info, iova, mapped);

	return 0;
}
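
/*
 * Reset a single PTE back to the dummy page. Returns @size if the PTE
 * was mapped, or 0 if it was not mapped to begin with (or if no L2
 * page table covers @iova).
 */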
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return unmap;
}

/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c.
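 *
 * Returns the number of bytes unmapped; the loop stops early when it
 * reaches an area that is not mapped.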
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return unmapped;
}

/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent: struct device parent
 * @base: IOMEM base of hardware registers.
 *
 * Return: Pointer to the IPU3 MMU private data or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access while there are no valid page tables. */
	imgu_mmu_set_halt(mmu, true);

	/*
	 * The MMU does not have a "valid" bit, so we have to use a dummy
	 * page for invalid entries.
	 */
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_group;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

	/*
	 * Allocate a dummy L2 page table with all entries pointing to
	 * the dummy page.
	 */
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

	/*
	 * Allocate the array of L2PT CPU pointers, initialized to zero,
	 * which means the dummy L2PT allocated above.
	 */
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_l2pt;

	/* Allocate the L1 page table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_group:
	kfree(mmu);

	return ERR_PTR(-ENOMEM);
}

/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	/* We are going to free our page tables, no more memory access. */
	imgu_mmu_set_halt(mmu, true);
	imgu_mmu_tlb_invalidate(mmu);

	imgu_mmu_free_page_table(mmu->l1pt);
	vfree(mmu->l2pts);
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
	free_page((unsigned long)mmu->dummy_page);
	kfree(mmu);
}

void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}

void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	imgu_mmu_set_halt(mmu, true);

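	/* Restore the L1 page table base address in the hardware. */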
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}