iommu/exynos: Block SYSMMU while invalidating FLPD cache
drivers/iommu/exynos-iommu.c (linux-block.git)
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#ifdef CONFIG_BIG_ENDIAN
#warning "revisit driver if we can enable big-endian ptes"
#endif

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

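/*
 * Editor's illustration (not part of the original driver): with
 * PG_ENT_SHIFT == 4 on a v5.0 SYSMMU, a 36-bit physical address still
 * fits into a 32-bit page table entry because only pa >> 4 is stored:
 *
 *	pa  = 0x880000000;			36-bit physical address
 *	pte = (pa >> PG_ENT_SHIFT) | flags;	0x88000000 | flags (32 bits)
 *
 * sect_to_phys() below reverses the shift when an entry is read back.
 * On v1.x - v3.x the shift is 0 and entries store the address directly.
 */
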
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

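/*
 * Editor's illustration (not part of the original driver): the tables
 * above are indexed directly by the IOMMU_READ/IOMMU_WRITE bits of the
 * 'prot' argument (IOMMU_READ == 1, IOMMU_WRITE == 2) after it has been
 * masked with SYSMMU_SUPPORTED_PROT_BITS. For example, on a v5 SYSMMU:
 *
 *	prot = IOMMU_READ | IOMMU_WRITE;	index 3
 *	bits = SYSMMU_V5_LV2_PROT[prot];	(3 << 2) = 0xC
 *
 * mk_lv2ent_spage() below then ORs these bits into the new page table
 * entry.
 */
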
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

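/*
 * Editor's illustration (hypothetical address, not part of the original
 * driver): decomposing iova 0x12345678 with the helpers above:
 *
 *	lv1ent_offset(0x12345678) = 0x12345678 >> 20	      = 0x123
 *	lv2ent_offset(0x12345678) = (0x12345678 >> 12) & 0xFF = 0x45
 *	spage_offs(0x12345678)	  = 0x12345678 & 0xFFF	      = 0x678
 *
 * i.e. lv1 entry 0x123 of 4096, lv2 entry 0x45 of 256, byte 0x678 of
 * the 4KiB page.
 */
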
#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is the exynos specific generalization of struct
 * iommu_domain. It contains a list of the SYSMMU controllers from all
 * master devices which have been attached to this domain, as well as the
 * page tables of the IO address space defined by it. It is usually
 * referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list
 * nodes to connect it to all other structures, internal state and
 * parameters read from the device tree. It is usually referenced by the
 * 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

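/*
 * Editor's note (not part of the original driver): sysmmu_block() puts
 * the SYSMMU into the blocked state required for TLB/FLPD cache
 * maintenance. It polls REG_MMU_STATUS up to 120 times and returns
 * false, after re-enabling translation, if blocking never takes effect.
 * The callers below therefore follow this pattern:
 *
 *	if (sysmmu_block(data)) {
 *		__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
 *		sysmmu_unblock(data);
 *	}
 */
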
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		if (MMU_MAJ_VER(data->version) < 5)
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
		else
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* the SYSMMU is in the blocked state when an interrupt has occurred */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a
	 * short time, while accessing the registers. For performing
	 * address translation during a DMA transaction it relies on the
	 * client driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

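/*
 * Editor's note: this patch ("iommu/exynos: Block SYSMMU while
 * invalidating FLPD cache") is what wraps the invalidation above in
 * sysmmu_block()/sysmmu_unblock(); previously the entry was invalidated
 * while the SYSMMU kept running, which does not match the blocked-state
 * requirement for TLB/FLPD cache maintenance described above.
 */
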
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets.
		 * A 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

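/*
 * Editor's illustration (not part of the original driver): for the v2
 * num_inv computation above, unmapping a 64KB large page gives
 * size / PAGE_SIZE = 65536 / 4096 = 16 invalidations, while a 1MB
 * section gives 256, which min_t() clamps to 64, i.e. one per TLB set,
 * matching the table in the comment.
 */
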
static struct iommu_ops exynos_iommu_ops;

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

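/*
 * Editor's note (not part of the original driver): the page tables live
 * in cacheable memory that was dma_map_single()'d with DMA_TO_DEVICE,
 * so every PTE update is bracketed by dma_sync_single_for_cpu()/
 * dma_sync_single_for_device(). The second sync writes the dirty cache
 * line back to RAM, where the SYSMMU's page table walker reads it;
 * without it the walker could see a stale entry. A typical caller,
 * taken from alloc_lv2entry() below:
 *
 *	update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
 */
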
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page
		 * table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN; as of this patch it is done while the SYSMMU is
		 * blocked (see sysmmu_tlb_invalidate_flpdcache()).
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager should observe the
 * following workarounds.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * an h/w bug).
 *
 * In addition, the start address of an I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
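/*
 * Editor's illustration (hypothetical addresses): an IOVA allocator
 * honouring the v3.3 rules above could place two regions at 0x10000000
 * (1MiB long) and 0x10120000: both start addresses are 128KiB (0x20000)
 * aligned, and the gap between 0x10100000 (end of the first region) and
 * 0x10120000 is exactly 0x20000 bytes, i.e. the minimum 128KiB hole.
 */
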
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

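/*
 * Editor's note (not part of the original driver): 'size' is always one
 * of the page sizes advertised in exynos_iommu_ops.pgsize_bitmap, so the
 * dispatch above is exhaustive:
 *
 *	SECT_SIZE  (1MiB)  -> lv1set_section()
 *	LPAGE_SIZE (64KiB) -> alloc_lv2entry() + lv2set_page()
 *	SPAGE_SIZE (4KiB)  -> alloc_lv2entry() + lv2set_page()
 *
 * The IOMMU core splits larger requests into these sizes before calling
 * ->map().
 */
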
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

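/*
 * Editor's illustration (hypothetical values, not part of the original
 * driver): if the lv1 entry for iova 0x12345678 is a section entry with
 * base 0x40000000, the lookup above yields
 *
 *	phys = section_phys(entry) + section_offs(0x12345678)
 *	     = 0x40000000 + 0x45678 = 0x40045678;
 */
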
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	/*
	 * SYSMMU will be runtime activated via device link (dependency) to its
	 * master device, so there are no direct calls to pm_runtime_get/put
	 * in this driver.
	 */
	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);

	return 0;
}

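/*
 * Editor's illustration (hypothetical device tree, not part of the
 * original driver): exynos_iommu_of_xlate() runs once per "iommus"
 * phandle of a master node, e.g.:
 *
 *	sysmmu_fimd: sysmmu@14640000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		...
 *	};
 *
 *	fimd: fimd@14400000 {
 *		iommus = <&sysmmu_fimd>;
 *	};
 *
 * Each call links one SYSMMU's drvdata into the master's owner list and
 * adds a runtime PM device link between the two devices.
 */
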
static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENODEV;

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);