iommu/arm-smmu: Keep track of S2CR state
drivers/iommu/arm-smmu.c
45ae7cff
WD
1/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
45ae7cff
WD
26 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
1f3d5ca4 31#include <linux/atomic.h>
45ae7cff 32#include <linux/delay.h>
9adb9594 33#include <linux/dma-iommu.h>
45ae7cff
WD
34#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
f9a05f05 38#include <linux/io-64-nonatomic-hi-lo.h>
45ae7cff 39#include <linux/iommu.h>
859a732e 40#include <linux/iopoll.h>
45ae7cff
WD
41#include <linux/module.h>
42#include <linux/of.h>
bae2c2d4 43#include <linux/of_address.h>
a9a1b0b5 44#include <linux/pci.h>
45ae7cff
WD
45#include <linux/platform_device.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48
49#include <linux/amba/bus.h>
50
518f7136 51#include "io-pgtable.h"
45ae7cff
WD
52
53/* Maximum number of stream IDs assigned to a single device */
cb6c27bb 54#define MAX_MASTER_STREAMIDS 128
45ae7cff
WD
55
56/* Maximum number of context banks per SMMU */
57#define ARM_SMMU_MAX_CBS 128
58
45ae7cff
WD
59/* SMMU global address space */
60#define ARM_SMMU_GR0(smmu) ((smmu)->base)
c757e852 61#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
45ae7cff 62
3a5df8ff
AH
63/*
64 * SMMU global address space with conditional offset to access secure
65 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
66 * nsGFSYNR0: 0x450)
67 */
68#define ARM_SMMU_GR0_NS(smmu) \
69 ((smmu)->base + \
70 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
71 ? 0x400 : 0))
72
f9a05f05
RM
73/*
74 * Some 64-bit registers only make sense to write atomically, but in such
75 * cases all the data relevant to AArch32 formats lies within the lower word,
76 * therefore this actually makes more sense than it might first appear.
77 */
668b4ada 78#ifdef CONFIG_64BIT
f9a05f05 79#define smmu_write_atomic_lq writeq_relaxed
668b4ada 80#else
f9a05f05 81#define smmu_write_atomic_lq writel_relaxed
668b4ada
TC
82#endif
83
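/*
 * Illustrative sketch (not part of the driver): how a 64-bit register such
 * as a TTBR would be updated through the macro above. On a 64-bit kernel
 * this is a single 64-bit store; on a 32-bit kernel it degrades to a store
 * of the low word, which covers everything the AArch32 formats define.
 * The helper name is made up for demonstration only.
 */
#if 0
static void example_write_ttbr(void __iomem *ttbr_reg, u64 val)
{
	smmu_write_atomic_lq(val, ttbr_reg);
}
#endif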
45ae7cff
WD
84/* Configuration registers */
85#define ARM_SMMU_GR0_sCR0 0x0
86#define sCR0_CLIENTPD (1 << 0)
87#define sCR0_GFRE (1 << 1)
88#define sCR0_GFIE (1 << 2)
89#define sCR0_GCFGFRE (1 << 4)
90#define sCR0_GCFGFIE (1 << 5)
91#define sCR0_USFCFG (1 << 10)
92#define sCR0_VMIDPNE (1 << 11)
93#define sCR0_PTM (1 << 12)
94#define sCR0_FB (1 << 13)
4e3e9b69 95#define sCR0_VMID16EN (1 << 31)
45ae7cff
WD
96#define sCR0_BSU_SHIFT 14
97#define sCR0_BSU_MASK 0x3
98
3ca3712a
PF
99/* Auxiliary Configuration register */
100#define ARM_SMMU_GR0_sACR 0x10
101
45ae7cff
WD
102/* Identification registers */
103#define ARM_SMMU_GR0_ID0 0x20
104#define ARM_SMMU_GR0_ID1 0x24
105#define ARM_SMMU_GR0_ID2 0x28
106#define ARM_SMMU_GR0_ID3 0x2c
107#define ARM_SMMU_GR0_ID4 0x30
108#define ARM_SMMU_GR0_ID5 0x34
109#define ARM_SMMU_GR0_ID6 0x38
110#define ARM_SMMU_GR0_ID7 0x3c
111#define ARM_SMMU_GR0_sGFSR 0x48
112#define ARM_SMMU_GR0_sGFSYNR0 0x50
113#define ARM_SMMU_GR0_sGFSYNR1 0x54
114#define ARM_SMMU_GR0_sGFSYNR2 0x58
45ae7cff
WD
115
116#define ID0_S1TS (1 << 30)
117#define ID0_S2TS (1 << 29)
118#define ID0_NTS (1 << 28)
119#define ID0_SMS (1 << 27)
859a732e 120#define ID0_ATOSNS (1 << 26)
7602b871
RM
121#define ID0_PTFS_NO_AARCH32 (1 << 25)
122#define ID0_PTFS_NO_AARCH32S (1 << 24)
45ae7cff
WD
123#define ID0_CTTW (1 << 14)
124#define ID0_NUMIRPT_SHIFT 16
125#define ID0_NUMIRPT_MASK 0xff
3c8766d0
OH
126#define ID0_NUMSIDB_SHIFT 9
127#define ID0_NUMSIDB_MASK 0xf
45ae7cff
WD
128#define ID0_NUMSMRG_SHIFT 0
129#define ID0_NUMSMRG_MASK 0xff
130
131#define ID1_PAGESIZE (1 << 31)
132#define ID1_NUMPAGENDXB_SHIFT 28
133#define ID1_NUMPAGENDXB_MASK 7
134#define ID1_NUMS2CB_SHIFT 16
135#define ID1_NUMS2CB_MASK 0xff
136#define ID1_NUMCB_SHIFT 0
137#define ID1_NUMCB_MASK 0xff
138
139#define ID2_OAS_SHIFT 4
140#define ID2_OAS_MASK 0xf
141#define ID2_IAS_SHIFT 0
142#define ID2_IAS_MASK 0xf
143#define ID2_UBS_SHIFT 8
144#define ID2_UBS_MASK 0xf
145#define ID2_PTFS_4K (1 << 12)
146#define ID2_PTFS_16K (1 << 13)
147#define ID2_PTFS_64K (1 << 14)
4e3e9b69 148#define ID2_VMID16 (1 << 15)
45ae7cff 149
3ca3712a
PF
150#define ID7_MAJOR_SHIFT 4
151#define ID7_MAJOR_MASK 0xf
45ae7cff 152
45ae7cff 153/* Global TLB invalidation */
45ae7cff
WD
154#define ARM_SMMU_GR0_TLBIVMID 0x64
155#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
156#define ARM_SMMU_GR0_TLBIALLH 0x6c
157#define ARM_SMMU_GR0_sTLBGSYNC 0x70
158#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
159#define sTLBGSTATUS_GSACTIVE (1 << 0)
160#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
161
162/* Stream mapping registers */
163#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
164#define SMR_VALID (1 << 31)
165#define SMR_MASK_SHIFT 16
45ae7cff 166#define SMR_ID_SHIFT 0
45ae7cff
WD
167
168#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
169#define S2CR_CBNDX_SHIFT 0
170#define S2CR_CBNDX_MASK 0xff
171#define S2CR_TYPE_SHIFT 16
172#define S2CR_TYPE_MASK 0x3
8e8b203e
RM
173enum arm_smmu_s2cr_type {
174 S2CR_TYPE_TRANS,
175 S2CR_TYPE_BYPASS,
176 S2CR_TYPE_FAULT,
177};
45ae7cff 178
d346180e 179#define S2CR_PRIVCFG_SHIFT 24
8e8b203e
RM
180#define S2CR_PRIVCFG_MASK 0x3
181enum arm_smmu_s2cr_privcfg {
182 S2CR_PRIVCFG_DEFAULT,
183 S2CR_PRIVCFG_DIPAN,
184 S2CR_PRIVCFG_UNPRIV,
185 S2CR_PRIVCFG_PRIV,
186};
d346180e 187
45ae7cff
WD
188/* Context bank attribute registers */
189#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
190#define CBAR_VMID_SHIFT 0
191#define CBAR_VMID_MASK 0xff
57ca90f6
WD
192#define CBAR_S1_BPSHCFG_SHIFT 8
193#define CBAR_S1_BPSHCFG_MASK 3
194#define CBAR_S1_BPSHCFG_NSH 3
45ae7cff
WD
195#define CBAR_S1_MEMATTR_SHIFT 12
196#define CBAR_S1_MEMATTR_MASK 0xf
197#define CBAR_S1_MEMATTR_WB 0xf
198#define CBAR_TYPE_SHIFT 16
199#define CBAR_TYPE_MASK 0x3
200#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
201#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
202#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
203#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
204#define CBAR_IRPTNDX_SHIFT 24
205#define CBAR_IRPTNDX_MASK 0xff
206
207#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
208#define CBA2R_RW64_32BIT (0 << 0)
209#define CBA2R_RW64_64BIT (1 << 0)
4e3e9b69
TC
210#define CBA2R_VMID_SHIFT 16
211#define CBA2R_VMID_MASK 0xffff
45ae7cff
WD
212
213/* Translation context bank */
214#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
c757e852 215#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
45ae7cff
WD
216
217#define ARM_SMMU_CB_SCTLR 0x0
f0cfffc4 218#define ARM_SMMU_CB_ACTLR 0x4
45ae7cff
WD
219#define ARM_SMMU_CB_RESUME 0x8
220#define ARM_SMMU_CB_TTBCR2 0x10
668b4ada
TC
221#define ARM_SMMU_CB_TTBR0 0x20
222#define ARM_SMMU_CB_TTBR1 0x28
45ae7cff 223#define ARM_SMMU_CB_TTBCR 0x30
6070529b 224#define ARM_SMMU_CB_CONTEXTIDR 0x34
45ae7cff 225#define ARM_SMMU_CB_S1_MAIR0 0x38
518f7136 226#define ARM_SMMU_CB_S1_MAIR1 0x3c
f9a05f05 227#define ARM_SMMU_CB_PAR 0x50
45ae7cff 228#define ARM_SMMU_CB_FSR 0x58
f9a05f05 229#define ARM_SMMU_CB_FAR 0x60
45ae7cff 230#define ARM_SMMU_CB_FSYNR0 0x68
518f7136 231#define ARM_SMMU_CB_S1_TLBIVA 0x600
1463fe44 232#define ARM_SMMU_CB_S1_TLBIASID 0x610
518f7136
WD
233#define ARM_SMMU_CB_S1_TLBIVAL 0x620
234#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
235#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
661d962f 236#define ARM_SMMU_CB_ATS1PR 0x800
859a732e 237#define ARM_SMMU_CB_ATSR 0x8f0
45ae7cff
WD
238
239#define SCTLR_S1_ASIDPNE (1 << 12)
240#define SCTLR_CFCFG (1 << 7)
241#define SCTLR_CFIE (1 << 6)
242#define SCTLR_CFRE (1 << 5)
243#define SCTLR_E (1 << 4)
244#define SCTLR_AFE (1 << 2)
245#define SCTLR_TRE (1 << 1)
246#define SCTLR_M (1 << 0)
45ae7cff 247
f0cfffc4
RM
248#define ARM_MMU500_ACTLR_CPRE (1 << 1)
249
3ca3712a
PF
250#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
251
859a732e
MH
252#define CB_PAR_F (1 << 0)
253
254#define ATSR_ACTIVE (1 << 0)
255
45ae7cff
WD
256#define RESUME_RETRY (0 << 0)
257#define RESUME_TERMINATE (1 << 0)
258
45ae7cff 259#define TTBCR2_SEP_SHIFT 15
5dc5616e 260#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
45ae7cff 261
668b4ada 262#define TTBRn_ASID_SHIFT 48
45ae7cff
WD
263
264#define FSR_MULTI (1 << 31)
265#define FSR_SS (1 << 30)
266#define FSR_UUT (1 << 8)
267#define FSR_ASF (1 << 7)
268#define FSR_TLBLKF (1 << 6)
269#define FSR_TLBMCF (1 << 5)
270#define FSR_EF (1 << 4)
271#define FSR_PF (1 << 3)
272#define FSR_AFF (1 << 2)
273#define FSR_TF (1 << 1)
274
2907320d
MH
275#define FSR_IGN (FSR_AFF | FSR_ASF | \
276 FSR_TLBMCF | FSR_TLBLKF)
277#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
adaba320 278 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
45ae7cff
WD
279
280#define FSYNR0_WNR (1 << 4)
281
4cf740b0 282static int force_stage;
25a1c96c 283module_param(force_stage, int, S_IRUGO);
4cf740b0
WD
284MODULE_PARM_DESC(force_stage,
285 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
25a1c96c
RM
286static bool disable_bypass;
287module_param(disable_bypass, bool, S_IRUGO);
288MODULE_PARM_DESC(disable_bypass,
289 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
4cf740b0 290
09360403 291enum arm_smmu_arch_version {
b7862e35
RM
292 ARM_SMMU_V1,
293 ARM_SMMU_V1_64K,
09360403
RM
294 ARM_SMMU_V2,
295};
296
67b65a3f
RM
297enum arm_smmu_implementation {
298 GENERIC_SMMU,
f0cfffc4 299 ARM_MMU500,
e086d912 300 CAVIUM_SMMUV2,
67b65a3f
RM
301};
302
8e8b203e
RM
303struct arm_smmu_s2cr {
304 enum arm_smmu_s2cr_type type;
305 enum arm_smmu_s2cr_privcfg privcfg;
306 u8 cbndx;
307};
308
309#define s2cr_init_val (struct arm_smmu_s2cr){ \
310 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
311}
312
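/*
 * Illustrative sketch (not part of the driver): how the tracked S2CR state
 * above maps onto the register fields defined earlier. This mirrors what
 * arm_smmu_write_s2cr() does further down; the helper name is made up.
 */
#if 0
static u32 example_pack_s2cr(const struct arm_smmu_s2cr *s2cr)
{
	return (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
	       (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
	       (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
}
#endif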
45ae7cff 313struct arm_smmu_smr {
45ae7cff
WD
314 u16 mask;
315 u16 id;
1f3d5ca4 316 bool valid;
45ae7cff
WD
317};
318
a9a1b0b5 319struct arm_smmu_master_cfg {
45ae7cff
WD
320 int num_streamids;
321 u16 streamids[MAX_MASTER_STREAMIDS];
1f3d5ca4 322 s16 smendx[MAX_MASTER_STREAMIDS];
45ae7cff 323};
1f3d5ca4 324#define INVALID_SMENDX -1
45ae7cff 325
a9a1b0b5
WD
326struct arm_smmu_master {
327 struct device_node *of_node;
a9a1b0b5
WD
328 struct rb_node node;
329 struct arm_smmu_master_cfg cfg;
330};
331
45ae7cff
WD
332struct arm_smmu_device {
333 struct device *dev;
45ae7cff
WD
334
335 void __iomem *base;
336 unsigned long size;
c757e852 337 unsigned long pgshift;
45ae7cff
WD
338
339#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
340#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
341#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
342#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
343#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
859a732e 344#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
4e3e9b69 345#define ARM_SMMU_FEAT_VMID16 (1 << 6)
7602b871
RM
346#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
347#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
348#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
349#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
350#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
45ae7cff 351 u32 features;
3a5df8ff
AH
352
353#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
354 u32 options;
09360403 355 enum arm_smmu_arch_version version;
67b65a3f 356 enum arm_smmu_implementation model;
45ae7cff
WD
357
358 u32 num_context_banks;
359 u32 num_s2_context_banks;
360 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
361 atomic_t irptndx;
362
363 u32 num_mapping_groups;
21174240
RM
364 u16 streamid_mask;
365 u16 smr_mask_mask;
1f3d5ca4 366 struct arm_smmu_smr *smrs;
8e8b203e 367 struct arm_smmu_s2cr *s2crs;
45ae7cff 368
518f7136
WD
369 unsigned long va_size;
370 unsigned long ipa_size;
371 unsigned long pa_size;
d5466357 372 unsigned long pgsize_bitmap;
45ae7cff
WD
373
374 u32 num_global_irqs;
375 u32 num_context_irqs;
376 unsigned int *irqs;
377
45ae7cff
WD
378 struct list_head list;
379 struct rb_root masters;
1bd37a68
TC
380
381 u32 cavium_id_base; /* Specific to Cavium */
45ae7cff
WD
382};
383
7602b871
RM
384enum arm_smmu_context_fmt {
385 ARM_SMMU_CTX_FMT_NONE,
386 ARM_SMMU_CTX_FMT_AARCH64,
387 ARM_SMMU_CTX_FMT_AARCH32_L,
388 ARM_SMMU_CTX_FMT_AARCH32_S,
45ae7cff
WD
389};
390
391struct arm_smmu_cfg {
45ae7cff
WD
392 u8 cbndx;
393 u8 irptndx;
394 u32 cbar;
7602b871 395 enum arm_smmu_context_fmt fmt;
45ae7cff 396};
faea13b7 397#define INVALID_IRPTNDX 0xff
45ae7cff 398
1bd37a68
TC
399#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
400#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
ecfadb6e 401
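/*
 * Worked example (illustrative, hypothetical numbers): with cavium_id_base
 * set to 8, context bank 2 would use ASID 10 for stage 1 and VMID 11 for
 * stage 2, keeping the global ASID/VMID spaces of multiple Cavium SMMUs
 * from overlapping.
 */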
c752ce45
WD
402enum arm_smmu_domain_stage {
403 ARM_SMMU_DOMAIN_S1 = 0,
404 ARM_SMMU_DOMAIN_S2,
405 ARM_SMMU_DOMAIN_NESTED,
406};
407
45ae7cff 408struct arm_smmu_domain {
44680eed 409 struct arm_smmu_device *smmu;
518f7136
WD
410 struct io_pgtable_ops *pgtbl_ops;
411 spinlock_t pgtbl_lock;
44680eed 412 struct arm_smmu_cfg cfg;
c752ce45 413 enum arm_smmu_domain_stage stage;
518f7136 414 struct mutex init_mutex; /* Protects smmu pointer */
1d672638 415 struct iommu_domain domain;
45ae7cff
WD
416};
417
cb6c27bb
JR
418struct arm_smmu_phandle_args {
419 struct device_node *np;
420 int args_count;
421 uint32_t args[MAX_MASTER_STREAMIDS];
422};
423
45ae7cff
WD
424static DEFINE_SPINLOCK(arm_smmu_devices_lock);
425static LIST_HEAD(arm_smmu_devices);
426
3a5df8ff
AH
427struct arm_smmu_option_prop {
428 u32 opt;
429 const char *prop;
430};
431
1bd37a68
TC
432static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
433
2907320d 434static struct arm_smmu_option_prop arm_smmu_options[] = {
3a5df8ff
AH
435 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
436 { 0, NULL},
437};
438
1d672638
JR
439static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
440{
441 return container_of(dom, struct arm_smmu_domain, domain);
442}
443
3a5df8ff
AH
444static void parse_driver_options(struct arm_smmu_device *smmu)
445{
446 int i = 0;
2907320d 447
3a5df8ff
AH
448 do {
449 if (of_property_read_bool(smmu->dev->of_node,
450 arm_smmu_options[i].prop)) {
451 smmu->options |= arm_smmu_options[i].opt;
452 dev_notice(smmu->dev, "option %s\n",
453 arm_smmu_options[i].prop);
454 }
455 } while (arm_smmu_options[++i].opt);
456}
457
8f68f8e2 458static struct device_node *dev_get_dev_node(struct device *dev)
a9a1b0b5
WD
459{
460 if (dev_is_pci(dev)) {
461 struct pci_bus *bus = to_pci_dev(dev)->bus;
2907320d 462
a9a1b0b5
WD
463 while (!pci_is_root_bus(bus))
464 bus = bus->parent;
8f68f8e2 465 return bus->bridge->parent->of_node;
a9a1b0b5
WD
466 }
467
8f68f8e2 468 return dev->of_node;
a9a1b0b5
WD
469}
470
45ae7cff
WD
471static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
472 struct device_node *dev_node)
473{
474 struct rb_node *node = smmu->masters.rb_node;
475
476 while (node) {
477 struct arm_smmu_master *master;
2907320d 478
45ae7cff
WD
479 master = container_of(node, struct arm_smmu_master, node);
480
481 if (dev_node < master->of_node)
482 node = node->rb_left;
483 else if (dev_node > master->of_node)
484 node = node->rb_right;
485 else
486 return master;
487 }
488
489 return NULL;
490}
491
a9a1b0b5 492static struct arm_smmu_master_cfg *
8f68f8e2 493find_smmu_master_cfg(struct device *dev)
a9a1b0b5 494{
8f68f8e2
WD
495 struct arm_smmu_master_cfg *cfg = NULL;
496 struct iommu_group *group = iommu_group_get(dev);
a9a1b0b5 497
8f68f8e2
WD
498 if (group) {
499 cfg = iommu_group_get_iommudata(group);
500 iommu_group_put(group);
501 }
a9a1b0b5 502
8f68f8e2 503 return cfg;
a9a1b0b5
WD
504}
505
45ae7cff
WD
506static int insert_smmu_master(struct arm_smmu_device *smmu,
507 struct arm_smmu_master *master)
508{
509 struct rb_node **new, *parent;
510
511 new = &smmu->masters.rb_node;
512 parent = NULL;
513 while (*new) {
2907320d
MH
514 struct arm_smmu_master *this
515 = container_of(*new, struct arm_smmu_master, node);
45ae7cff
WD
516
517 parent = *new;
518 if (master->of_node < this->of_node)
519 new = &((*new)->rb_left);
520 else if (master->of_node > this->of_node)
521 new = &((*new)->rb_right);
522 else
523 return -EEXIST;
524 }
525
526 rb_link_node(&master->node, parent, new);
527 rb_insert_color(&master->node, &smmu->masters);
528 return 0;
529}
530
531static int register_smmu_master(struct arm_smmu_device *smmu,
532 struct device *dev,
cb6c27bb 533 struct arm_smmu_phandle_args *masterspec)
45ae7cff
WD
534{
535 int i;
536 struct arm_smmu_master *master;
537
538 master = find_smmu_master(smmu, masterspec->np);
539 if (master) {
540 dev_err(dev,
541 "rejecting multiple registrations for master device %s\n",
542 masterspec->np->name);
543 return -EBUSY;
544 }
545
546 if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
547 dev_err(dev,
548 "reached maximum number (%d) of stream IDs for master device %s\n",
549 MAX_MASTER_STREAMIDS, masterspec->np->name);
550 return -ENOSPC;
551 }
552
553 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
554 if (!master)
555 return -ENOMEM;
556
a9a1b0b5
WD
557 master->of_node = masterspec->np;
558 master->cfg.num_streamids = masterspec->args_count;
45ae7cff 559
3c8766d0
OH
560 for (i = 0; i < master->cfg.num_streamids; ++i) {
561 u16 streamid = masterspec->args[i];
45ae7cff 562
3c8766d0
OH
563 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
564 (streamid >= smmu->num_mapping_groups)) {
565 dev_err(dev,
566 "stream ID for master device %s greater than maximum allowed (%d)\n",
567 masterspec->np->name, smmu->num_mapping_groups);
568 return -ERANGE;
569 }
570 master->cfg.streamids[i] = streamid;
1f3d5ca4 571 master->cfg.smendx[i] = INVALID_SMENDX;
3c8766d0 572 }
45ae7cff
WD
573 return insert_smmu_master(smmu, master);
574}
575
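/*
 * Illustrative sketch (not part of the driver): how the probe path (outside
 * this hunk) would feed one parsed "mmu-masters" entry into
 * register_smmu_master(). The stream ID value is made up.
 */
#if 0
static int example_register_one_master(struct arm_smmu_device *smmu,
				       struct device *dev,
				       struct device_node *master_np)
{
	struct arm_smmu_phandle_args masterspec = {
		.np		= master_np,
		.args_count	= 1,
		.args		= { 0x42 },	/* hypothetical stream ID */
	};

	return register_smmu_master(smmu, dev, &masterspec);
}
#endif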
44680eed 576static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
45ae7cff 577{
44680eed 578 struct arm_smmu_device *smmu;
a9a1b0b5 579 struct arm_smmu_master *master = NULL;
8f68f8e2 580 struct device_node *dev_node = dev_get_dev_node(dev);
45ae7cff
WD
581
582 spin_lock(&arm_smmu_devices_lock);
44680eed 583 list_for_each_entry(smmu, &arm_smmu_devices, list) {
a9a1b0b5
WD
584 master = find_smmu_master(smmu, dev_node);
585 if (master)
586 break;
587 }
45ae7cff 588 spin_unlock(&arm_smmu_devices_lock);
44680eed 589
a9a1b0b5 590 return master ? smmu : NULL;
45ae7cff
WD
591}
592
593static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
594{
595 int idx;
596
597 do {
598 idx = find_next_zero_bit(map, end, start);
599 if (idx == end)
600 return -ENOSPC;
601 } while (test_and_set_bit(idx, map));
602
603 return idx;
604}
605
606static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
607{
608 clear_bit(idx, map);
609}
610
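/*
 * Usage sketch (illustrative only): claim any free context bank index and
 * release it again using the helpers above.
 */
#if 0
static void example_context_bank_cycle(struct arm_smmu_device *smmu)
{
	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map, 0,
					    smmu->num_context_banks);

	if (cbndx < 0)
		return;	/* all context banks are in use */

	__arm_smmu_free_bitmap(smmu->context_map, cbndx);
}
#endif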
611/* Wait for any pending TLB invalidations to complete */
518f7136 612static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
45ae7cff
WD
613{
614 int count = 0;
615 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
616
617 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
618 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
619 & sTLBGSTATUS_GSACTIVE) {
620 cpu_relax();
621 if (++count == TLB_LOOP_TIMEOUT) {
622 dev_err_ratelimited(smmu->dev,
623 "TLB sync timed out -- SMMU may be deadlocked\n");
624 return;
625 }
626 udelay(1);
627 }
628}
629
518f7136
WD
630static void arm_smmu_tlb_sync(void *cookie)
631{
632 struct arm_smmu_domain *smmu_domain = cookie;
633 __arm_smmu_tlb_sync(smmu_domain->smmu);
634}
635
636static void arm_smmu_tlb_inv_context(void *cookie)
1463fe44 637{
518f7136 638 struct arm_smmu_domain *smmu_domain = cookie;
44680eed
WD
639 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
640 struct arm_smmu_device *smmu = smmu_domain->smmu;
1463fe44 641 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
518f7136 642 void __iomem *base;
1463fe44
WD
643
644 if (stage1) {
645 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1bd37a68 646 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
ecfadb6e 647 base + ARM_SMMU_CB_S1_TLBIASID);
1463fe44
WD
648 } else {
649 base = ARM_SMMU_GR0(smmu);
1bd37a68 650 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
ecfadb6e 651 base + ARM_SMMU_GR0_TLBIVMID);
1463fe44
WD
652 }
653
518f7136
WD
654 __arm_smmu_tlb_sync(smmu);
655}
656
657static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
06c610e8 658 size_t granule, bool leaf, void *cookie)
518f7136
WD
659{
660 struct arm_smmu_domain *smmu_domain = cookie;
661 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
662 struct arm_smmu_device *smmu = smmu_domain->smmu;
663 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
664 void __iomem *reg;
665
666 if (stage1) {
667 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
668 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
669
7602b871 670 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 671 iova &= ~0xfffUL;
1bd37a68 672 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
75df1386
RM
673 do {
674 writel_relaxed(iova, reg);
675 iova += granule;
676 } while (size -= granule);
518f7136
WD
677 } else {
678 iova >>= 12;
1bd37a68 679 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
75df1386
RM
680 do {
681 writeq_relaxed(iova, reg);
682 iova += granule >> 12;
683 } while (size -= granule);
518f7136 684 }
518f7136
WD
685 } else if (smmu->version == ARM_SMMU_V2) {
686 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
687 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
688 ARM_SMMU_CB_S2_TLBIIPAS2;
75df1386
RM
689 iova >>= 12;
690 do {
f9a05f05 691 smmu_write_atomic_lq(iova, reg);
75df1386
RM
692 iova += granule >> 12;
693 } while (size -= granule);
518f7136
WD
694 } else {
695 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
1bd37a68 696 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
518f7136
WD
697 }
698}
699
518f7136
WD
700static struct iommu_gather_ops arm_smmu_gather_ops = {
701 .tlb_flush_all = arm_smmu_tlb_inv_context,
702 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
703 .tlb_sync = arm_smmu_tlb_sync,
518f7136
WD
704};
705
45ae7cff
WD
706static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
707{
3714ce1d 708 u32 fsr, fsynr;
45ae7cff
WD
709 unsigned long iova;
710 struct iommu_domain *domain = dev;
1d672638 711 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed
WD
712 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
713 struct arm_smmu_device *smmu = smmu_domain->smmu;
45ae7cff
WD
714 void __iomem *cb_base;
715
44680eed 716 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
45ae7cff
WD
717 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
718
719 if (!(fsr & FSR_FAULT))
720 return IRQ_NONE;
721
45ae7cff 722 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
f9a05f05 723 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
45ae7cff 724
3714ce1d
WD
725 dev_err_ratelimited(smmu->dev,
726 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
727 fsr, iova, fsynr, cfg->cbndx);
45ae7cff 728
3714ce1d
WD
729 writel(fsr, cb_base + ARM_SMMU_CB_FSR);
730 return IRQ_HANDLED;
45ae7cff
WD
731}
732
733static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
734{
735 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
736 struct arm_smmu_device *smmu = dev;
3a5df8ff 737 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
45ae7cff
WD
738
739 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
740 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
741 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
742 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
743
3a5df8ff
AH
744 if (!gfsr)
745 return IRQ_NONE;
746
45ae7cff
WD
747 dev_err_ratelimited(smmu->dev,
748 "Unexpected global fault, this could be serious\n");
749 dev_err_ratelimited(smmu->dev,
750 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
751 gfsr, gfsynr0, gfsynr1, gfsynr2);
752
753 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
adaba320 754 return IRQ_HANDLED;
45ae7cff
WD
755}
756
518f7136
WD
757static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
758 struct io_pgtable_cfg *pgtbl_cfg)
45ae7cff 759{
6070529b 760 u32 reg, reg2;
668b4ada 761 u64 reg64;
45ae7cff 762 bool stage1;
44680eed
WD
763 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
764 struct arm_smmu_device *smmu = smmu_domain->smmu;
c88ae5de 765 void __iomem *cb_base, *gr1_base;
45ae7cff 766
45ae7cff 767 gr1_base = ARM_SMMU_GR1(smmu);
44680eed
WD
768 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
769 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
45ae7cff 770
4a1c93cb 771 if (smmu->version > ARM_SMMU_V1) {
7602b871
RM
772 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
773 reg = CBA2R_RW64_64BIT;
774 else
775 reg = CBA2R_RW64_32BIT;
4e3e9b69
TC
776 /* 16-bit VMIDs live in CBA2R */
777 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1bd37a68 778 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
4e3e9b69 779
4a1c93cb
WD
780 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
781 }
782
45ae7cff 783 /* CBAR */
44680eed 784 reg = cfg->cbar;
b7862e35 785 if (smmu->version < ARM_SMMU_V2)
2907320d 786 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
45ae7cff 787
57ca90f6
WD
788 /*
789 * Use the weakest shareability/memory types, so they are
790 * overridden by the ttbcr/pte.
791 */
792 if (stage1) {
793 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
794 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
4e3e9b69
TC
795 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
796 /* 8-bit VMIDs live in CBAR */
1bd37a68 797 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
57ca90f6 798 }
44680eed 799 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
45ae7cff 800
518f7136
WD
801 /* TTBRs */
802 if (stage1) {
6070529b
RM
803 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
804
805 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
806 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
807 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
808 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
809 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
810 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
811 } else {
812 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
813 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
814 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
815 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
816 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
817 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
818 }
518f7136 819 } else {
668b4ada 820 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
f9a05f05 821 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
518f7136 822 }
a65217a4 823
518f7136
WD
824 /* TTBCR */
825 if (stage1) {
6070529b
RM
826 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
827 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
828 reg2 = 0;
829 } else {
830 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
831 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
832 reg2 |= TTBCR2_SEP_UPSTREAM;
45ae7cff 833 }
6070529b
RM
834 if (smmu->version > ARM_SMMU_V1)
835 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
45ae7cff 836 } else {
518f7136 837 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
45ae7cff 838 }
6070529b 839 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
45ae7cff 840
518f7136 841 /* MAIRs (stage-1 only) */
45ae7cff 842 if (stage1) {
6070529b
RM
843 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
844 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
845 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
846 } else {
847 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
848 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
849 }
45ae7cff 850 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
6070529b 851 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
45ae7cff
WD
852 }
853
45ae7cff 854 /* SCTLR */
6070529b 855 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
45ae7cff
WD
856 if (stage1)
857 reg |= SCTLR_S1_ASIDPNE;
858#ifdef __BIG_ENDIAN
859 reg |= SCTLR_E;
860#endif
25724841 861 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
45ae7cff
WD
862}
863
864static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44680eed 865 struct arm_smmu_device *smmu)
45ae7cff 866{
a18037b2 867 int irq, start, ret = 0;
518f7136
WD
868 unsigned long ias, oas;
869 struct io_pgtable_ops *pgtbl_ops;
870 struct io_pgtable_cfg pgtbl_cfg;
871 enum io_pgtable_fmt fmt;
1d672638 872 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed 873 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
45ae7cff 874
518f7136 875 mutex_lock(&smmu_domain->init_mutex);
a18037b2
MH
876 if (smmu_domain->smmu)
877 goto out_unlock;
878
9800699c
RM
879 /* We're bypassing these SIDs, so don't allocate an actual context */
880 if (domain->type == IOMMU_DOMAIN_DMA) {
881 smmu_domain->smmu = smmu;
882 goto out_unlock;
883 }
884
c752ce45
WD
885 /*
886 * Mapping the requested stage onto what we support is surprisingly
887 * complicated, mainly because the spec allows S1+S2 SMMUs without
888 * support for nested translation. That means we end up with the
889 * following table:
890 *
891 * Requested Supported Actual
892 * S1 N S1
893 * S1 S1+S2 S1
894 * S1 S2 S2
895 * S1 S1 S1
896 * N N N
897 * N S1+S2 S2
898 * N S2 S2
899 * N S1 S1
900 *
901 * Note that you can't actually request stage-2 mappings.
902 */
903 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
904 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
905 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
906 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
907
7602b871
RM
908 /*
909 * Choosing a suitable context format is even more fiddly. Until we
910 * grow some way for the caller to express a preference, and/or move
911 * the decision into the io-pgtable code where it arguably belongs,
912 * just aim for the closest thing to the rest of the system, and hope
913 * that the hardware isn't esoteric enough that we can't assume AArch64
914 * support to be a superset of AArch32 support...
915 */
916 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
917 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
6070529b
RM
918 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
919 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
920 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
921 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
922 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
7602b871
RM
923 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
924 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
925 ARM_SMMU_FEAT_FMT_AARCH64_16K |
926 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
927 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
928
929 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
930 ret = -EINVAL;
931 goto out_unlock;
932 }
933
c752ce45
WD
934 switch (smmu_domain->stage) {
935 case ARM_SMMU_DOMAIN_S1:
936 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
937 start = smmu->num_s2_context_banks;
518f7136
WD
938 ias = smmu->va_size;
939 oas = smmu->ipa_size;
7602b871 940 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 941 fmt = ARM_64_LPAE_S1;
6070529b 942 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
518f7136 943 fmt = ARM_32_LPAE_S1;
7602b871
RM
944 ias = min(ias, 32UL);
945 oas = min(oas, 40UL);
6070529b
RM
946 } else {
947 fmt = ARM_V7S;
948 ias = min(ias, 32UL);
949 oas = min(oas, 32UL);
7602b871 950 }
c752ce45
WD
951 break;
952 case ARM_SMMU_DOMAIN_NESTED:
45ae7cff
WD
953 /*
954 * We will likely want to change this if/when KVM gets
955 * involved.
956 */
c752ce45 957 case ARM_SMMU_DOMAIN_S2:
9c5c92e3
WD
958 cfg->cbar = CBAR_TYPE_S2_TRANS;
959 start = 0;
518f7136
WD
960 ias = smmu->ipa_size;
961 oas = smmu->pa_size;
7602b871 962 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 963 fmt = ARM_64_LPAE_S2;
7602b871 964 } else {
518f7136 965 fmt = ARM_32_LPAE_S2;
7602b871
RM
966 ias = min(ias, 40UL);
967 oas = min(oas, 40UL);
968 }
c752ce45
WD
969 break;
970 default:
971 ret = -EINVAL;
972 goto out_unlock;
45ae7cff
WD
973 }
974
975 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
976 smmu->num_context_banks);
287980e4 977 if (ret < 0)
a18037b2 978 goto out_unlock;
45ae7cff 979
44680eed 980 cfg->cbndx = ret;
b7862e35 981 if (smmu->version < ARM_SMMU_V2) {
44680eed
WD
982 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
983 cfg->irptndx %= smmu->num_context_irqs;
45ae7cff 984 } else {
44680eed 985 cfg->irptndx = cfg->cbndx;
45ae7cff
WD
986 }
987
518f7136 988 pgtbl_cfg = (struct io_pgtable_cfg) {
d5466357 989 .pgsize_bitmap = smmu->pgsize_bitmap,
518f7136
WD
990 .ias = ias,
991 .oas = oas,
992 .tlb = &arm_smmu_gather_ops,
2df7a25c 993 .iommu_dev = smmu->dev,
518f7136
WD
994 };
995
996 smmu_domain->smmu = smmu;
997 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
998 if (!pgtbl_ops) {
999 ret = -ENOMEM;
1000 goto out_clear_smmu;
1001 }
1002
d5466357
RM
1003 /* Update the domain's page sizes to reflect the page table format */
1004 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
a18037b2 1005
518f7136
WD
1006 /* Initialise the context bank with our page table cfg */
1007 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
1008
1009 /*
1010 * Request context fault interrupt. Do this last to avoid the
1011 * handler seeing a half-initialised domain state.
1012 */
44680eed 1013 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
bee14004
PF
1014 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
1015 IRQF_SHARED, "arm-smmu-context-fault", domain);
287980e4 1016 if (ret < 0) {
45ae7cff 1017 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
44680eed
WD
1018 cfg->irptndx, irq);
1019 cfg->irptndx = INVALID_IRPTNDX;
45ae7cff
WD
1020 }
1021
518f7136
WD
1022 mutex_unlock(&smmu_domain->init_mutex);
1023
1024 /* Publish page table ops for map/unmap */
1025 smmu_domain->pgtbl_ops = pgtbl_ops;
a9a1b0b5 1026 return 0;
45ae7cff 1027
518f7136
WD
1028out_clear_smmu:
1029 smmu_domain->smmu = NULL;
a18037b2 1030out_unlock:
518f7136 1031 mutex_unlock(&smmu_domain->init_mutex);
45ae7cff
WD
1032 return ret;
1033}
1034
1035static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1036{
1d672638 1037 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed
WD
1038 struct arm_smmu_device *smmu = smmu_domain->smmu;
1039 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1463fe44 1040 void __iomem *cb_base;
45ae7cff
WD
1041 int irq;
1042
9800699c 1043 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
45ae7cff
WD
1044 return;
1045
518f7136
WD
1046 /*
1047 * Disable the context bank and free the page tables before freeing
1048 * it.
1049 */
44680eed 1050 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1463fe44 1051 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1463fe44 1052
44680eed
WD
1053 if (cfg->irptndx != INVALID_IRPTNDX) {
1054 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
bee14004 1055 devm_free_irq(smmu->dev, irq, domain);
45ae7cff
WD
1056 }
1057
44830b0c 1058 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44680eed 1059 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
45ae7cff
WD
1060}
1061
1d672638 1062static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
45ae7cff
WD
1063{
1064 struct arm_smmu_domain *smmu_domain;
45ae7cff 1065
9adb9594 1066 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
1d672638 1067 return NULL;
45ae7cff
WD
1068 /*
1069 * Allocate the domain and initialise some of its data structures.
1070 * We can't really do anything meaningful until we've added a
1071 * master.
1072 */
1073 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1074 if (!smmu_domain)
1d672638 1075 return NULL;
45ae7cff 1076
9adb9594
RM
1077 if (type == IOMMU_DOMAIN_DMA &&
1078 iommu_get_dma_cookie(&smmu_domain->domain)) {
1079 kfree(smmu_domain);
1080 return NULL;
1081 }
1082
518f7136
WD
1083 mutex_init(&smmu_domain->init_mutex);
1084 spin_lock_init(&smmu_domain->pgtbl_lock);
1d672638
JR
1085
1086 return &smmu_domain->domain;
45ae7cff
WD
1087}
1088
1d672638 1089static void arm_smmu_domain_free(struct iommu_domain *domain)
45ae7cff 1090{
1d672638 1091 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1463fe44
WD
1092
1093 /*
1094 * Free the domain resources. We assume that all devices have
1095 * already been detached.
1096 */
9adb9594 1097 iommu_put_dma_cookie(domain);
45ae7cff 1098 arm_smmu_destroy_domain_context(domain);
45ae7cff
WD
1099 kfree(smmu_domain);
1100}
1101
1f3d5ca4 1102static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
45ae7cff
WD
1103{
1104 int i;
45ae7cff 1105
1f3d5ca4
RM
1106 for (i = 0; i < smmu->num_mapping_groups; i++)
1107 if (!cmpxchg(&smmu->smrs[i].valid, false, true))
1108 return i;
45ae7cff 1109
1f3d5ca4
RM
1110 return INVALID_SMENDX;
1111}
45ae7cff 1112
1f3d5ca4
RM
1113static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
1114{
1115 writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1116 WRITE_ONCE(smmu->smrs[idx].valid, false);
1117}
1118
1119static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1120{
1121 struct arm_smmu_smr *smr = smmu->smrs + idx;
1122 u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT |
1123 (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;
1124
1125 if (smr->valid)
1126 reg |= SMR_VALID;
1127 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1128}
1129
8e8b203e
RM
1130static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1131{
1132 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1133 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1134 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1135 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1136
1137 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1138}
1139
1140static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1141{
1142 arm_smmu_write_s2cr(smmu, idx);
1143 if (smmu->smrs)
1144 arm_smmu_write_smr(smmu, idx);
1145}
1146
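/*
 * Illustrative sketch (not part of the driver): route a single stream ID to
 * a context bank by filling in the tracked SMR/S2CR state and committing
 * both to the hardware in one go. The helper name and values are made up.
 */
#if 0
static void example_install_sme(struct arm_smmu_device *smmu, int idx,
				u16 sid, u8 cbndx)
{
	if (smmu->smrs)
		smmu->smrs[idx] = (struct arm_smmu_smr) {
			.id	= sid,
			.mask	= 0,
			.valid	= true,
		};

	smmu->s2crs[idx] = (struct arm_smmu_s2cr) {
		.type	 = S2CR_TYPE_TRANS,
		.privcfg = S2CR_PRIVCFG_DEFAULT,
		.cbndx	 = cbndx,
	};

	arm_smmu_write_sme(smmu, idx);
}
#endif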
1f3d5ca4
RM
1147static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
1148 struct arm_smmu_master_cfg *cfg)
1149{
1150 struct arm_smmu_smr *smrs = smmu->smrs;
1151 int i, idx;
45ae7cff 1152
44680eed 1153 /* Allocate the SMRs on the SMMU */
a9a1b0b5 1154 for (i = 0; i < cfg->num_streamids; ++i) {
1f3d5ca4
RM
1155 if (cfg->smendx[i] != INVALID_SMENDX)
1156 return -EEXIST;
1157
1158 /* ...except on stream indexing hardware, of course */
1159 if (!smrs) {
1160 cfg->smendx[i] = cfg->streamids[i];
1161 continue;
1162 }
1163
1164 idx = arm_smmu_alloc_smr(smmu);
287980e4 1165 if (idx < 0) {
45ae7cff
WD
1166 dev_err(smmu->dev, "failed to allocate free SMR\n");
1167 goto err_free_smrs;
1168 }
1f3d5ca4 1169 cfg->smendx[i] = idx;
45ae7cff 1170
1f3d5ca4
RM
1171 smrs[idx].id = cfg->streamids[i];
1172 smrs[idx].mask = 0; /* We don't currently share SMRs */
45ae7cff
WD
1173 }
1174
1f3d5ca4
RM
1175 if (!smrs)
1176 return 0;
1177
45ae7cff 1178 /* It worked! Now, poke the actual hardware */
1f3d5ca4
RM
1179 for (i = 0; i < cfg->num_streamids; ++i)
1180 arm_smmu_write_smr(smmu, cfg->smendx[i]);
45ae7cff 1181
45ae7cff
WD
1182 return 0;
1183
1184err_free_smrs:
1f3d5ca4
RM
1185 while (i--) {
1186 arm_smmu_free_smr(smmu, cfg->smendx[i]);
1187 cfg->smendx[i] = INVALID_SMENDX;
1188 }
45ae7cff
WD
1189 return -ENOSPC;
1190}
1191
1f3d5ca4 1192static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu,
a9a1b0b5 1193 struct arm_smmu_master_cfg *cfg)
45ae7cff
WD
1194{
1195 int i;
43b412be 1196
8e8b203e
RM
1197 /*
1198 * We *must* clear the S2CR first, because freeing the SMR means
1199 * that it can be re-allocated immediately.
1200 */
1201 for (i = 0; i < cfg->num_streamids; ++i) {
1202 int idx = cfg->smendx[i];
1203
1204 /* An IOMMU group is torn down by the first device to be removed */
1205 if (idx == INVALID_SMENDX)
1206 return;
1207
1208 smmu->s2crs[idx] = s2cr_init_val;
1209 arm_smmu_write_s2cr(smmu, idx);
1210 }
1211 /* Sync S2CR updates before touching anything else */
1212 __iowmb();
1213
45ae7cff 1214 /* Invalidate the SMRs before freeing back to the allocator */
a9a1b0b5 1215 for (i = 0; i < cfg->num_streamids; ++i) {
1f3d5ca4
RM
1216 if (smmu->smrs)
1217 arm_smmu_free_smr(smmu, cfg->smendx[i]);
2907320d 1218
1f3d5ca4 1219 cfg->smendx[i] = INVALID_SMENDX;
45ae7cff 1220 }
45ae7cff
WD
1221}
1222
45ae7cff 1223static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
a9a1b0b5 1224 struct arm_smmu_master_cfg *cfg)
45ae7cff 1225{
8e8b203e 1226 int i, ret = 0;
44680eed 1227 struct arm_smmu_device *smmu = smmu_domain->smmu;
8e8b203e
RM
1228 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1229 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1230 u8 cbndx = smmu_domain->cfg.cbndx;
1231
1232 if (cfg->smendx[0] == INVALID_SMENDX)
1233 ret = arm_smmu_master_alloc_smes(smmu, cfg);
1234 if (ret)
1235 return ret;
45ae7cff 1236
cbf8277e
WD
1237 /*
1238 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
5f634956
WD
1239 * for all devices behind the SMMU. Note that we need to take
1240 * care configuring SMRs for devices both a platform_device and
 1241 * a PCI device (i.e. a PCI host controller)
cbf8277e
WD
1242 */
1243 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
8e8b203e 1244 type = S2CR_TYPE_BYPASS;
5f634956 1245
43b412be 1246 for (i = 0; i < cfg->num_streamids; ++i) {
1f3d5ca4 1247 int idx = cfg->smendx[i];
43b412be 1248
8e8b203e
RM
1249 /* Devices in an IOMMU group may already be configured */
1250 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1251 break;
1f3d5ca4 1252
8e8b203e
RM
1253 s2cr[idx].type = type;
1254 s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
1255 s2cr[idx].cbndx = cbndx;
1256 arm_smmu_write_s2cr(smmu, idx);
43b412be 1257 }
8e8b203e 1258 return 0;
bc7f2ce0
WD
1259}
1260
45ae7cff
WD
1261static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1262{
a18037b2 1263 int ret;
1d672638 1264 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1265 struct arm_smmu_device *smmu;
a9a1b0b5 1266 struct arm_smmu_master_cfg *cfg;
45ae7cff 1267
8f68f8e2 1268 smmu = find_smmu_for_device(dev);
44680eed 1269 if (!smmu) {
45ae7cff
WD
1270 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1271 return -ENXIO;
1272 }
1273
518f7136
WD
1274 /* Ensure that the domain is finalised */
1275 ret = arm_smmu_init_domain_context(domain, smmu);
287980e4 1276 if (ret < 0)
518f7136
WD
1277 return ret;
1278
45ae7cff 1279 /*
44680eed
WD
1280 * Sanity check the domain. We don't support domains across
1281 * different SMMUs.
45ae7cff 1282 */
518f7136 1283 if (smmu_domain->smmu != smmu) {
45ae7cff
WD
1284 dev_err(dev,
1285 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
a18037b2
MH
1286 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1287 return -EINVAL;
45ae7cff 1288 }
45ae7cff
WD
1289
1290 /* Looks ok, so add the device to the domain */
8f68f8e2 1291 cfg = find_smmu_master_cfg(dev);
a9a1b0b5 1292 if (!cfg)
45ae7cff
WD
1293 return -ENODEV;
1294
8e8b203e 1295 return arm_smmu_domain_add_master(smmu_domain, cfg);
45ae7cff
WD
1296}
1297
45ae7cff 1298static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
b410aed9 1299 phys_addr_t paddr, size_t size, int prot)
45ae7cff 1300{
518f7136
WD
1301 int ret;
1302 unsigned long flags;
1d672638 1303 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1304 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff 1305
518f7136 1306 if (!ops)
45ae7cff
WD
1307 return -ENODEV;
1308
518f7136
WD
1309 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1310 ret = ops->map(ops, iova, paddr, size, prot);
1311 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1312 return ret;
45ae7cff
WD
1313}
1314
1315static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1316 size_t size)
1317{
518f7136
WD
1318 size_t ret;
1319 unsigned long flags;
1d672638 1320 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1321 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff 1322
518f7136
WD
1323 if (!ops)
1324 return 0;
1325
1326 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1327 ret = ops->unmap(ops, iova, size);
1328 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1329 return ret;
45ae7cff
WD
1330}
1331
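/*
 * Usage sketch (illustrative only): a caller exercises the map/unmap paths
 * above through the core IOMMU API, which dispatches to arm_smmu_map() and
 * arm_smmu_unmap() via arm_smmu_ops.
 */
#if 0
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int ret = iommu_map(domain, iova, paddr, SZ_4K,
			    IOMMU_READ | IOMMU_WRITE);

	if (ret)
		return ret;

	iommu_unmap(domain, iova, SZ_4K);
	return 0;
}
#endif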
859a732e
MH
1332static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1333 dma_addr_t iova)
1334{
1d672638 1335 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
859a732e
MH
1336 struct arm_smmu_device *smmu = smmu_domain->smmu;
1337 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 1338 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1339 struct device *dev = smmu->dev;
1340 void __iomem *cb_base;
1341 u32 tmp;
1342 u64 phys;
661d962f 1343 unsigned long va;
859a732e
MH
1344
1345 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1346
661d962f
RM
1347 /* ATS1 registers can only be written atomically */
1348 va = iova & ~0xfffUL;
661d962f 1349 if (smmu->version == ARM_SMMU_V2)
f9a05f05
RM
1350 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1351 else /* Register is only 32-bit in v1 */
661d962f 1352 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
859a732e
MH
1353
1354 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1355 !(tmp & ATSR_ACTIVE), 5, 50)) {
1356 dev_err(dev,
077124c9 1357 "iova to phys timed out on %pad. Falling back to software table walk.\n",
859a732e
MH
1358 &iova);
1359 return ops->iova_to_phys(ops, iova);
1360 }
1361
f9a05f05 1362 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
859a732e
MH
1363 if (phys & CB_PAR_F) {
1364 dev_err(dev, "translation fault!\n");
1365 dev_err(dev, "PAR = 0x%llx\n", phys);
1366 return 0;
1367 }
1368
1369 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1370}
1371
45ae7cff 1372static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
859a732e 1373 dma_addr_t iova)
45ae7cff 1374{
518f7136
WD
1375 phys_addr_t ret;
1376 unsigned long flags;
1d672638 1377 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1378 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff 1379
518f7136 1380 if (!ops)
a44a9791 1381 return 0;
45ae7cff 1382
518f7136 1383 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
83a60ed8
BR
1384 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1385 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
859a732e 1386 ret = arm_smmu_iova_to_phys_hard(domain, iova);
83a60ed8 1387 } else {
859a732e 1388 ret = ops->iova_to_phys(ops, iova);
83a60ed8
BR
1389 }
1390
518f7136 1391 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
859a732e 1392
518f7136 1393 return ret;
45ae7cff
WD
1394}
1395
1fd0c775 1396static bool arm_smmu_capable(enum iommu_cap cap)
45ae7cff 1397{
d0948945
WD
1398 switch (cap) {
1399 case IOMMU_CAP_CACHE_COHERENCY:
1fd0c775
JR
1400 /*
1401 * Return true here as the SMMU can always send out coherent
1402 * requests.
1403 */
1404 return true;
d0948945 1405 case IOMMU_CAP_INTR_REMAP:
1fd0c775 1406 return true; /* MSIs are just memory writes */
0029a8dd
AM
1407 case IOMMU_CAP_NOEXEC:
1408 return true;
d0948945 1409 default:
1fd0c775 1410 return false;
d0948945 1411 }
45ae7cff 1412}
45ae7cff 1413
a9a1b0b5
WD
1414static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1415{
1416 *((u16 *)data) = alias;
1417 return 0; /* Continue walking */
45ae7cff
WD
1418}
1419
8f68f8e2
WD
1420static void __arm_smmu_release_pci_iommudata(void *data)
1421{
1422 kfree(data);
1423}
1424
af659932
JR
1425static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1426 struct iommu_group *group)
45ae7cff 1427{
03edb226 1428 struct arm_smmu_master_cfg *cfg;
af659932
JR
1429 u16 sid;
1430 int i;
a9a1b0b5 1431
03edb226
WD
1432 cfg = iommu_group_get_iommudata(group);
1433 if (!cfg) {
a9a1b0b5 1434 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
af659932
JR
1435 if (!cfg)
1436 return -ENOMEM;
a9a1b0b5 1437
03edb226
WD
1438 iommu_group_set_iommudata(group, cfg,
1439 __arm_smmu_release_pci_iommudata);
1440 }
8f68f8e2 1441
af659932
JR
1442 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1443 return -ENOSPC;
a9a1b0b5 1444
03edb226
WD
1445 /*
1446 * Assume Stream ID == Requester ID for now.
1447 * We need a way to describe the ID mappings in FDT.
1448 */
1449 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1450 for (i = 0; i < cfg->num_streamids; ++i)
1451 if (cfg->streamids[i] == sid)
1452 break;
1453
1454 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1f3d5ca4
RM
1455 if (i == cfg->num_streamids) {
1456 cfg->streamids[i] = sid;
1457 cfg->smendx[i] = INVALID_SMENDX;
1458 cfg->num_streamids++;
1459 }
5fc63a7c 1460
03edb226 1461 return 0;
45ae7cff
WD
1462}
1463
af659932
JR
1464static int arm_smmu_init_platform_device(struct device *dev,
1465 struct iommu_group *group)
03edb226 1466{
03edb226 1467 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
af659932 1468 struct arm_smmu_master *master;
03edb226
WD
1469
1470 if (!smmu)
1471 return -ENODEV;
1472
1473 master = find_smmu_master(smmu, dev->of_node);
1474 if (!master)
1475 return -ENODEV;
1476
03edb226 1477 iommu_group_set_iommudata(group, &master->cfg, NULL);
af659932
JR
1478
1479 return 0;
03edb226
WD
1480}
1481
1482static int arm_smmu_add_device(struct device *dev)
1483{
af659932 1484 struct iommu_group *group;
03edb226 1485
af659932
JR
1486 group = iommu_group_get_for_dev(dev);
1487 if (IS_ERR(group))
1488 return PTR_ERR(group);
03edb226 1489
9a4a9d8c 1490 iommu_group_put(group);
af659932 1491 return 0;
03edb226
WD
1492}
1493
45ae7cff
WD
1494static void arm_smmu_remove_device(struct device *dev)
1495{
8e8b203e
RM
1496 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1497 struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev);
1498
1499 if (smmu && cfg)
1500 arm_smmu_master_free_smes(smmu, cfg);
1501
5fc63a7c 1502 iommu_group_remove_device(dev);
45ae7cff
WD
1503}
1504
af659932
JR
1505static struct iommu_group *arm_smmu_device_group(struct device *dev)
1506{
1507 struct iommu_group *group;
1508 int ret;
1509
1510 if (dev_is_pci(dev))
1511 group = pci_device_group(dev);
1512 else
1513 group = generic_device_group(dev);
1514
1515 if (IS_ERR(group))
1516 return group;
1517
1518 if (dev_is_pci(dev))
1519 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1520 else
1521 ret = arm_smmu_init_platform_device(dev, group);
1522
1523 if (ret) {
1524 iommu_group_put(group);
1525 group = ERR_PTR(ret);
1526 }
1527
1528 return group;
1529}
1530
c752ce45
WD
1531static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1532 enum iommu_attr attr, void *data)
1533{
1d672638 1534 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
c752ce45
WD
1535
1536 switch (attr) {
1537 case DOMAIN_ATTR_NESTING:
1538 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1539 return 0;
1540 default:
1541 return -ENODEV;
1542 }
1543}
1544
1545static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1546 enum iommu_attr attr, void *data)
1547{
518f7136 1548 int ret = 0;
1d672638 1549 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
c752ce45 1550
518f7136
WD
1551 mutex_lock(&smmu_domain->init_mutex);
1552
c752ce45
WD
1553 switch (attr) {
1554 case DOMAIN_ATTR_NESTING:
518f7136
WD
1555 if (smmu_domain->smmu) {
1556 ret = -EPERM;
1557 goto out_unlock;
1558 }
1559
c752ce45
WD
1560 if (*(int *)data)
1561 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1562 else
1563 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1564
518f7136 1565 break;
c752ce45 1566 default:
518f7136 1567 ret = -ENODEV;
c752ce45 1568 }
518f7136
WD
1569
1570out_unlock:
1571 mutex_unlock(&smmu_domain->init_mutex);
1572 return ret;
c752ce45
WD
1573}
1574
518f7136 1575static struct iommu_ops arm_smmu_ops = {
c752ce45 1576 .capable = arm_smmu_capable,
1d672638
JR
1577 .domain_alloc = arm_smmu_domain_alloc,
1578 .domain_free = arm_smmu_domain_free,
c752ce45 1579 .attach_dev = arm_smmu_attach_dev,
c752ce45
WD
1580 .map = arm_smmu_map,
1581 .unmap = arm_smmu_unmap,
76771c93 1582 .map_sg = default_iommu_map_sg,
c752ce45
WD
1583 .iova_to_phys = arm_smmu_iova_to_phys,
1584 .add_device = arm_smmu_add_device,
1585 .remove_device = arm_smmu_remove_device,
af659932 1586 .device_group = arm_smmu_device_group,
c752ce45
WD
1587 .domain_get_attr = arm_smmu_domain_get_attr,
1588 .domain_set_attr = arm_smmu_domain_set_attr,
518f7136 1589 .pgsize_bitmap = -1UL, /* Restricted during device attach */
45ae7cff
WD
1590};
1591
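/*
 * Illustrative sketch (not part of this hunk): the ops table above is what
 * the driver registers with the IOMMU core during probe, e.g. via
 * bus_set_iommu(&platform_bus_type, &arm_smmu_ops).
 */
#if 0
static int example_publish_ops(void)
{
	return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
}
#endif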
1592static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1593{
1594 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
659db6f6 1595 void __iomem *cb_base;
1f3d5ca4 1596 int i;
3ca3712a 1597 u32 reg, major;
659db6f6 1598
3a5df8ff
AH
1599 /* clear global FSR */
1600 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1601 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
45ae7cff 1602
1f3d5ca4
RM
1603 /*
1604 * Reset stream mapping groups: Initial values mark all SMRn as
1605 * invalid and all S2CRn as bypass unless overridden.
1606 */
8e8b203e
RM
1607 for (i = 0; i < smmu->num_mapping_groups; ++i)
1608 arm_smmu_write_sme(smmu, i);
45ae7cff 1609
3ca3712a
PF
1610 /*
1611 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1612 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1613 * bit is only present in MMU-500r2 onwards.
1614 */
1615 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1616 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1617 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
1618 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1619 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1620 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1621 }
1622
659db6f6
AH
1623 /* Make sure all context banks are disabled and clear CB_FSR */
1624 for (i = 0; i < smmu->num_context_banks; ++i) {
1625 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1626 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1627 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
f0cfffc4
RM
1628 /*
1629 * Disable MMU-500's not-particularly-beneficial next-page
1630 * prefetcher for the sake of errata #841119 and #826419.
1631 */
1632 if (smmu->model == ARM_MMU500) {
1633 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1634 reg &= ~ARM_MMU500_ACTLR_CPRE;
1635 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1636 }
659db6f6 1637 }
1463fe44 1638
45ae7cff 1639 /* Invalidate the TLB, just in case */
45ae7cff
WD
1640 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1641 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1642
3a5df8ff 1643 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
659db6f6 1644
45ae7cff 1645 /* Enable fault reporting */
659db6f6 1646 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
45ae7cff
WD
1647
1648 /* Disable TLB broadcasting. */
659db6f6 1649 reg |= (sCR0_VMIDPNE | sCR0_PTM);
45ae7cff 1650
25a1c96c
RM
1651 /* Enable client access, handling unmatched streams as appropriate */
1652 reg &= ~sCR0_CLIENTPD;
1653 if (disable_bypass)
1654 reg |= sCR0_USFCFG;
1655 else
1656 reg &= ~sCR0_USFCFG;
45ae7cff
WD
1657
1658 /* Disable forced broadcasting */
659db6f6 1659 reg &= ~sCR0_FB;
45ae7cff
WD
1660
1661 /* Don't upgrade barriers */
659db6f6 1662 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
45ae7cff 1663
4e3e9b69
TC
1664 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1665 reg |= sCR0_VMID16EN;
1666
45ae7cff 1667 /* Push the button */
518f7136 1668 __arm_smmu_tlb_sync(smmu);
3a5df8ff 1669 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
45ae7cff
WD
1670}
1671
1672static int arm_smmu_id_size_to_bits(int size)
1673{
1674 switch (size) {
1675 case 0:
1676 return 32;
1677 case 1:
1678 return 36;
1679 case 2:
1680 return 40;
1681 case 3:
1682 return 42;
1683 case 4:
1684 return 44;
1685 case 5:
1686 default:
1687 return 48;
1688 }
1689}
1690
1691static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1692{
1693 unsigned long size;
1694 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1695 u32 id;
bae2c2d4 1696 bool cttw_dt, cttw_reg;
8e8b203e 1697 int i;
45ae7cff
WD
1698
1699 dev_notice(smmu->dev, "probing hardware configuration...\n");
b7862e35
RM
1700 dev_notice(smmu->dev, "SMMUv%d with:\n",
1701 smmu->version == ARM_SMMU_V2 ? 2 : 1);
45ae7cff
WD
1702
1703 /* ID0 */
1704 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
4cf740b0
WD
1705
1706 /* Restrict available stages based on module parameter */
1707 if (force_stage == 1)
1708 id &= ~(ID0_S2TS | ID0_NTS);
1709 else if (force_stage == 2)
1710 id &= ~(ID0_S1TS | ID0_NTS);
1711
45ae7cff
WD
1712 if (id & ID0_S1TS) {
1713 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1714 dev_notice(smmu->dev, "\tstage 1 translation\n");
1715 }
1716
1717 if (id & ID0_S2TS) {
1718 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1719 dev_notice(smmu->dev, "\tstage 2 translation\n");
1720 }
1721
1722 if (id & ID0_NTS) {
1723 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1724 dev_notice(smmu->dev, "\tnested translation\n");
1725 }
1726
1727 if (!(smmu->features &
4cf740b0 1728 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
45ae7cff
WD
1729 dev_err(smmu->dev, "\tno translation support!\n");
1730 return -ENODEV;
1731 }
1732
b7862e35
RM
1733 if ((id & ID0_S1TS) &&
1734 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
859a732e
MH
1735 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1736 dev_notice(smmu->dev, "\taddress translation ops\n");
1737 }
1738
bae2c2d4
RM
1739 /*
1740 * In order for DMA API calls to work properly, we must defer to what
1741 * the DT says about coherency, regardless of what the hardware claims.
1742 * Fortunately, this also opens up a workaround for systems where the
1743 * ID register value has ended up configured incorrectly.
1744 */
1745 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1746 cttw_reg = !!(id & ID0_CTTW);
1747 if (cttw_dt)
45ae7cff 1748 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
bae2c2d4
RM
1749 if (cttw_dt || cttw_reg)
1750 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1751 cttw_dt ? "" : "non-");
1752 if (cttw_dt != cttw_reg)
1753 dev_notice(smmu->dev,
1754 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
45ae7cff 1755
21174240
RM
1756 /* Max. number of entries we have for stream matching/indexing */
1757 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1758 smmu->streamid_mask = size - 1;
45ae7cff 1759 if (id & ID0_SMS) {
21174240 1760 u32 smr;
45ae7cff
WD
1761
1762 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
21174240
RM
1763 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1764 if (size == 0) {
45ae7cff
WD
1765 dev_err(smmu->dev,
1766 "stream-matching supported, but no SMRs present!\n");
1767 return -ENODEV;
1768 }
1769
21174240
RM
1770 /*
1771 * SMR.ID bits may not be preserved if the corresponding MASK
1772 * bits are set, so check each one separately. We can reject
1773 * masters later if they try to claim IDs outside these masks.
1774 */
1775 smr = smmu->streamid_mask << SMR_ID_SHIFT;
45ae7cff
WD
1776 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1777 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
21174240 1778 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
45ae7cff 1779
21174240
RM
1780 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1781 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1782 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1783 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
45ae7cff 1784
1f3d5ca4
RM
1785 /* Zero-initialised to mark as invalid */
1786 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1787 GFP_KERNEL);
1788 if (!smmu->smrs)
1789 return -ENOMEM;
1790
45ae7cff 1791 dev_notice(smmu->dev,
21174240
RM
1792 "\tstream matching with %lu register groups, mask 0x%x",
1793 size, smmu->smr_mask_mask);
45ae7cff 1794 }
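	/*
	 * Illustrative example (hypothetical values): if the write/read-back
	 * probe above finds that only SMR.ID bits [14:0] stick, then
	 * streamid_mask ends up as 0x7fff and masters claiming IDs outside
	 * it can be rejected later; smr_mask_mask likewise records which
	 * SMR.MASK bits are actually implemented.
	 */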
8e8b203e
RM
1795 /* s2cr->type == 0 means translation, so initialise explicitly */
1796 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1797 GFP_KERNEL);
1798 if (!smmu->s2crs)
1799 return -ENOMEM;
1800 for (i = 0; i < size; i++)
1801 smmu->s2crs[i] = s2cr_init_val;
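	/*
	 * Each S2CR starts out as s2cr_init_val rather than the all-zeroes
	 * reset value (type 0 would mean "translate"); the intent is that
	 * the initial type reflects the disable_bypass policy, i.e. bypass
	 * or fault, until a domain actually claims the entry.
	 */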
1802
21174240 1803 smmu->num_mapping_groups = size;
45ae7cff 1804
7602b871
RM
1805 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1806 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1807 if (!(id & ID0_PTFS_NO_AARCH32S))
1808 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1809 }
1810
45ae7cff
WD
1811 /* ID1 */
1812 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
c757e852 1813 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
45ae7cff 1814
c55af7f7 1815 /* Check for size mismatch of SMMU address space from mapped region */
518f7136 1816 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
c757e852 1817 size *= 2 << smmu->pgshift;
c55af7f7 1818 if (smmu->size != size)
2907320d
MH
1819 dev_warn(smmu->dev,
1820 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1821 size, smmu->size);
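	/*
	 * Worked example: with 4K pages (pgshift == 12) and
	 * ID1.NUMPAGENDXB == 2, the global space spans 2^(2+1) == 8 pages
	 * and the context banks mirror it, so the expected region size is
	 * 8 * 2 * 4K == 64K.
	 */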
45ae7cff 1822
518f7136 1823 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
45ae7cff
WD
1824 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1825 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1826 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1827 return -ENODEV;
1828 }
1829 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1830 smmu->num_context_banks, smmu->num_s2_context_banks);
e086d912
RM
1831 /*
1832 * Cavium CN88xx erratum #27704.
1833 * Ensure ASID and VMID allocation is unique across all SMMUs in
1834 * the system.
1835 */
1836 if (smmu->model == CAVIUM_SMMUV2) {
1837 smmu->cavium_id_base =
1838 atomic_add_return(smmu->num_context_banks,
1839 &cavium_smmu_context_count);
1840 smmu->cavium_id_base -= smmu->num_context_banks;
1841 }
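	/*
	 * The add-then-subtract above is effectively a fetch-and-add: it
	 * reserves num_context_banks IDs from the shared counter, and
	 * cavium_id_base is later added to every ASID/VMID this SMMU hands
	 * out so the values stay unique across the affected SMMUs.
	 */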
45ae7cff
WD
1842
1843 /* ID2 */
1844 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1845 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
518f7136 1846 smmu->ipa_size = size;
45ae7cff 1847
518f7136 1848 /* The output mask is also applied for bypass */
45ae7cff 1849 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
518f7136 1850 smmu->pa_size = size;
45ae7cff 1851
4e3e9b69
TC
1852 if (id & ID2_VMID16)
1853 smmu->features |= ARM_SMMU_FEAT_VMID16;
1854
f1d84548
RM
1855 /*
1856 * What the page table walker can address actually depends on which
1857 * descriptor format is in use, but since a) we don't know that yet,
1858 * and b) it can vary per context bank, this will have to do...
1859 */
1860 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1861 dev_warn(smmu->dev,
1862 "failed to set DMA mask for table walker\n");
1863
b7862e35 1864 if (smmu->version < ARM_SMMU_V2) {
518f7136 1865 smmu->va_size = smmu->ipa_size;
b7862e35
RM
1866 if (smmu->version == ARM_SMMU_V1_64K)
1867 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
45ae7cff 1868 } else {
45ae7cff 1869 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
518f7136 1870 smmu->va_size = arm_smmu_id_size_to_bits(size);
518f7136 1871 if (id & ID2_PTFS_4K)
7602b871 1872 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
518f7136 1873 if (id & ID2_PTFS_16K)
7602b871 1874 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
518f7136 1875 if (id & ID2_PTFS_64K)
7602b871 1876 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
45ae7cff
WD
1877 }
1878
7602b871 1879 /* Now we've corralled the various formats, what'll it do? */
7602b871 1880 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
d5466357 1881 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
7602b871
RM
1882 if (smmu->features &
1883 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
d5466357 1884 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
7602b871 1885 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
d5466357 1886 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
7602b871 1887 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
d5466357
RM
1888 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1889
1890 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1891 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1892 else
1893 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
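	/*
	 * arm_smmu_ops is shared by all SMMU instances, so its pgsize_bitmap
	 * starts life as -1UL ("unset") and then accumulates the page sizes
	 * reported by each instance that probes.
	 */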
1894 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1895 smmu->pgsize_bitmap);
7602b871 1896
518f7136 1897
28d6007b
WD
1898 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1899 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
518f7136 1900 smmu->va_size, smmu->ipa_size);
28d6007b
WD
1901
1902 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1903 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
518f7136 1904 smmu->ipa_size, smmu->pa_size);
28d6007b 1905
45ae7cff
WD
1906 return 0;
1907}
1908
67b65a3f
RM
1909struct arm_smmu_match_data {
1910 enum arm_smmu_arch_version version;
1911 enum arm_smmu_implementation model;
1912};
1913
1914#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1915static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1916
1917ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1918ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
b7862e35 1919ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
f0cfffc4 1920ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
e086d912 1921ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
67b65a3f 1922
09b5269a 1923static const struct of_device_id arm_smmu_of_match[] = {
67b65a3f
RM
1924 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1925 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1926 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
b7862e35 1927 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
f0cfffc4 1928 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
e086d912 1929 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
09360403
RM
1930 { },
1931};
1932MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1933
45ae7cff
WD
1934static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1935{
09360403 1936 const struct of_device_id *of_id;
67b65a3f 1937 const struct arm_smmu_match_data *data;
45ae7cff
WD
1938 struct resource *res;
1939 struct arm_smmu_device *smmu;
45ae7cff
WD
1940 struct device *dev = &pdev->dev;
1941 struct rb_node *node;
cb6c27bb
JR
1942 struct of_phandle_iterator it;
1943 struct arm_smmu_phandle_args *masterspec;
45ae7cff
WD
1944 int num_irqs, i, err;
1945
1946 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1947 if (!smmu) {
1948 dev_err(dev, "failed to allocate arm_smmu_device\n");
1949 return -ENOMEM;
1950 }
1951 smmu->dev = dev;
1952
09360403 1953 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
67b65a3f
RM
1954 data = of_id->data;
1955 smmu->version = data->version;
1956 smmu->model = data->model;
09360403 1957
45ae7cff 1958 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8a7f4312
JL
1959 smmu->base = devm_ioremap_resource(dev, res);
1960 if (IS_ERR(smmu->base))
1961 return PTR_ERR(smmu->base);
45ae7cff 1962 smmu->size = resource_size(res);
45ae7cff
WD
1963
1964 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1965 &smmu->num_global_irqs)) {
1966 dev_err(dev, "missing #global-interrupts property\n");
1967 return -ENODEV;
1968 }
1969
1970 num_irqs = 0;
1971 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1972 num_irqs++;
1973 if (num_irqs > smmu->num_global_irqs)
1974 smmu->num_context_irqs++;
1975 }
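	/*
	 * The loop above counts every IRQ resource attached to the platform
	 * device: the first #global-interrupts of them are global fault
	 * interrupts, and anything beyond that is treated as a
	 * per-context-bank interrupt.
	 */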
1976
44a08de2
AH
1977 if (!smmu->num_context_irqs) {
1978 dev_err(dev, "found %d interrupts but expected at least %d\n",
1979 num_irqs, smmu->num_global_irqs + 1);
1980 return -ENODEV;
45ae7cff 1981 }
45ae7cff
WD
1982
1983 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1984 GFP_KERNEL);
1985 if (!smmu->irqs) {
1986 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1987 return -ENOMEM;
1988 }
1989
1990 for (i = 0; i < num_irqs; ++i) {
1991 int irq = platform_get_irq(pdev, i);
2907320d 1992
45ae7cff
WD
1993 if (irq < 0) {
1994 dev_err(dev, "failed to get irq index %d\n", i);
1995 return -ENODEV;
1996 }
1997 smmu->irqs[i] = irq;
1998 }
1999
3c8766d0
OH
2000 err = arm_smmu_device_cfg_probe(smmu);
2001 if (err)
2002 return err;
2003
45ae7cff
WD
2004 i = 0;
2005 smmu->masters = RB_ROOT;
cb6c27bb
JR
2006
2007 err = -ENOMEM;
2008 /* No need to zero the memory for masterspec */
2009 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2010 if (!masterspec)
2011 goto out_put_masters;
2012
2013 of_for_each_phandle(&it, err, dev->of_node,
2014 "mmu-masters", "#stream-id-cells", 0) {
2015 int count = of_phandle_iterator_args(&it, masterspec->args,
2016 MAX_MASTER_STREAMIDS);
2017 masterspec->np = of_node_get(it.node);
2018 masterspec->args_count = count;
2019
2020 err = register_smmu_master(smmu, dev, masterspec);
45ae7cff
WD
2021 if (err) {
2022 dev_err(dev, "failed to add master %s\n",
cb6c27bb
JR
2023 masterspec->np->name);
2024 kfree(masterspec);
45ae7cff
WD
2025 goto out_put_masters;
2026 }
2027
2028 i++;
2029 }
cb6c27bb 2030
45ae7cff
WD
2031 dev_notice(dev, "registered %d master devices\n", i);
2032
cb6c27bb
JR
2033 kfree(masterspec);
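	/*
	 * Sketch of the legacy binding consumed above (node names and stream
	 * IDs are illustrative only): the SMMU node lists its masters as
	 *
	 *	mmu-masters = <&dma0 0xd01 0xd02>, <&gpu 0x100>;
	 *
	 * while each referenced master node states how many stream-ID cells
	 * follow its phandle, e.g. "#stream-id-cells = <2>;" for dma0.
	 */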
2034
3a5df8ff
AH
2035 parse_driver_options(smmu);
2036
b7862e35 2037 if (smmu->version == ARM_SMMU_V2 &&
45ae7cff
WD
2038 smmu->num_context_banks != smmu->num_context_irqs) {
2039 dev_err(dev,
2040 "found only %d context interrupt(s) but %d required\n",
2041 smmu->num_context_irqs, smmu->num_context_banks);
89a23cde 2042 err = -ENODEV;
44680eed 2043 goto out_put_masters;
45ae7cff
WD
2044 }
2045
45ae7cff 2046 for (i = 0; i < smmu->num_global_irqs; ++i) {
bee14004
PF
2047 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2048 arm_smmu_global_fault,
2049 IRQF_SHARED,
2050 "arm-smmu global fault",
2051 smmu);
45ae7cff
WD
2052 if (err) {
2053 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2054 i, smmu->irqs[i]);
bee14004 2055 goto out_put_masters;
45ae7cff
WD
2056 }
2057 }
2058
2059 INIT_LIST_HEAD(&smmu->list);
2060 spin_lock(&arm_smmu_devices_lock);
2061 list_add(&smmu->list, &arm_smmu_devices);
2062 spin_unlock(&arm_smmu_devices_lock);
fd90cecb
WD
2063
2064 arm_smmu_device_reset(smmu);
45ae7cff
WD
2065 return 0;
2066
45ae7cff
WD
2067out_put_masters:
2068 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
2069 struct arm_smmu_master *master
2070 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
2071 of_node_put(master->of_node);
2072 }
2073
2074 return err;
2075}
2076
2077static int arm_smmu_device_remove(struct platform_device *pdev)
2078{
45ae7cff
WD
2079 struct device *dev = &pdev->dev;
2080 struct arm_smmu_device *curr, *smmu = NULL;
2081 struct rb_node *node;
2082
2083 spin_lock(&arm_smmu_devices_lock);
2084 list_for_each_entry(curr, &arm_smmu_devices, list) {
2085 if (curr->dev == dev) {
2086 smmu = curr;
2087 list_del(&smmu->list);
2088 break;
2089 }
2090 }
2091 spin_unlock(&arm_smmu_devices_lock);
2092
2093 if (!smmu)
2094 return -ENODEV;
2095
45ae7cff 2096 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
2097 struct arm_smmu_master *master
2098 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
2099 of_node_put(master->of_node);
2100 }
2101
ecfadb6e 2102 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
45ae7cff
WD
2103 dev_err(dev, "removing device with active domains!\n");
2104
45ae7cff 2105 /* Turn the thing off */
2907320d 2106 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
45ae7cff
WD
2107 return 0;
2108}
2109
45ae7cff
WD
2110static struct platform_driver arm_smmu_driver = {
2111 .driver = {
45ae7cff
WD
2112 .name = "arm-smmu",
2113 .of_match_table = of_match_ptr(arm_smmu_of_match),
2114 },
2115 .probe = arm_smmu_device_dt_probe,
2116 .remove = arm_smmu_device_remove,
2117};
2118
2119static int __init arm_smmu_init(void)
2120{
0e7d37ad 2121 struct device_node *np;
45ae7cff
WD
2122 int ret;
2123
0e7d37ad
TR
2124 /*
2125 * Play nice with systems that don't have an ARM SMMU by checking that
2126 * an ARM SMMU exists in the system before proceeding with the driver
2127 * and IOMMU bus operation registration.
2128 */
2129 np = of_find_matching_node(NULL, arm_smmu_of_match);
2130 if (!np)
2131 return 0;
2132
2133 of_node_put(np);
2134
45ae7cff
WD
2135 ret = platform_driver_register(&arm_smmu_driver);
2136 if (ret)
2137 return ret;
2138
2139 /* Oh, for a proper bus abstraction */
6614ee77 2140 if (!iommu_present(&platform_bus_type))
45ae7cff
WD
2141 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2142
d123cf82 2143#ifdef CONFIG_ARM_AMBA
6614ee77 2144 if (!iommu_present(&amba_bustype))
45ae7cff 2145 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
d123cf82 2146#endif
45ae7cff 2147
a9a1b0b5 2148#ifdef CONFIG_PCI
112c898b
WC
2149 if (!iommu_present(&pci_bus_type)) {
2150 pci_request_acs();
a9a1b0b5 2151 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
112c898b 2152 }
a9a1b0b5
WD
2153#endif
2154
45ae7cff
WD
2155 return 0;
2156}
2157
2158static void __exit arm_smmu_exit(void)
2159{
2160 return platform_driver_unregister(&arm_smmu_driver);
2161}
2162
b1950b27 2163subsys_initcall(arm_smmu_init);
45ae7cff
WD
2164module_exit(arm_smmu_exit);
2165
2166MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2167MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2168MODULE_LICENSE("GPL v2");