/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */
19 | ||
20 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ | |
21 | ||
695093e3 | 22 | #include "fsl_pamu_domain.h" |
695093e3 | 23 | |
cd70d465 EM |
24 | #include <sysdev/fsl_pci.h> |
25 | ||
695093e3 VS |
26 | /* |
27 | * Global spinlock that needs to be held while | |
28 | * configuring PAMU. | |
29 | */ | |
30 | static DEFINE_SPINLOCK(iommu_lock); | |
31 | ||
32 | static struct kmem_cache *fsl_pamu_domain_cache; | |
33 | static struct kmem_cache *iommu_devinfo_cache; | |
34 | static DEFINE_SPINLOCK(device_domain_lock); | |
35 | ||
3ff2dcc0 JR |
36 | struct iommu_device pamu_iommu; /* IOMMU core code handle */ |
37 | ||
8d4bfe40 JR |
38 | static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom) |
39 | { | |
40 | return container_of(dom, struct fsl_dma_domain, iommu_domain); | |
41 | } | |
42 | ||
695093e3 VS |
43 | static int __init iommu_init_mempool(void) |
44 | { | |
695093e3 | 45 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", |
cd70d465 EM |
46 | sizeof(struct fsl_dma_domain), |
47 | 0, | |
48 | SLAB_HWCACHE_ALIGN, | |
49 | NULL); | |
695093e3 VS |
50 | if (!fsl_pamu_domain_cache) { |
51 | pr_debug("Couldn't create fsl iommu_domain cache\n"); | |
52 | return -ENOMEM; | |
53 | } | |
54 | ||
55 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", | |
cd70d465 EM |
56 | sizeof(struct device_domain_info), |
57 | 0, | |
58 | SLAB_HWCACHE_ALIGN, | |
59 | NULL); | |
695093e3 VS |
60 | if (!iommu_devinfo_cache) { |
61 | pr_debug("Couldn't create devinfo cache\n"); | |
62 | kmem_cache_destroy(fsl_pamu_domain_cache); | |
63 | return -ENOMEM; | |
64 | } | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
69 | static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) | |
70 | { | |
71 | u32 win_cnt = dma_domain->win_cnt; | |
cd70d465 | 72 | struct dma_window *win_ptr = &dma_domain->win_arr[0]; |
695093e3 VS |
73 | struct iommu_domain_geometry *geom; |
74 | ||
8d4bfe40 | 75 | geom = &dma_domain->iommu_domain.geometry; |
695093e3 VS |
76 | |
77 | if (!win_cnt || !dma_domain->geom_size) { | |
78 | pr_debug("Number of windows/geometry not configured for the domain\n"); | |
79 | return 0; | |
80 | } | |
81 | ||
82 | if (win_cnt > 1) { | |
83 | u64 subwin_size; | |
84 | dma_addr_t subwin_iova; | |
85 | u32 wnd; | |
86 | ||
87 | subwin_size = dma_domain->geom_size >> ilog2(win_cnt); | |
88 | subwin_iova = iova & ~(subwin_size - 1); | |
89 | wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); | |
90 | win_ptr = &dma_domain->win_arr[wnd]; | |
91 | } | |
92 | ||
93 | if (win_ptr->valid) | |
cd70d465 | 94 | return win_ptr->paddr + (iova & (win_ptr->size - 1)); |
695093e3 VS |
95 | |
96 | return 0; | |
97 | } | |
98 | ||
99 | static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) | |
100 | { | |
cd70d465 | 101 | struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; |
695093e3 VS |
102 | int i, ret; |
103 | unsigned long rpn, flags; | |
104 | ||
105 | for (i = 0; i < dma_domain->win_cnt; i++) { | |
106 | if (sub_win_ptr[i].valid) { | |
cd70d465 | 107 | rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; |
695093e3 VS |
108 | spin_lock_irqsave(&iommu_lock, flags); |
109 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, | |
110 | sub_win_ptr[i].size, | |
111 | ~(u32)0, | |
112 | rpn, | |
113 | dma_domain->snoop_id, | |
114 | dma_domain->stash_id, | |
115 | (i > 0) ? 1 : 0, | |
116 | sub_win_ptr[i].prot); | |
117 | spin_unlock_irqrestore(&iommu_lock, flags); | |
118 | if (ret) { | |
cd70d465 | 119 | pr_debug("SPAACE configuration failed for liodn %d\n", |
695093e3 VS |
120 | liodn); |
121 | return ret; | |
122 | } | |
123 | } | |
124 | } | |
125 | ||
126 | return ret; | |
127 | } | |
128 | ||
129 | static int map_win(int liodn, struct fsl_dma_domain *dma_domain) | |
130 | { | |
131 | int ret; | |
132 | struct dma_window *wnd = &dma_domain->win_arr[0]; | |
8d4bfe40 | 133 | phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; |
695093e3 VS |
134 | unsigned long flags; |
135 | ||
136 | spin_lock_irqsave(&iommu_lock, flags); | |
137 | ret = pamu_config_ppaace(liodn, wnd_addr, | |
138 | wnd->size, | |
139 | ~(u32)0, | |
140 | wnd->paddr >> PAMU_PAGE_SHIFT, | |
141 | dma_domain->snoop_id, dma_domain->stash_id, | |
142 | 0, wnd->prot); | |
143 | spin_unlock_irqrestore(&iommu_lock, flags); | |
144 | if (ret) | |
cd70d465 | 145 | pr_debug("PAACE configuration failed for liodn %d\n", liodn); |
695093e3 VS |
146 | |
147 | return ret; | |
148 | } | |
149 | ||
150 | /* Map the DMA window corresponding to the LIODN */ | |
151 | static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) | |
152 | { | |
153 | if (dma_domain->win_cnt > 1) | |
154 | return map_subwins(liodn, dma_domain); | |
155 | else | |
156 | return map_win(liodn, dma_domain); | |
695093e3 VS |
157 | } |
158 | ||
159 | /* Update window/subwindow mapping for the LIODN */ | |
160 | static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) | |
161 | { | |
162 | int ret; | |
163 | struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; | |
164 | unsigned long flags; | |
165 | ||
166 | spin_lock_irqsave(&iommu_lock, flags); | |
167 | if (dma_domain->win_cnt > 1) { | |
168 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, | |
169 | wnd->size, | |
170 | ~(u32)0, | |
171 | wnd->paddr >> PAMU_PAGE_SHIFT, | |
172 | dma_domain->snoop_id, | |
173 | dma_domain->stash_id, | |
174 | (wnd_nr > 0) ? 1 : 0, | |
175 | wnd->prot); | |
176 | if (ret) | |
cd70d465 EM |
177 | pr_debug("Subwindow reconfiguration failed for liodn %d\n", |
178 | liodn); | |
695093e3 VS |
179 | } else { |
180 | phys_addr_t wnd_addr; | |
181 | ||
8d4bfe40 | 182 | wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; |
695093e3 VS |
183 | |
184 | ret = pamu_config_ppaace(liodn, wnd_addr, | |
185 | wnd->size, | |
186 | ~(u32)0, | |
187 | wnd->paddr >> PAMU_PAGE_SHIFT, | |
cd70d465 EM |
188 | dma_domain->snoop_id, dma_domain->stash_id, |
189 | 0, wnd->prot); | |
695093e3 | 190 | if (ret) |
cd70d465 EM |
191 | pr_debug("Window reconfiguration failed for liodn %d\n", |
192 | liodn); | |
695093e3 VS |
193 | } |
194 | ||
195 | spin_unlock_irqrestore(&iommu_lock, flags); | |
196 | ||
197 | return ret; | |
198 | } | |
199 | ||
200 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, | |
cd70d465 | 201 | u32 val) |
695093e3 VS |
202 | { |
203 | int ret = 0, i; | |
204 | unsigned long flags; | |
205 | ||
206 | spin_lock_irqsave(&iommu_lock, flags); | |
207 | if (!dma_domain->win_arr) { | |
cd70d465 EM |
208 | pr_debug("Windows not configured, stash destination update failed for liodn %d\n", |
209 | liodn); | |
695093e3 VS |
210 | spin_unlock_irqrestore(&iommu_lock, flags); |
211 | return -EINVAL; | |
212 | } | |
213 | ||
214 | for (i = 0; i < dma_domain->win_cnt; i++) { | |
215 | ret = pamu_update_paace_stash(liodn, i, val); | |
216 | if (ret) { | |
cd70d465 EM |
217 | pr_debug("Failed to update SPAACE %d field for liodn %d\n ", |
218 | i, liodn); | |
695093e3 VS |
219 | spin_unlock_irqrestore(&iommu_lock, flags); |
220 | return ret; | |
221 | } | |
222 | } | |
223 | ||
224 | spin_unlock_irqrestore(&iommu_lock, flags); | |
225 | ||
226 | return ret; | |
227 | } | |
228 | ||
229 | /* Set the geometry parameters for a LIODN */ | |
230 | static int pamu_set_liodn(int liodn, struct device *dev, | |
cd70d465 EM |
231 | struct fsl_dma_domain *dma_domain, |
232 | struct iommu_domain_geometry *geom_attr, | |
233 | u32 win_cnt) | |
695093e3 VS |
234 | { |
235 | phys_addr_t window_addr, window_size; | |
236 | phys_addr_t subwin_size; | |
237 | int ret = 0, i; | |
238 | u32 omi_index = ~(u32)0; | |
239 | unsigned long flags; | |
240 | ||
241 | /* | |
242 | * Configure the omi_index at the geometry setup time. | |
243 | * This is a static value which depends on the type of | |
244 | * device and would not change thereafter. | |
245 | */ | |
246 | get_ome_index(&omi_index, dev); | |
247 | ||
248 | window_addr = geom_attr->aperture_start; | |
249 | window_size = dma_domain->geom_size; | |
250 | ||
251 | spin_lock_irqsave(&iommu_lock, flags); | |
252 | ret = pamu_disable_liodn(liodn); | |
253 | if (!ret) | |
254 | ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, | |
255 | 0, dma_domain->snoop_id, | |
256 | dma_domain->stash_id, win_cnt, 0); | |
257 | spin_unlock_irqrestore(&iommu_lock, flags); | |
258 | if (ret) { | |
cd70d465 EM |
259 | pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", |
260 | liodn, win_cnt); | |
695093e3 VS |
261 | return ret; |
262 | } | |
263 | ||
264 | if (win_cnt > 1) { | |
265 | subwin_size = window_size >> ilog2(win_cnt); | |
266 | for (i = 0; i < win_cnt; i++) { | |
267 | spin_lock_irqsave(&iommu_lock, flags); | |
268 | ret = pamu_disable_spaace(liodn, i); | |
269 | if (!ret) | |
270 | ret = pamu_config_spaace(liodn, win_cnt, i, | |
271 | subwin_size, omi_index, | |
272 | 0, dma_domain->snoop_id, | |
273 | dma_domain->stash_id, | |
274 | 0, 0); | |
275 | spin_unlock_irqrestore(&iommu_lock, flags); | |
276 | if (ret) { | |
cd70d465 EM |
277 | pr_debug("SPAACE configuration failed for liodn %d\n", |
278 | liodn); | |
695093e3 VS |
279 | return ret; |
280 | } | |
281 | } | |
282 | } | |
283 | ||
284 | return ret; | |
285 | } | |
286 | ||
287 | static int check_size(u64 size, dma_addr_t iova) | |
288 | { | |
289 | /* | |
290 | * Size must be a power of two and at least be equal | |
291 | * to PAMU page size. | |
292 | */ | |
d033f48f | 293 | if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { |
cd70d465 | 294 | pr_debug("Size too small or not a power of two\n"); |
695093e3 VS |
295 | return -EINVAL; |
296 | } | |
297 | ||
cd70d465 | 298 | /* iova must be page size aligned */ |
695093e3 | 299 | if (iova & (size - 1)) { |
cd70d465 | 300 | pr_debug("Address is not aligned with window size\n"); |
695093e3 VS |
301 | return -EINVAL; |
302 | } | |
303 | ||
304 | return 0; | |
305 | } | |
306 | ||
307 | static struct fsl_dma_domain *iommu_alloc_dma_domain(void) | |
308 | { | |
309 | struct fsl_dma_domain *domain; | |
310 | ||
311 | domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); | |
312 | if (!domain) | |
313 | return NULL; | |
314 | ||
315 | domain->stash_id = ~(u32)0; | |
316 | domain->snoop_id = ~(u32)0; | |
317 | domain->win_cnt = pamu_get_max_subwin_cnt(); | |
318 | domain->geom_size = 0; | |
319 | ||
320 | INIT_LIST_HEAD(&domain->devices); | |
321 | ||
322 | spin_lock_init(&domain->domain_lock); | |
323 | ||
324 | return domain; | |
325 | } | |
326 | ||
695093e3 VS |
327 | static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) |
328 | { | |
329 | unsigned long flags; | |
330 | ||
331 | list_del(&info->link); | |
332 | spin_lock_irqsave(&iommu_lock, flags); | |
333 | if (win_cnt > 1) | |
334 | pamu_free_subwins(info->liodn); | |
335 | pamu_disable_liodn(info->liodn); | |
336 | spin_unlock_irqrestore(&iommu_lock, flags); | |
337 | spin_lock_irqsave(&device_domain_lock, flags); | |
338 | info->dev->archdata.iommu_domain = NULL; | |
339 | kmem_cache_free(iommu_devinfo_cache, info); | |
340 | spin_unlock_irqrestore(&device_domain_lock, flags); | |
341 | } | |
342 | ||
343 | static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) | |
344 | { | |
345 | struct device_domain_info *info, *tmp; | |
346 | unsigned long flags; | |
347 | ||
348 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
349 | /* Remove the device from the domain device list */ | |
350 | list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { | |
351 | if (!dev || (info->dev == dev)) | |
352 | remove_device_ref(info, dma_domain->win_cnt); | |
353 | } | |
354 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
355 | } | |
356 | ||
357 | static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev) | |
358 | { | |
359 | struct device_domain_info *info, *old_domain_info; | |
360 | unsigned long flags; | |
361 | ||
362 | spin_lock_irqsave(&device_domain_lock, flags); | |
363 | /* | |
364 | * Check here if the device is already attached to domain or not. | |
365 | * If the device is already attached to a domain detach it. | |
366 | */ | |
75f0e461 | 367 | old_domain_info = dev->archdata.iommu_domain; |
695093e3 VS |
368 | if (old_domain_info && old_domain_info->domain != dma_domain) { |
369 | spin_unlock_irqrestore(&device_domain_lock, flags); | |
370 | detach_device(dev, old_domain_info->domain); | |
371 | spin_lock_irqsave(&device_domain_lock, flags); | |
372 | } | |
373 | ||
374 | info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC); | |
375 | ||
376 | info->dev = dev; | |
377 | info->liodn = liodn; | |
378 | info->domain = dma_domain; | |
379 | ||
380 | list_add(&info->link, &dma_domain->devices); | |
381 | /* | |
382 | * In case of devices with multiple LIODNs just store | |
383 | * the info for the first LIODN as all | |
384 | * LIODNs share the same domain | |
385 | */ | |
75f0e461 | 386 | if (!dev->archdata.iommu_domain) |
695093e3 VS |
387 | dev->archdata.iommu_domain = info; |
388 | spin_unlock_irqrestore(&device_domain_lock, flags); | |
695093e3 VS |
389 | } |
390 | ||
391 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | |
cd70d465 | 392 | dma_addr_t iova) |
695093e3 | 393 | { |
8d4bfe40 | 394 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 | 395 | |
cd70d465 EM |
396 | if (iova < domain->geometry.aperture_start || |
397 | iova > domain->geometry.aperture_end) | |
695093e3 VS |
398 | return 0; |
399 | ||
400 | return get_phys_addr(dma_domain, iova); | |
401 | } | |
402 | ||
b7eb6785 | 403 | static bool fsl_pamu_capable(enum iommu_cap cap) |
695093e3 VS |
404 | { |
405 | return cap == IOMMU_CAP_CACHE_COHERENCY; | |
406 | } | |
407 | ||
8d4bfe40 | 408 | static void fsl_pamu_domain_free(struct iommu_domain *domain) |
695093e3 | 409 | { |
8d4bfe40 | 410 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
411 | |
412 | /* remove all the devices from the device list */ | |
413 | detach_device(NULL, dma_domain); | |
414 | ||
415 | dma_domain->enabled = 0; | |
416 | dma_domain->mapped = 0; | |
417 | ||
418 | kmem_cache_free(fsl_pamu_domain_cache, dma_domain); | |
419 | } | |
420 | ||
8d4bfe40 | 421 | static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) |
695093e3 VS |
422 | { |
423 | struct fsl_dma_domain *dma_domain; | |
424 | ||
8d4bfe40 JR |
425 | if (type != IOMMU_DOMAIN_UNMANAGED) |
426 | return NULL; | |
427 | ||
695093e3 VS |
428 | dma_domain = iommu_alloc_dma_domain(); |
429 | if (!dma_domain) { | |
430 | pr_debug("dma_domain allocation failed\n"); | |
8d4bfe40 | 431 | return NULL; |
695093e3 | 432 | } |
695093e3 | 433 | /* defaul geometry 64 GB i.e. maximum system address */ |
8d4bfe40 JR |
434 | dma_domain->iommu_domain. geometry.aperture_start = 0; |
435 | dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; | |
436 | dma_domain->iommu_domain.geometry.force_aperture = true; | |
695093e3 | 437 | |
8d4bfe40 | 438 | return &dma_domain->iommu_domain; |
695093e3 VS |
439 | } |
440 | ||
441 | /* Configure geometry settings for all LIODNs associated with domain */ | |
442 | static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, | |
443 | struct iommu_domain_geometry *geom_attr, | |
444 | u32 win_cnt) | |
445 | { | |
446 | struct device_domain_info *info; | |
447 | int ret = 0; | |
448 | ||
449 | list_for_each_entry(info, &dma_domain->devices, link) { | |
450 | ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, | |
cd70d465 | 451 | geom_attr, win_cnt); |
695093e3 VS |
452 | if (ret) |
453 | break; | |
454 | } | |
455 | ||
456 | return ret; | |
457 | } | |
458 | ||
459 | /* Update stash destination for all LIODNs associated with the domain */ | |
460 | static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) | |
461 | { | |
462 | struct device_domain_info *info; | |
463 | int ret = 0; | |
464 | ||
465 | list_for_each_entry(info, &dma_domain->devices, link) { | |
466 | ret = update_liodn_stash(info->liodn, dma_domain, val); | |
467 | if (ret) | |
468 | break; | |
469 | } | |
470 | ||
471 | return ret; | |
472 | } | |
473 | ||
474 | /* Update domain mappings for all LIODNs associated with the domain */ | |
475 | static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) | |
476 | { | |
477 | struct device_domain_info *info; | |
478 | int ret = 0; | |
479 | ||
480 | list_for_each_entry(info, &dma_domain->devices, link) { | |
481 | ret = update_liodn(info->liodn, dma_domain, wnd_nr); | |
482 | if (ret) | |
483 | break; | |
484 | } | |
485 | return ret; | |
486 | } | |
487 | ||
488 | static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) | |
489 | { | |
490 | struct device_domain_info *info; | |
491 | int ret = 0; | |
492 | ||
493 | list_for_each_entry(info, &dma_domain->devices, link) { | |
494 | if (dma_domain->win_cnt == 1 && dma_domain->enabled) { | |
495 | ret = pamu_disable_liodn(info->liodn); | |
496 | if (!ret) | |
497 | dma_domain->enabled = 0; | |
498 | } else { | |
499 | ret = pamu_disable_spaace(info->liodn, wnd_nr); | |
500 | } | |
501 | } | |
502 | ||
503 | return ret; | |
504 | } | |
505 | ||
506 | static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) | |
507 | { | |
8d4bfe40 | 508 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
509 | unsigned long flags; |
510 | int ret; | |
511 | ||
512 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
513 | if (!dma_domain->win_arr) { | |
514 | pr_debug("Number of windows not configured\n"); | |
515 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
516 | return; | |
517 | } | |
518 | ||
519 | if (wnd_nr >= dma_domain->win_cnt) { | |
520 | pr_debug("Invalid window index\n"); | |
521 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
522 | return; | |
523 | } | |
524 | ||
525 | if (dma_domain->win_arr[wnd_nr].valid) { | |
526 | ret = disable_domain_win(dma_domain, wnd_nr); | |
527 | if (!ret) { | |
528 | dma_domain->win_arr[wnd_nr].valid = 0; | |
529 | dma_domain->mapped--; | |
530 | } | |
531 | } | |
532 | ||
533 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
695093e3 VS |
534 | } |
535 | ||
536 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | |
537 | phys_addr_t paddr, u64 size, int prot) | |
538 | { | |
8d4bfe40 | 539 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
540 | struct dma_window *wnd; |
541 | int pamu_prot = 0; | |
542 | int ret; | |
543 | unsigned long flags; | |
544 | u64 win_size; | |
545 | ||
546 | if (prot & IOMMU_READ) | |
547 | pamu_prot |= PAACE_AP_PERMS_QUERY; | |
548 | if (prot & IOMMU_WRITE) | |
549 | pamu_prot |= PAACE_AP_PERMS_UPDATE; | |
550 | ||
551 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
552 | if (!dma_domain->win_arr) { | |
553 | pr_debug("Number of windows not configured\n"); | |
554 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
555 | return -ENODEV; | |
556 | } | |
557 | ||
558 | if (wnd_nr >= dma_domain->win_cnt) { | |
559 | pr_debug("Invalid window index\n"); | |
560 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
561 | return -EINVAL; | |
562 | } | |
563 | ||
564 | win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); | |
565 | if (size > win_size) { | |
cd70d465 | 566 | pr_debug("Invalid window size\n"); |
695093e3 VS |
567 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
568 | return -EINVAL; | |
569 | } | |
570 | ||
571 | if (dma_domain->win_cnt == 1) { | |
572 | if (dma_domain->enabled) { | |
573 | pr_debug("Disable the window before updating the mapping\n"); | |
574 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
575 | return -EBUSY; | |
576 | } | |
577 | ||
578 | ret = check_size(size, domain->geometry.aperture_start); | |
579 | if (ret) { | |
580 | pr_debug("Aperture start not aligned to the size\n"); | |
581 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
582 | return -EINVAL; | |
583 | } | |
584 | } | |
585 | ||
586 | wnd = &dma_domain->win_arr[wnd_nr]; | |
587 | if (!wnd->valid) { | |
588 | wnd->paddr = paddr; | |
589 | wnd->size = size; | |
590 | wnd->prot = pamu_prot; | |
591 | ||
592 | ret = update_domain_mapping(dma_domain, wnd_nr); | |
593 | if (!ret) { | |
594 | wnd->valid = 1; | |
595 | dma_domain->mapped++; | |
596 | } | |
597 | } else { | |
598 | pr_debug("Disable the window before updating the mapping\n"); | |
599 | ret = -EBUSY; | |
600 | } | |
601 | ||
602 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
603 | ||
604 | return ret; | |
605 | } | |
606 | ||
607 | /* | |
608 | * Attach the LIODN to the DMA domain and configure the geometry | |
609 | * and window mappings. | |
610 | */ | |
611 | static int handle_attach_device(struct fsl_dma_domain *dma_domain, | |
cd70d465 EM |
612 | struct device *dev, const u32 *liodn, |
613 | int num) | |
695093e3 VS |
614 | { |
615 | unsigned long flags; | |
8d4bfe40 | 616 | struct iommu_domain *domain = &dma_domain->iommu_domain; |
695093e3 VS |
617 | int ret = 0; |
618 | int i; | |
619 | ||
620 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
621 | for (i = 0; i < num; i++) { | |
695093e3 VS |
622 | /* Ensure that LIODN value is valid */ |
623 | if (liodn[i] >= PAACE_NUMBER_ENTRIES) { | |
6bd4f1c7 RH |
624 | pr_debug("Invalid liodn %d, attach device failed for %pOF\n", |
625 | liodn[i], dev->of_node); | |
695093e3 VS |
626 | ret = -EINVAL; |
627 | break; | |
628 | } | |
629 | ||
630 | attach_device(dma_domain, liodn[i], dev); | |
631 | /* | |
632 | * Check if geometry has already been configured | |
633 | * for the domain. If yes, set the geometry for | |
634 | * the LIODN. | |
635 | */ | |
636 | if (dma_domain->win_arr) { | |
637 | u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; | |
cd70d465 | 638 | |
695093e3 | 639 | ret = pamu_set_liodn(liodn[i], dev, dma_domain, |
cd70d465 | 640 | &domain->geometry, win_cnt); |
695093e3 VS |
641 | if (ret) |
642 | break; | |
643 | if (dma_domain->mapped) { | |
644 | /* | |
645 | * Create window/subwindow mapping for | |
646 | * the LIODN. | |
647 | */ | |
648 | ret = map_liodn(liodn[i], dma_domain); | |
649 | if (ret) | |
650 | break; | |
651 | } | |
652 | } | |
653 | } | |
654 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
655 | ||
656 | return ret; | |
657 | } | |
658 | ||
659 | static int fsl_pamu_attach_device(struct iommu_domain *domain, | |
660 | struct device *dev) | |
661 | { | |
8d4bfe40 | 662 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
663 | const u32 *liodn; |
664 | u32 liodn_cnt; | |
665 | int len, ret = 0; | |
666 | struct pci_dev *pdev = NULL; | |
667 | struct pci_controller *pci_ctl; | |
668 | ||
669 | /* | |
670 | * Use LIODN of the PCI controller while attaching a | |
671 | * PCI device. | |
672 | */ | |
b3eb76d1 | 673 | if (dev_is_pci(dev)) { |
695093e3 VS |
674 | pdev = to_pci_dev(dev); |
675 | pci_ctl = pci_bus_to_host(pdev->bus); | |
676 | /* | |
677 | * make dev point to pci controller device | |
678 | * so we can get the LIODN programmed by | |
679 | * u-boot. | |
680 | */ | |
681 | dev = pci_ctl->parent; | |
682 | } | |
683 | ||
684 | liodn = of_get_property(dev->of_node, "fsl,liodn", &len); | |
685 | if (liodn) { | |
686 | liodn_cnt = len / sizeof(u32); | |
cd70d465 | 687 | ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); |
695093e3 | 688 | } else { |
6bd4f1c7 | 689 | pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); |
cd70d465 | 690 | ret = -EINVAL; |
695093e3 VS |
691 | } |
692 | ||
693 | return ret; | |
694 | } | |
695 | ||
696 | static void fsl_pamu_detach_device(struct iommu_domain *domain, | |
cd70d465 | 697 | struct device *dev) |
695093e3 | 698 | { |
8d4bfe40 | 699 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
700 | const u32 *prop; |
701 | int len; | |
702 | struct pci_dev *pdev = NULL; | |
703 | struct pci_controller *pci_ctl; | |
704 | ||
705 | /* | |
706 | * Use LIODN of the PCI controller while detaching a | |
707 | * PCI device. | |
708 | */ | |
b3eb76d1 | 709 | if (dev_is_pci(dev)) { |
695093e3 VS |
710 | pdev = to_pci_dev(dev); |
711 | pci_ctl = pci_bus_to_host(pdev->bus); | |
712 | /* | |
713 | * make dev point to pci controller device | |
714 | * so we can get the LIODN programmed by | |
715 | * u-boot. | |
716 | */ | |
717 | dev = pci_ctl->parent; | |
718 | } | |
719 | ||
720 | prop = of_get_property(dev->of_node, "fsl,liodn", &len); | |
721 | if (prop) | |
722 | detach_device(dev, dma_domain); | |
723 | else | |
6bd4f1c7 | 724 | pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); |
695093e3 VS |
725 | } |
726 | ||
727 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) | |
728 | { | |
729 | struct iommu_domain_geometry *geom_attr = data; | |
8d4bfe40 | 730 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
731 | dma_addr_t geom_size; |
732 | unsigned long flags; | |
733 | ||
734 | geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1; | |
735 | /* | |
736 | * Sanity check the geometry size. Also, we do not support | |
737 | * DMA outside of the geometry. | |
738 | */ | |
739 | if (check_size(geom_size, geom_attr->aperture_start) || | |
cd70d465 EM |
740 | !geom_attr->force_aperture) { |
741 | pr_debug("Invalid PAMU geometry attributes\n"); | |
742 | return -EINVAL; | |
743 | } | |
695093e3 VS |
744 | |
745 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
746 | if (dma_domain->enabled) { | |
747 | pr_debug("Can't set geometry attributes as domain is active\n"); | |
748 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
749 | return -EBUSY; | |
750 | } | |
751 | ||
752 | /* Copy the domain geometry information */ | |
753 | memcpy(&domain->geometry, geom_attr, | |
754 | sizeof(struct iommu_domain_geometry)); | |
755 | dma_domain->geom_size = geom_size; | |
756 | ||
757 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
758 | ||
759 | return 0; | |
760 | } | |
761 | ||
762 | /* Set the domain stash attribute */ | |
763 | static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) | |
764 | { | |
765 | struct pamu_stash_attribute *stash_attr = data; | |
766 | unsigned long flags; | |
767 | int ret; | |
768 | ||
769 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
770 | ||
771 | memcpy(&dma_domain->dma_stash, stash_attr, | |
cd70d465 | 772 | sizeof(struct pamu_stash_attribute)); |
695093e3 VS |
773 | |
774 | dma_domain->stash_id = get_stash_id(stash_attr->cache, | |
775 | stash_attr->cpu); | |
776 | if (dma_domain->stash_id == ~(u32)0) { | |
777 | pr_debug("Invalid stash attributes\n"); | |
778 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
779 | return -EINVAL; | |
780 | } | |
781 | ||
782 | ret = update_domain_stash(dma_domain, dma_domain->stash_id); | |
783 | ||
784 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
785 | ||
786 | return ret; | |
787 | } | |
788 | ||
cd70d465 | 789 | /* Configure domain dma state i.e. enable/disable DMA */ |
695093e3 VS |
790 | static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) |
791 | { | |
792 | struct device_domain_info *info; | |
793 | unsigned long flags; | |
794 | int ret; | |
795 | ||
796 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
797 | ||
798 | if (enable && !dma_domain->mapped) { | |
799 | pr_debug("Can't enable DMA domain without valid mapping\n"); | |
800 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
801 | return -ENODEV; | |
802 | } | |
803 | ||
804 | dma_domain->enabled = enable; | |
cd70d465 | 805 | list_for_each_entry(info, &dma_domain->devices, link) { |
695093e3 VS |
806 | ret = (enable) ? pamu_enable_liodn(info->liodn) : |
807 | pamu_disable_liodn(info->liodn); | |
808 | if (ret) | |
809 | pr_debug("Unable to set dma state for liodn %d", | |
810 | info->liodn); | |
811 | } | |
812 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
813 | ||
814 | return 0; | |
815 | } | |
816 | ||
5131e08c RM |
817 | static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) |
818 | { | |
819 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); | |
820 | unsigned long flags; | |
821 | int ret; | |
822 | ||
823 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
824 | /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ | |
825 | if (dma_domain->enabled) { | |
826 | pr_debug("Can't set geometry attributes as domain is active\n"); | |
827 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
828 | return -EBUSY; | |
829 | } | |
830 | ||
831 | /* Ensure that the geometry has been set for the domain */ | |
832 | if (!dma_domain->geom_size) { | |
833 | pr_debug("Please configure geometry before setting the number of windows\n"); | |
834 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
835 | return -EINVAL; | |
836 | } | |
837 | ||
838 | /* | |
839 | * Ensure we have valid window count i.e. it should be less than | |
840 | * maximum permissible limit and should be a power of two. | |
841 | */ | |
842 | if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) { | |
843 | pr_debug("Invalid window count\n"); | |
844 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
845 | return -EINVAL; | |
846 | } | |
847 | ||
848 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, | |
849 | w_count > 1 ? w_count : 0); | |
850 | if (!ret) { | |
851 | kfree(dma_domain->win_arr); | |
852 | dma_domain->win_arr = kcalloc(w_count, | |
853 | sizeof(*dma_domain->win_arr), | |
854 | GFP_ATOMIC); | |
855 | if (!dma_domain->win_arr) { | |
856 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
857 | return -ENOMEM; | |
858 | } | |
859 | dma_domain->win_cnt = w_count; | |
860 | } | |
861 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
862 | ||
863 | return ret; | |
864 | } | |
865 | ||
695093e3 | 866 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, |
cd70d465 | 867 | enum iommu_attr attr_type, void *data) |
695093e3 | 868 | { |
8d4bfe40 | 869 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
870 | int ret = 0; |
871 | ||
695093e3 VS |
872 | switch (attr_type) { |
873 | case DOMAIN_ATTR_GEOMETRY: | |
874 | ret = configure_domain_geometry(domain, data); | |
875 | break; | |
876 | case DOMAIN_ATTR_FSL_PAMU_STASH: | |
877 | ret = configure_domain_stash(dma_domain, data); | |
878 | break; | |
879 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: | |
880 | ret = configure_domain_dma_state(dma_domain, *(int *)data); | |
881 | break; | |
701d8a62 | 882 | case DOMAIN_ATTR_WINDOWS: |
5131e08c | 883 | ret = fsl_pamu_set_windows(domain, *(u32 *)data); |
695093e3 VS |
884 | break; |
885 | default: | |
886 | pr_debug("Unsupported attribute type\n"); | |
887 | ret = -EINVAL; | |
888 | break; | |
cd70d465 | 889 | } |
695093e3 VS |
890 | |
891 | return ret; | |
892 | } | |
893 | ||
894 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | |
cd70d465 | 895 | enum iommu_attr attr_type, void *data) |
695093e3 | 896 | { |
8d4bfe40 | 897 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
898 | int ret = 0; |
899 | ||
695093e3 VS |
900 | switch (attr_type) { |
901 | case DOMAIN_ATTR_FSL_PAMU_STASH: | |
cd70d465 EM |
902 | memcpy(data, &dma_domain->dma_stash, |
903 | sizeof(struct pamu_stash_attribute)); | |
695093e3 VS |
904 | break; |
905 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: | |
906 | *(int *)data = dma_domain->enabled; | |
907 | break; | |
908 | case DOMAIN_ATTR_FSL_PAMUV1: | |
909 | *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; | |
910 | break; | |
701d8a62 | 911 | case DOMAIN_ATTR_WINDOWS: |
5131e08c | 912 | *(u32 *)data = dma_domain->win_cnt; |
695093e3 VS |
913 | break; |
914 | default: | |
915 | pr_debug("Unsupported attribute type\n"); | |
916 | ret = -EINVAL; | |
917 | break; | |
cd70d465 | 918 | } |
695093e3 VS |
919 | |
920 | return ret; | |
921 | } | |
922 | ||
695093e3 VS |
/*
 * Return the device's existing IOMMU group, or allocate a fresh
 * one if the device has none yet.
 */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	return group ? group : iommu_group_alloc();
}
933 | ||
/*
 * Determine whether this PCI controller supports endpoint
 * partitioning (per-endpoint IOMMU groups).
 */
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}
944 | ||
/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group: check every sibling on this bus,
	 * then walk up through the parent buses.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			/* First peer that already has a group wins. */
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	/* No peer anywhere on the bus hierarchy has a group yet. */
	return NULL;
}
970 | ||
/*
 * Pick the IOMMU group for a PCI device.  If the controller can
 * partition endpoints, each device gets core-assigned grouping;
 * otherwise all devices behind the controller share one group.
 * Returns the group or ERR_PTR(-ENODEV) if none could be found.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
1013 | ||
d5e58297 | 1014 | static struct iommu_group *fsl_pamu_device_group(struct device *dev) |
695093e3 | 1015 | { |
3170447c | 1016 | struct iommu_group *group = ERR_PTR(-ENODEV); |
d5e58297 | 1017 | int len; |
695093e3 VS |
1018 | |
1019 | /* | |
1020 | * For platform devices we allocate a separate group for | |
1021 | * each of the devices. | |
1022 | */ | |
d5e58297 JR |
1023 | if (dev_is_pci(dev)) |
1024 | group = get_pci_device_group(to_pci_dev(dev)); | |
1025 | else if (of_get_property(dev->of_node, "fsl,liodn", &len)) | |
1026 | group = get_device_iommu_group(dev); | |
695093e3 | 1027 | |
d5e58297 JR |
1028 | return group; |
1029 | } | |
695093e3 | 1030 | |
d5e58297 JR |
1031 | static int fsl_pamu_add_device(struct device *dev) |
1032 | { | |
1033 | struct iommu_group *group; | |
695093e3 | 1034 | |
d5e58297 | 1035 | group = iommu_group_get_for_dev(dev); |
3170447c | 1036 | if (IS_ERR(group)) |
695093e3 VS |
1037 | return PTR_ERR(group); |
1038 | ||
695093e3 | 1039 | iommu_group_put(group); |
d5e58297 | 1040 | |
68a17f0b JR |
1041 | iommu_device_link(&pamu_iommu, dev); |
1042 | ||
d5e58297 | 1043 | return 0; |
695093e3 VS |
1044 | } |
1045 | ||
/*
 * iommu_ops->remove_device callback: drop the sysfs link to the
 * PAMU IOMMU and take the device out of its IOMMU group,
 * undoing fsl_pamu_add_device().
 */
static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_device_unlink(&pamu_iommu, dev);
	iommu_group_remove_device(dev);
}
1051 | ||
/* IOMMU core callback table for the Freescale PAMU driver. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
	.device_group   = fsl_pamu_device_group,
};
1067 | ||
/*
 * Initialize the PAMU IOMMU driver: create the domain/devinfo
 * kmem caches, register the IOMMU instance with sysfs and the
 * IOMMU core, then hook the PAMU ops into the platform and PCI
 * buses.  Returns 0 on success or a negative errno.
 */
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	/* sysfs entry must exist before the core registration below. */
	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		/* Undo the sysfs registration on failure. */
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	/* From here on, platform and PCI devices use the PAMU ops. */
	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}