// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

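/* Mapping requests are accumulated in this per-cpu batch so that a run
 * of contiguous IOTSB entries can be programmed with a single
 * hypervisor call instead of one call per page.  All users run with
 * interrupts disabled, which is what keeps the per-cpu batch coherent.
 */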
static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (mask <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

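/* Start a batch at @entry: if the pending batch does not already end
 * exactly at @entry, flush it first so only physically consecutive
 * IOTSB indexes ever share one hypervisor call.
 */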
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

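/* Each DMA operation below picks its translation table from the DMA
 * mask in use: requests that fit in 32 bits go through the legacy
 * IOMMU window, wider masks go through the ATU's 64-bit IOTSB.
 */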
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	mask = dev->coherent_dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

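/* Bind every device below @bus_dev to the shared IOTSB, recursing
 * through PCI bridges; only endpoint devices are registered with the
 * hypervisor, the bridges themselves need no binding.
 */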
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for rest of the devices because we are sharing
			 * IOTSB. So in case of failure simply return with
			 * error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (dvma <= DMA_BIT_MASK(32)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return SPARC_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return SPARC_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

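/* Map a scatterlist, coalescing entries where possible: an element is
 * merged into the current DMA segment when its bus address directly
 * follows the previous one, the merged length stays within the
 * device's maximum segment size, and no segment boundary is crossed.
 */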
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = SPARC_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = SPARC_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

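/* A device mask wider than 32 bits can only be satisfied when the ATU
 * is present; without it we report no support so callers can fall back
 * to a 32-bit mask.
 */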
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask > DMA_BIT_MASK(32)) {
		if (iommu->atu)
			dma_addr_mask = iommu->atu->dma_addr_mask;
		else
			return 0;
	}

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;
	return pci64_dma_supported(to_pci_dev(dev), device_mask);
}

static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == SPARC_MAPPING_ERROR;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc				= dma_4v_alloc_coherent,
	.free				= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.dma_supported			= dma_4v_supported,
	.mapping_error			= dma_4v_mapping_error,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

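/* Import IOMMU mappings left behind by OBP.  Live entries whose target
 * page is still in the physically available pool are demapped; the
 * remaining live entries are marked busy in the allocator map so they
 * are never handed out again.
 */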
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

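/* The IOTSB needs one 8-byte IOTTE per IO page covered by the ATU
 * window.  Assuming the usual 32G ATU_64_SPACE_SIZE and 8K IO pages,
 * that is (32G / 8K) * 8 = 32M of table, allocated below as a single
 * physically contiguous block and registered with the hypervisor.
 */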
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported. Each range is a
	 * {base, size} pair. ranges[0] and ranges[1] are the 32bit address
	 * space while ranges[2] and ranges[3] are the 64bit space. We want
	 * a 64bit address range to support 64bit addressing. Because the
	 * 'size' of ranges[2] and ranges[3] is the same, we can select
	 * either of them for mapping. However, since that 'size' is too
	 * large for the OS to allocate an IOTSB for, we use the fixed size
	 * 32G (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

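/* The legacy IOMMU window is described by the "virtual-dma" OF
 * property as a {base, size} pair; when the property is absent we fall
 * back to a 2G window starting at 2G (0x80000000).
 */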
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

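/* The hypervisor reports the MSI event queue head as a byte offset
 * into the queue, not an entry index; pci_sun4v_dequeue_msi() advances
 * it by sizeof(struct pci_sun4v_msiq_entry) per event and wraps it
 * back to zero at the end of the queue.
 */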
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

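/* All event queues of a PBM live in one physically contiguous
 * allocation of msiq_num queues of msiq_ent_count entries each; every
 * queue is registered with the hypervisor and then read back to verify
 * that the configuration took effect.
 */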
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it is not a complete failure; we can still
	 * continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

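/* The first probed instance negotiates hypervisor API versions for the
 * whole driver: VPCI versions are tried from vpci_versions[] in
 * descending major order, and a failure to register the ATU group is
 * tolerated (hv_atu is cleared so no ATU hcalls are ever attempted).
 */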
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);