1 // SPDX-License-Identifier: GPL-2.0-or-later
3 ** IA64 System Bus Adapter (SBA) I/O MMU manager
5 ** (c) Copyright 2002-2005 Alex Williamson
6 ** (c) Copyright 2002-2003 Grant Grundler
7 ** (c) Copyright 2002-2005 Hewlett-Packard Company
9 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
10 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
14 ** This module initializes the IOC (I/O Controller) found on HP
15 ** McKinley machines and their successors.
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/init.h>
26 #include <linux/string.h>
27 #include <linux/pci.h>
28 #include <linux/proc_fs.h>
29 #include <linux/seq_file.h>
30 #include <linux/acpi.h>
31 #include <linux/efi.h>
32 #include <linux/nodemask.h>
33 #include <linux/bitops.h> /* hweight64() */
34 #include <linux/crash_dump.h>
35 #include <linux/iommu-helper.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/prefetch.h>
39 #include <asm/delay.h> /* ia64_get_itc() */
41 #include <asm/page.h> /* PAGE_OFFSET */
44 #include <asm/acpi-ext.h>
46 extern int swiotlb_late_init_with_default_size (size_t size);
51 ** Enables timing of pdir resource map searches. Output in /proc.
52 ** Disabled by default to optimize performance.
54 #undef PDIR_SEARCH_TIMING
57 ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
58 ** not defined, all DMA will be 32bit and go through the TLB.
59 ** There's potentially a conflict in the bio merge code with us
60 ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
61 ** appears to give more performance than bio-level virtual merging, we'll
62 ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
63 ** completely restrict DMA to the IOMMU.
65 #define ALLOW_IOV_BYPASS
68 ** This option specifically allows/disallows bypassing scatterlists with
69 ** multiple entries. Coalescing these entries can allow better DMA streaming
70 ** and in some cases shows better performance than entirely bypassing the
71 ** IOMMU. Performance increase on the order of 1-2% sequential output/input
72 ** using bonnie++ on a RAID0 MD device (sym2 & mpt).
74 #undef ALLOW_IOV_BYPASS_SG
77 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
78 ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
79 ** disconnect on 4k boundaries and prevent such issues. If the device is
80 ** particularly aggressive, this option will keep the entire pdir valid such
81 ** that prefetching will hit a valid address. This could severely impact
82 ** error containment, and is therefore off by default. The page that is
83 ** used for spill-over is poisoned, so that should help debugging somewhat.
85 #undef FULL_VALID_PDIR
87 #define ENABLE_MARK_CLEAN
90 ** The number of debug flags is a clue - this code is fragile. NOTE: since
91 ** tightening the use of res_lock the resource bitmap and actual pdir are no
92 ** longer guaranteed to stay in sync. The sanity checking code isn't going to catch this.
97 #undef DEBUG_SBA_RUN_SG
98 #undef DEBUG_SBA_RESOURCE
99 #undef ASSERT_PDIR_SANITY
100 #undef DEBUG_LARGE_SG_ENTRIES
103 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
104 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
107 #define SBA_INLINE __inline__
108 /* #define SBA_INLINE */
110 #ifdef DEBUG_SBA_INIT
111 #define DBG_INIT(x...) printk(x)
113 #define DBG_INIT(x...)
117 #define DBG_RUN(x...) printk(x)
119 #define DBG_RUN(x...)
122 #ifdef DEBUG_SBA_RUN_SG
123 #define DBG_RUN_SG(x...) printk(x)
125 #define DBG_RUN_SG(x...)
129 #ifdef DEBUG_SBA_RESOURCE
130 #define DBG_RES(x...) printk(x)
132 #define DBG_RES(x...)
136 #define DBG_BYPASS(x...) printk(x)
138 #define DBG_BYPASS(x...)
141 #ifdef ASSERT_PDIR_SANITY
142 #define ASSERT(expr) \
144 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
152 ** The number of pdir entries to "free" before issuing
153 ** a read to the PCOM register to flush out PCOM writes.
154 ** Interacts with allocation granularity (ie 4 or 8 entries
155 ** allocated and free'd/purged at a time might make this
156 ** less interesting).
158 #define DELAYED_RESOURCE_CNT 64
160 #define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
162 #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
163 #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
164 #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
165 #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
166 #define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
168 #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
170 #define IOC_FUNC_ID 0x000
171 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
172 #define IOC_IBASE 0x300 /* IO TLB */
173 #define IOC_IMASK 0x308
174 #define IOC_PCOM 0x310
175 #define IOC_TCNFG 0x318
176 #define IOC_PDIR_BASE 0x320
178 #define IOC_ROPE0_CFG 0x500
179 #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
182 /* AGP GART driver looks for this */
183 #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
186 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
188 ** Some IOCs (sx1000) can run at the above page sizes, but are
189 ** really only supported using the IOC at a 4k page size.
191 ** iovp_size could only be greater than PAGE_SIZE if we are
192 ** confident the drivers really only touch the next physical
193 ** page iff that driver instance owns it.
195 static unsigned long iovp_size;
196 static unsigned long iovp_shift;
197 static unsigned long iovp_mask;
200 void __iomem *ioc_hpa; /* I/O MMU base address */
201 char *res_map; /* resource map, bit == pdir entry */
202 u64 *pdir_base; /* physical base address */
203 unsigned long ibase; /* pdir IOV Space base */
204 unsigned long imask; /* pdir IOV Space mask */
206 unsigned long *res_hint; /* next avail IOVP - circular search */
207 unsigned long dma_mask;
208 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
209 /* clearing pdir to prevent races with allocations. */
210 unsigned int res_bitshift; /* from the RIGHT! */
211 unsigned int res_size; /* size of resource map in bytes */
213 unsigned int node; /* node where this IOC lives */
215 #if DELAYED_RESOURCE_CNT > 0
216 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
217 /* than res_lock for bigger systems. */
219 struct sba_dma_pair {
222 } saved[DELAYED_RESOURCE_CNT];
225 #ifdef PDIR_SEARCH_TIMING
226 #define SBA_SEARCH_SAMPLE 0x100
227 unsigned long avg_search[SBA_SEARCH_SAMPLE];
228 unsigned long avg_idx; /* current index into avg_search */
231 /* Stuff we don't need in performance path */
232 struct ioc *next; /* list of IOC's in system */
233 acpi_handle handle; /* for multiple IOC's */
235 unsigned int func_id;
236 unsigned int rev; /* HW revision of chip */
238 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
239 struct pci_dev *sac_only_dev;
242 static struct ioc *ioc_list, *ioc_found;
243 static int reserve_sba_gart = 1;
245 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
246 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
248 #define sba_sg_address(sg) sg_virt((sg))
250 #ifdef FULL_VALID_PDIR
251 static u64 prefetch_spill_page;
255 # define GET_IOC(dev) ((dev_is_pci(dev)) \
256 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
258 # define GET_IOC(dev) NULL
262 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
263 ** (or rather not merge) DMAs into manageable chunks.
264 ** On parisc, this is more of a software/tuning constraint
265 ** rather than the HW. I/O MMU allocation algorithms can be
266 ** faster with smaller sizes (to some degree).
268 #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
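/* e.g. 64 * 4KB == 256KB per chunk when 4KB IOMMU pages are in use */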
270 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
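/* e.g. ROUNDUP(0x1234, 0x1000) == 0x2000; y must be a power of two */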
272 /************************************
273 ** SBA register read and write support
275 ** BE WARNED: register writes are posted.
276 ** (ie follow writes which must reach HW with a read)
279 #define READ_REG(addr) __raw_readq(addr)
280 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
282 #ifdef DEBUG_SBA_INIT
285 * sba_dump_tlb - debugging only - print IOMMU operating parameters
286 * @hpa: base address of the IOMMU
288 * Print the size/location of the IO MMU PDIR.
291 sba_dump_tlb(char *hpa)
293 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
294 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
295 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
296 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
297 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
303 #ifdef ASSERT_PDIR_SANITY
306 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
307 * @ioc: IO MMU structure which owns the pdir we are interested in.
308 * @msg: text to print on the output line.
311 * Print one entry of the IO MMU PDIR in human readable form.
314 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
316 /* start printing from lowest pde in rval */
317 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
318 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
321 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
322 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
325 while (rcnt < BITS_PER_LONG) {
326 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
327 (rcnt == (pide & (BITS_PER_LONG - 1)))
329 rcnt, ptr, (unsigned long long) *ptr );
333 printk(KERN_DEBUG "%s", msg);
338 * sba_check_pdir - debugging only - consistency checker
339 * @ioc: IO MMU structure which owns the pdir we are interested in.
340 * @msg: text to print on the output line.
342 * Verify the resource map and pdir state is consistent
345 sba_check_pdir(struct ioc *ioc, char *msg)
347 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
348 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
349 u64 *pptr = ioc->pdir_base; /* pdir ptr */
352 while (rptr < rptr_end) {
354 int rcnt; /* number of bits we might check */
360 /* Get last byte and highest bit from that */
361 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
362 if ((rval & 0x1) ^ pde)
365 ** BUMMER! -- res_map != pdir --
366 ** Dump rval and matching pdir entries
368 sba_dump_pdir_entry(ioc, msg, pide);
372 rval >>= 1; /* try the next bit */
376 rptr++; /* look at next word of res_map */
378 /* It'd be nice if we always got here :^) */
384 * sba_dump_sg - debugging only - print Scatter-Gather list
385 * @ioc: IO MMU structure which owns the pdir we are interested in.
386 * @startsg: head of the SG list
387 * @nents: number of entries in SG list
389 * print the SG list so we can verify it's correct by hand.
392 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
394 while (nents-- > 0) {
395 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
396 startsg->dma_address, startsg->dma_length,
397 sba_sg_address(startsg));
398 startsg = sg_next(startsg);
403 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
405 struct scatterlist *the_sg = startsg;
406 int the_nents = nents;
408 while (the_nents-- > 0) {
409 if (sba_sg_address(the_sg) == 0x0UL)
410 sba_dump_sg(NULL, startsg, nents);
411 the_sg = sg_next(the_sg);
415 #endif /* ASSERT_PDIR_SANITY */
420 /**************************************************************
422 * I/O Pdir Resource Management
424 * Bits set in the resource map are in use.
425 * Each bit can represent a number of pages.
426 * LSbs represent lower addresses (IOVA's).
428 ***************************************************************/
429 #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
431 /* Convert from IOVP to IOVA and vice versa. */
432 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
433 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
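/*
** e.g. (illustrative values) with ibase 0xf0000000:
** SBA_IOVA(ioc, 0x4000, 0x123) == 0xf0004123 and
** SBA_IOVP(ioc, 0xf0004123) == 0x4123
*/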
435 #define PDIR_ENTRY_SIZE sizeof(u64)
437 #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
439 #define RESMAP_MASK(n) ~(~0UL << (n))
440 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
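/* e.g. RESMAP_MASK(4) == 0xfUL (the low n bits set); ~RESMAP_IDX_MASK rounds
   a byte index down to an unsigned long boundary */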
444 * For most cases the normal get_order is sufficient; however, it limits us
445 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
446 * It only incurs about 1 clock cycle to use this one with the static variable
447 * and makes the code more intuitive.
449 static SBA_INLINE int
450 get_iovp_order (unsigned long size)
452 long double d = size - 1;
455 order = ia64_getf_exp(d);
456 order = order - iovp_shift - 0xffff + 1;
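/*
** getf.exp returns the biased exponent (bias 0xffff) of (size - 1), i.e.
** floor(log2(size - 1)) + 0xffff. Removing the bias and iovp_shift and
** adding 1 leaves log2 of the size rounded up to a whole power-of-two
** number of IOMMU pages.
*/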
462 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
463 unsigned int bitshiftcnt)
465 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
470 * sba_search_bitmap - find free space in IO PDIR resource bitmap
471 * @ioc: IO MMU structure which owns the pdir we are interested in.
472 * @bits_wanted: number of entries we need.
473 * @use_hint: use res_hint to indicate where to start looking
475 * Find consecutive free bits in resource bitmap.
476 * Each bit represents one entry in the IO Pdir.
477 * Cool perf optimization: search for log2(size) bits at a time.
479 static SBA_INLINE unsigned long
480 sba_search_bitmap(struct ioc *ioc, struct device *dev,
481 unsigned long bits_wanted, int use_hint)
483 unsigned long *res_ptr;
484 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
485 unsigned long flags, pide = ~0UL, tpide;
486 unsigned long boundary_size;
490 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
491 ASSERT(res_ptr < res_end);
493 boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
494 boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
496 BUG_ON(ioc->ibase & ~iovp_mask);
497 shift = ioc->ibase >> iovp_shift;
499 spin_lock_irqsave(&ioc->res_lock, flags);
501 /* Allow caller to force a search through the entire resource space */
502 if (likely(use_hint)) {
503 res_ptr = ioc->res_hint;
505 res_ptr = (ulong *)ioc->res_map;
506 ioc->res_bitshift = 0;
510 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
511 * if a TLB entry is purged while in use. sba_mark_invalid()
512 * purges IOTLB entries in power-of-two sizes, so we also
513 * allocate IOVA space in power-of-two sizes.
515 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
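/* e.g. a request for 3 IOMMU pages is rounded up to 4, keeping the later
   PCOM purge in sba_mark_invalid() power-of-two sized and aligned */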
517 if (likely(bits_wanted == 1)) {
518 unsigned int bitshiftcnt;
519 for(; res_ptr < res_end ; res_ptr++) {
520 if (likely(*res_ptr != ~0UL)) {
521 bitshiftcnt = ffz(*res_ptr);
522 *res_ptr |= (1UL << bitshiftcnt);
523 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
524 ioc->res_bitshift = bitshiftcnt + bits_wanted;
532 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
534 ** Search the resource bit map on well-aligned values.
535 ** "o" is the alignment.
536 ** We need the alignment to invalidate I/O TLB using
537 ** SBA HW features in the unmap path.
539 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
540 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
541 unsigned long mask, base_mask;
543 base_mask = RESMAP_MASK(bits_wanted);
544 mask = base_mask << bitshiftcnt;
546 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
547 for(; res_ptr < res_end ; res_ptr++)
549 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
551 for (; mask ; mask <<= o, bitshiftcnt += o) {
552 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
553 ret = iommu_is_span_boundary(tpide, bits_wanted,
556 if ((0 == ((*res_ptr) & mask)) && !ret) {
557 *res_ptr |= mask; /* mark resources busy! */
559 ioc->res_bitshift = bitshiftcnt + bits_wanted;
573 qwords = bits_wanted >> 6; /* /64 */
574 bits = bits_wanted - (qwords * BITS_PER_LONG);
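/* e.g. bits_wanted == 96 splits into qwords == 1 full word plus bits == 32 */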
576 end = res_end - qwords;
578 for (; res_ptr < end; res_ptr++) {
579 tpide = ptr_to_pide(ioc, res_ptr, 0);
580 ret = iommu_is_span_boundary(tpide, bits_wanted,
581 shift, boundary_size);
584 for (i = 0 ; i < qwords ; i++) {
588 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
591 /* Found it, mark it */
592 for (i = 0 ; i < qwords ; i++)
594 res_ptr[i] |= RESMAP_MASK(bits);
598 ioc->res_bitshift = bits;
606 prefetch(ioc->res_map);
607 ioc->res_hint = (unsigned long *) ioc->res_map;
608 ioc->res_bitshift = 0;
609 spin_unlock_irqrestore(&ioc->res_lock, flags);
613 ioc->res_hint = res_ptr;
614 spin_unlock_irqrestore(&ioc->res_lock, flags);
620 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
621 * @ioc: IO MMU structure which owns the pdir we are interested in.
622 * @size: number of bytes to create a mapping for
624 * Given a size, find consecutive unmarked bits and then mark those bits in the resource bitmap.
628 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
630 unsigned int pages_needed = size >> iovp_shift;
631 #ifdef PDIR_SEARCH_TIMING
632 unsigned long itc_start;
636 ASSERT(pages_needed);
637 ASSERT(0 == (size & ~iovp_mask));
639 #ifdef PDIR_SEARCH_TIMING
640 itc_start = ia64_get_itc();
643 ** "seek and ye shall find"...praying never hurts either...
645 pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
646 if (unlikely(pide >= (ioc->res_size << 3))) {
647 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
648 if (unlikely(pide >= (ioc->res_size << 3))) {
649 #if DELAYED_RESOURCE_CNT > 0
653 ** With delayed resource freeing, we can give this one more shot. We're
654 ** getting close to being in trouble here, so do what we can to make this one count.
657 spin_lock_irqsave(&ioc->saved_lock, flags);
658 if (ioc->saved_cnt > 0) {
659 struct sba_dma_pair *d;
660 int cnt = ioc->saved_cnt;
662 d = &(ioc->saved[ioc->saved_cnt - 1]);
664 spin_lock(&ioc->res_lock);
666 sba_mark_invalid(ioc, d->iova, d->size);
667 sba_free_range(ioc, d->iova, d->size);
671 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
672 spin_unlock(&ioc->res_lock);
674 spin_unlock_irqrestore(&ioc->saved_lock, flags);
676 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
677 if (unlikely(pide >= (ioc->res_size << 3))) {
678 printk(KERN_WARNING "%s: I/O MMU @ %p is "
679 "out of mapping resources, %u %u %lx\n",
680 __func__, ioc->ioc_hpa, ioc->res_size,
681 pages_needed, dma_get_seg_boundary(dev));
685 printk(KERN_WARNING "%s: I/O MMU @ %p is "
686 "out of mapping resources, %u %u %lx\n",
687 __func__, ioc->ioc_hpa, ioc->res_size,
688 pages_needed, dma_get_seg_boundary(dev));
694 #ifdef PDIR_SEARCH_TIMING
695 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
696 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
699 prefetchw(&(ioc->pdir_base[pide]));
701 #ifdef ASSERT_PDIR_SANITY
702 /* verify the first enable bit is clear */
703 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
704 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
708 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
709 __func__, size, pages_needed, pide,
710 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
718 * sba_free_range - unmark bits in IO PDIR resource bitmap
719 * @ioc: IO MMU structure which owns the pdir we are interested in.
720 * @iova: IO virtual address which was previously allocated.
721 * @size: number of bytes to create a mapping for
723 * clear bits in the ioc's resource map
725 static SBA_INLINE void
726 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
728 unsigned long iovp = SBA_IOVP(ioc, iova);
729 unsigned int pide = PDIR_INDEX(iovp);
730 unsigned int ridx = pide >> 3; /* convert bit to byte address */
731 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
732 int bits_not_wanted = size >> iovp_shift;
735 /* Round up to power-of-two size: see AR2305 note above */
736 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
737 for (; bits_not_wanted > 0 ; res_ptr++) {
739 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
741 /* these mappings start 64bit aligned */
743 bits_not_wanted -= BITS_PER_LONG;
744 pide += BITS_PER_LONG;
748 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
749 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
752 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
753 bits_not_wanted, m, pide, res_ptr, *res_ptr);
756 ASSERT(bits_not_wanted);
757 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
764 /**************************************************************
766 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
768 ***************************************************************/
771 * sba_io_pdir_entry - fill in one IO PDIR entry
772 * @pdir_ptr: pointer to IO PDIR entry
773 * @vba: Virtual CPU address of buffer to map
775 * SBA Mapping Routine
777 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
778 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
779 * Each IO Pdir entry consists of 8 bytes as shown below
783 * +-+---------------------+----------------------------------+----+--------+
784 * |V|          U          |            PPN[39:12]            | U  |   FF   |
785 * +-+---------------------+----------------------------------+----+--------+
789 * PPN == Physical Page Number
791 * The physical address fields are filled with the results of virt_to_phys()
796 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
797 | 0x8000000000000000ULL)
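/*
** The mask ~0xE000000000000FFF clears the ia64 region bits (63:61), which for
** the identity-mapped kernel region yields the physical address, and clears
** the low 12 offset bits; OR'ing in bit 63 sets the Valid bit. (The
** out-of-line variant below also fills the FF byte.)
*/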
800 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
802 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
806 #ifdef ENABLE_MARK_CLEAN
808 * Since DMA is i-cache coherent, any (complete) pages that were written via
809 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
810 * flush them when they get mapped into an executable vm-area.
813 mark_clean (void *addr, size_t size)
815 unsigned long pg_addr, end;
817 pg_addr = PAGE_ALIGN((unsigned long) addr);
818 end = (unsigned long) addr + size;
819 while (pg_addr + PAGE_SIZE <= end) {
820 struct page *page = virt_to_page((void *)pg_addr);
821 set_bit(PG_arch_1, &page->flags);
822 pg_addr += PAGE_SIZE;
828 * sba_mark_invalid - invalidate one or more IO PDIR entries
829 * @ioc: IO MMU structure which owns the pdir we are interested in.
830 * @iova: IO Virtual Address mapped earlier
831 * @byte_cnt: number of bytes this mapping covers.
833 * Mark the IO PDIR entry(ies) as invalid and purge the corresponding
834 * IO TLB entry. The PCOM (Purge Command Register)
835 * is used to purge stale entries from the IO TLB when unmapping.
837 * The PCOM register supports purging of multiple pages, with a minimum
838 * of 1 page and a maximum of 2GB. Hardware requires that the address be
839 * aligned to the size of the range being purged. The size of the range
840 * must be a power of 2. The "Cool perf optimization" in the
841 * allocation routine helps keep that true.
843 static SBA_INLINE void
844 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
846 u32 iovp = (u32) SBA_IOVP(ioc,iova);
848 int off = PDIR_INDEX(iovp);
850 /* Must be non-zero and rounded up */
851 ASSERT(byte_cnt > 0);
852 ASSERT(0 == (byte_cnt & ~iovp_mask));
854 #ifdef ASSERT_PDIR_SANITY
855 /* Assert first pdir entry is set */
856 if (!(ioc->pdir_base[off] >> 60)) {
857 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
861 if (byte_cnt <= iovp_size)
863 ASSERT(off < ioc->pdir_size);
865 iovp |= iovp_shift; /* set "size" field for PCOM */
867 #ifndef FULL_VALID_PDIR
869 ** clear I/O PDIR entry "valid" bit
870 ** Do NOT clear the rest - save it for debugging.
871 ** We should only clear bits that have previously been enabled.
874 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
877 ** If we want to maintain the PDIR as valid, put in
878 ** the spill page so devices prefetching won't
879 ** cause a hard fail.
881 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
884 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
887 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
890 /* verify this pdir entry is enabled */
891 ASSERT(ioc->pdir_base[off] >> 63);
892 #ifndef FULL_VALID_PDIR
893 /* clear I/O Pdir entry "valid" bit first */
894 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
896 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
899 byte_cnt -= iovp_size;
900 } while (byte_cnt > 0);
903 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
907 * sba_map_page - map one buffer and return IOVA for DMA
908 * @dev: instance of PCI owned by the driver that's asking.
910 * @poff: offset into page
911 * @size: number of bytes to map
912 * @dir: dma direction
913 * @attrs: optional dma attributes
915 * See Documentation/DMA-API-HOWTO.txt
917 static dma_addr_t sba_map_page(struct device *dev, struct page *page,
918 unsigned long poff, size_t size,
919 enum dma_data_direction dir,
923 void *addr = page_address(page) + poff;
928 #ifdef ASSERT_PDIR_SANITY
931 #ifdef ALLOW_IOV_BYPASS
932 unsigned long pci_addr = virt_to_phys(addr);
935 #ifdef ALLOW_IOV_BYPASS
936 ASSERT(to_pci_dev(dev)->dma_mask);
938 ** Check if the PCI device can DMA to ptr... if so, just return ptr
940 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
942 ** Device is capable of DMA'ing to the buffer...
943 ** just return the PCI address of ptr
945 DBG_BYPASS("sba_map_page() bypass mask/addr: "
947 to_pci_dev(dev)->dma_mask, pci_addr);
954 prefetch(ioc->res_hint);
957 ASSERT(size <= DMA_CHUNK_SIZE);
959 /* save offset bits */
960 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
962 /* round up to nearest iovp_size */
963 size = (size + offset + ~iovp_mask) & iovp_mask;
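/* e.g. with 4KB IOMMU pages: offset 0x100 and size 0x1000 round up to
   size 0x2000, i.e. the mapping spans two IOMMU pages */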
965 #ifdef ASSERT_PDIR_SANITY
966 spin_lock_irqsave(&ioc->res_lock, flags);
967 if (sba_check_pdir(ioc,"Check before sba_map_page()"))
968 panic("Sanity check failed");
969 spin_unlock_irqrestore(&ioc->res_lock, flags);
972 pide = sba_alloc_range(ioc, dev, size);
974 return DMA_MAPPING_ERROR;
976 iovp = (dma_addr_t) pide << iovp_shift;
978 DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
980 pdir_start = &(ioc->pdir_base[pide]);
983 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
984 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
986 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
992 /* force pdir update */
995 /* form complete address */
996 #ifdef ASSERT_PDIR_SANITY
997 spin_lock_irqsave(&ioc->res_lock, flags);
998 sba_check_pdir(ioc,"Check after sba_map_page()");
999 spin_unlock_irqrestore(&ioc->res_lock, flags);
1001 return SBA_IOVA(ioc, iovp, offset);
1004 #ifdef ENABLE_MARK_CLEAN
1005 static SBA_INLINE void
1006 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1008 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1009 int off = PDIR_INDEX(iovp);
1012 if (size <= iovp_size) {
1013 addr = phys_to_virt(ioc->pdir_base[off] &
1014 ~0xE000000000000FFFULL);
1015 mark_clean(addr, size);
1018 addr = phys_to_virt(ioc->pdir_base[off] &
1019 ~0xE000000000000FFFULL);
1020 mark_clean(addr, min(size, iovp_size));
1029 * sba_unmap_page - unmap one IOVA and free resources
1030 * @dev: instance of PCI owned by the driver that's asking.
1031 * @iova: IOVA of driver buffer previously mapped.
1032 * @size: number of bytes mapped in driver buffer.
1033 * @dir: R/W or both.
1034 * @attrs: optional dma attributes
1036 * See Documentation/DMA-API-HOWTO.txt
1038 static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1039 enum dma_data_direction dir, unsigned long attrs)
1042 #if DELAYED_RESOURCE_CNT > 0
1043 struct sba_dma_pair *d;
1045 unsigned long flags;
1051 #ifdef ALLOW_IOV_BYPASS
1052 if (likely((iova & ioc->imask) != ioc->ibase)) {
1054 ** Address does not fall w/in IOVA, must be bypassing
1056 DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
1059 #ifdef ENABLE_MARK_CLEAN
1060 if (dir == DMA_FROM_DEVICE) {
1061 mark_clean(phys_to_virt(iova), size);
1067 offset = iova & ~iovp_mask;
1069 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1071 iova ^= offset; /* clear offset bits */
1073 size = ROUNDUP(size, iovp_size);
1075 #ifdef ENABLE_MARK_CLEAN
1076 if (dir == DMA_FROM_DEVICE)
1077 sba_mark_clean(ioc, iova, size);
1080 #if DELAYED_RESOURCE_CNT > 0
1081 spin_lock_irqsave(&ioc->saved_lock, flags);
1082 d = &(ioc->saved[ioc->saved_cnt]);
1085 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1086 int cnt = ioc->saved_cnt;
1087 spin_lock(&ioc->res_lock);
1089 sba_mark_invalid(ioc, d->iova, d->size);
1090 sba_free_range(ioc, d->iova, d->size);
1094 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1095 spin_unlock(&ioc->res_lock);
1097 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1098 #else /* DELAYED_RESOURCE_CNT == 0 */
1099 spin_lock_irqsave(&ioc->res_lock, flags);
1100 sba_mark_invalid(ioc, iova, size);
1101 sba_free_range(ioc, iova, size);
1102 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1103 spin_unlock_irqrestore(&ioc->res_lock, flags);
1104 #endif /* DELAYED_RESOURCE_CNT == 0 */
1108 * sba_alloc_coherent - allocate/map shared mem for DMA
1109 * @dev: instance of PCI owned by the driver that's asking.
1110 * @size: number of bytes mapped in driver buffer.
1111 * @dma_handle: IOVA of new buffer.
1113 * See Documentation/DMA-API-HOWTO.txt
1116 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
1117 gfp_t flags, unsigned long attrs)
1130 page = alloc_pages_node(node, flags, get_order(size));
1131 if (unlikely(!page))
1134 addr = page_address(page);
1135 memset(addr, 0, size);
1136 *dma_handle = page_to_phys(page);
1138 #ifdef ALLOW_IOV_BYPASS
1139 ASSERT(dev->coherent_dma_mask);
1141 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1143 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1144 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1145 dev->coherent_dma_mask, *dma_handle);
1152 * If device can't bypass or bypass is disabled, pass the 32bit fake
1153 * device to map single to get an iova mapping.
1155 *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
1156 DMA_BIDIRECTIONAL, 0);
1157 if (dma_mapping_error(dev, *dma_handle))
1164 * sba_free_coherent - free/unmap shared mem for DMA
1165 * @dev: instance of PCI owned by the driver that's asking.
1166 * @size: number of bytes mapped in driver buffer.
1167 * @vaddr: virtual address IOVA of "consistent" buffer.
1168 * @dma_handle: IO virtual address of "consistent" buffer.
1170 * See Documentation/DMA-API-HOWTO.txt
1172 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
1173 dma_addr_t dma_handle, unsigned long attrs)
1175 sba_unmap_page(dev, dma_handle, size, 0, 0);
1176 free_pages((unsigned long) vaddr, get_order(size));
1181 ** Since 0 is a valid pdir_base index value, can't use that
1182 ** to determine if a value is valid or not. Use a flag to indicate
1183 ** the SG list entry contains a valid pdir index.
1185 #define PIDE_FLAG 0x1UL
1187 #ifdef DEBUG_LARGE_SG_ENTRIES
1188 int dump_run_sg = 0;
1193 * sba_fill_pdir - write allocated SG entries into IO PDIR
1194 * @ioc: IO MMU structure which owns the pdir we are interested in.
1195 * @startsg: list of IOVA/size pairs
1196 * @nents: number of entries in startsg list
1198 * Take preprocessed SG list and write corresponding entries
1202 static SBA_INLINE int
1205 struct scatterlist *startsg,
1208 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1211 unsigned long dma_offset = 0;
1213 while (nents-- > 0) {
1214 int cnt = startsg->dma_length;
1215 startsg->dma_length = 0;
1217 #ifdef DEBUG_LARGE_SG_ENTRIES
1219 printk(" %2d : %08lx/%05x %p\n",
1220 nents, startsg->dma_address, cnt,
1221 sba_sg_address(startsg));
1223 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1224 nents, startsg->dma_address, cnt,
1225 sba_sg_address(startsg));
1228 ** Look for the start of a new DMA stream
1230 if (startsg->dma_address & PIDE_FLAG) {
1231 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1232 dma_offset = (unsigned long) pide & ~iovp_mask;
1233 startsg->dma_address = 0;
1235 dma_sg = sg_next(dma_sg);
1236 dma_sg->dma_address = pide | ioc->ibase;
1237 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1242 ** Look for a VCONTIG chunk
1245 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1248 /* Since multiple Vcontig blocks could make up
1249 ** one DMA stream, *add* cnt to dma_len.
1251 dma_sg->dma_length += cnt;
1253 dma_offset=0; /* only want offset on first chunk */
1254 cnt = ROUNDUP(cnt, iovp_size);
1256 sba_io_pdir_entry(pdirp, vaddr);
1262 startsg = sg_next(startsg);
1264 /* force pdir update */
1267 #ifdef DEBUG_LARGE_SG_ENTRIES
1275 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1276 ** "start of next" are both on an IOV page boundary.
1278 ** (shift left is a quick trick to mask off upper bits)
1280 #define DMA_CONTIG(__X, __Y) \
1281 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
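/*
** e.g. with iovp_shift == 12: DMA_CONTIG(0x7000, 0xa000) is true (both 4KB
** aligned) while DMA_CONTIG(0x7800, 0xa000) is false (prev ends mid-page).
*/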
1285 * sba_coalesce_chunks - preprocess the SG list
1286 * @ioc: IO MMU structure which owns the pdir we are interested in.
1287 * @startsg: list of IOVA/size pairs
1288 * @nents: number of entries in startsg list
1290 * First pass is to walk the SG list and determine where the breaks are
1291 * in the DMA stream. Allocates PDIR entries but does not fill them.
1292 * Returns the number of DMA chunks.
1294 * Doing the fill separately from the coalescing/allocation keeps the
1295 * code simpler. Future enhancement could make one pass through
1296 * the sglist do both.
1298 static SBA_INLINE int
1299 sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1300 struct scatterlist *startsg,
1303 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1304 unsigned long vcontig_len; /* len of VCONTIG chunk */
1305 unsigned long vcontig_end;
1306 struct scatterlist *dma_sg; /* next DMA stream head */
1307 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1309 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1313 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1316 ** Prepare for first/next DMA stream
1318 dma_sg = vcontig_sg = startsg;
1319 dma_len = vcontig_len = vcontig_end = startsg->length;
1320 vcontig_end += vaddr;
1321 dma_offset = vaddr & ~iovp_mask;
1323 /* PARANOID: clear entries */
1324 startsg->dma_address = startsg->dma_length = 0;
1327 ** This loop terminates one iteration "early" since
1328 ** it's always looking one "ahead".
1330 while (--nents > 0) {
1331 unsigned long vaddr; /* tmp */
1333 startsg = sg_next(startsg);
1336 startsg->dma_address = startsg->dma_length = 0;
1338 /* catch brokenness in SCSI layer */
1339 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1342 ** First make sure current dma stream won't
1343 ** exceed DMA_CHUNK_SIZE if we coalesce the
1346 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1350 if (dma_len + startsg->length > max_seg_size)
1354 ** Then look for virtually contiguous blocks.
1356 ** append the next transaction?
1358 vaddr = (unsigned long) sba_sg_address(startsg);
1359 if (vcontig_end == vaddr)
1361 vcontig_len += startsg->length;
1362 vcontig_end += startsg->length;
1363 dma_len += startsg->length;
1367 #ifdef DEBUG_LARGE_SG_ENTRIES
1368 dump_run_sg = (vcontig_len > iovp_size);
1372 ** Not virtually contiguous.
1373 ** Terminate prev chunk.
1374 ** Start a new chunk.
1376 ** Once we start a new VCONTIG chunk, dma_offset
1377 ** can't change. And we need the offset from the first
1378 ** chunk - not the last one. Ergo successive chunks
1379 ** must start on page boundaries and dovetail
1380 ** with their predecessors.
1382 vcontig_sg->dma_length = vcontig_len;
1384 vcontig_sg = startsg;
1385 vcontig_len = startsg->length;
1388 ** 3) do the entries end/start on page boundaries?
1389 ** Don't update vcontig_end until we've checked.
1391 if (DMA_CONTIG(vcontig_end, vaddr))
1393 vcontig_end = vcontig_len + vaddr;
1394 dma_len += vcontig_len;
1402 ** End of DMA Stream
1403 ** Terminate last VCONTIG block.
1404 ** Allocate space for DMA stream.
1406 vcontig_sg->dma_length = vcontig_len;
1407 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1408 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1409 idx = sba_alloc_range(ioc, dev, dma_len);
1411 dma_sg->dma_length = 0;
1414 dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1422 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1423 int nents, enum dma_data_direction dir,
1424 unsigned long attrs);
1426 * sba_map_sg - map Scatter/Gather list
1427 * @dev: instance of PCI owned by the driver that's asking.
1428 * @sglist: array of buffer/length pairs
1429 * @nents: number of entries in list
1430 * @dir: R/W or both.
1431 * @attrs: optional dma attributes
1433 * See Documentation/DMA-API-HOWTO.txt
1435 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1436 int nents, enum dma_data_direction dir,
1437 unsigned long attrs)
1440 int coalesced, filled = 0;
1441 #ifdef ASSERT_PDIR_SANITY
1442 unsigned long flags;
1444 #ifdef ALLOW_IOV_BYPASS_SG
1445 struct scatterlist *sg;
1448 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1452 #ifdef ALLOW_IOV_BYPASS_SG
1453 ASSERT(to_pci_dev(dev)->dma_mask);
1454 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1455 for_each_sg(sglist, sg, nents, filled) {
1456 sg->dma_length = sg->length;
1457 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1462 /* Fast path single entry scatterlists. */
1464 sglist->dma_length = sglist->length;
1465 sglist->dma_address = sba_map_page(dev, sg_page(sglist),
1466 sglist->offset, sglist->length, dir, attrs);
1467 if (dma_mapping_error(dev, sglist->dma_address))
1472 #ifdef ASSERT_PDIR_SANITY
1473 spin_lock_irqsave(&ioc->res_lock, flags);
1474 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1476 sba_dump_sg(ioc, sglist, nents);
1477 panic("Check before sba_map_sg_attrs()");
1479 spin_unlock_irqrestore(&ioc->res_lock, flags);
1482 prefetch(ioc->res_hint);
1485 ** First coalesce the chunks and allocate I/O pdir space
1487 ** If this is one DMA stream, we can properly map using the
1488 ** correct virtual address associated with each DMA page.
1489 ** w/o this association, we wouldn't have coherent DMA!
1490 ** Access to the virtual address is what forces a two pass algorithm.
1492 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1493 if (coalesced < 0) {
1494 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1499 ** Program the I/O Pdir
1501 ** map the virtual addresses to the I/O Pdir
1502 ** o dma_address will contain the pdir index
1503 ** o dma_len will contain the number of bytes to map
1504 ** o address contains the virtual address.
1506 filled = sba_fill_pdir(ioc, sglist, nents);
1508 #ifdef ASSERT_PDIR_SANITY
1509 spin_lock_irqsave(&ioc->res_lock, flags);
1510 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1512 sba_dump_sg(ioc, sglist, nents);
1513 panic("Check after sba_map_sg_attrs()\n");
1515 spin_unlock_irqrestore(&ioc->res_lock, flags);
1518 ASSERT(coalesced == filled);
1519 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1525 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1526 * @dev: instance of PCI owned by the driver that's asking.
1527 * @sglist: array of buffer/length pairs
1528 * @nents: number of entries in list
1529 * @dir: R/W or both.
1530 * @attrs: optional dma attributes
1532 * See Documentation/DMA-API-HOWTO.txt
1534 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1535 int nents, enum dma_data_direction dir,
1536 unsigned long attrs)
1538 #ifdef ASSERT_PDIR_SANITY
1540 unsigned long flags;
1543 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1544 __func__, nents, sba_sg_address(sglist), sglist->length);
1546 #ifdef ASSERT_PDIR_SANITY
1550 spin_lock_irqsave(&ioc->res_lock, flags);
1551 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1552 spin_unlock_irqrestore(&ioc->res_lock, flags);
1555 while (nents && sglist->dma_length) {
1557 sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
1559 sglist = sg_next(sglist);
1563 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1565 #ifdef ASSERT_PDIR_SANITY
1566 spin_lock_irqsave(&ioc->res_lock, flags);
1567 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1568 spin_unlock_irqrestore(&ioc->res_lock, flags);
1573 /**************************************************************
1575 * Initialization and claim
1577 ***************************************************************/
1580 ioc_iova_init(struct ioc *ioc)
1584 struct pci_dev *device = NULL;
1585 #ifdef FULL_VALID_PDIR
1586 unsigned long index;
1590 ** Firmware programs the base and size of a "safe IOVA space"
1591 ** (one that doesn't overlap memory or LMMIO space) in the
1592 ** IBASE and IMASK registers.
1594 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1595 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1597 ioc->iov_size = ~ioc->imask + 1;
1599 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1600 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1601 ioc->iov_size >> 20);
1603 switch (iovp_size) {
1604 case 4*1024: tcnfg = 0; break;
1605 case 8*1024: tcnfg = 1; break;
1606 case 16*1024: tcnfg = 2; break;
1607 case 64*1024: tcnfg = 3; break;
1609 panic(PFX "Unsupported IOTLB page size %ldK",
1613 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1615 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
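/* e.g. a 1GB IOV space with 4KB IOMMU pages needs 256K pdir entries,
   i.e. a 2MB pdir */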
1616 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1617 get_order(ioc->pdir_size));
1618 if (!ioc->pdir_base)
1619 panic(PFX "Couldn't allocate I/O Page Table\n");
1621 memset(ioc->pdir_base, 0, ioc->pdir_size);
1623 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1624 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1626 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1627 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1630 ** If an AGP device is present, only use half of the IOV space
1631 ** for PCI DMA. Unfortunately we can't know ahead of time
1632 ** whether GART support will actually be used, so for now we
1633 ** just key on any AGP device found in the system.
1634 ** We program the next pdir index after we stop w/ a key for
1635 ** the GART code to handshake on.
1637 for_each_pci_dev(device)
1638 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1640 if (agp_found && reserve_sba_gart) {
1641 printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1642 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1643 ioc->pdir_size /= 2;
1644 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1646 #ifdef FULL_VALID_PDIR
1648 ** Check to see if the spill page has been allocated; we don't need more than
1649 ** one across multiple SBAs.
1651 if (!prefetch_spill_page) {
1652 char *spill_poison = "SBAIOMMU POISON";
1653 int poison_size = 16;
1654 void *poison_addr, *addr;
1656 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1658 panic(PFX "Couldn't allocate PDIR spill page\n");
1661 for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1662 memcpy(poison_addr, spill_poison, poison_size);
1664 prefetch_spill_page = virt_to_phys(addr);
1666 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1669 ** Set all the PDIR entries valid w/ the spill page as the target
1671 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1672 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1675 /* Clear I/O TLB of any possible entries */
1676 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1677 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1679 /* Enable IOVA translation */
1680 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1681 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1685 ioc_resource_init(struct ioc *ioc)
1687 spin_lock_init(&ioc->res_lock);
1688 #if DELAYED_RESOURCE_CNT > 0
1689 spin_lock_init(&ioc->saved_lock);
1692 /* resource map size dictated by pdir_size */
1693 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1694 ioc->res_size >>= 3; /* convert bit count to byte count */
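/* e.g. 256K pdir entries -> one bit each -> a 32KB resource map */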
1695 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1697 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1698 get_order(ioc->res_size));
1700 panic(PFX "Couldn't allocate resource map\n");
1702 memset(ioc->res_map, 0, ioc->res_size);
1703 /* next available IOVP - circular search */
1704 ioc->res_hint = (unsigned long *) ioc->res_map;
1706 #ifdef ASSERT_PDIR_SANITY
1707 /* Mark first bit busy - ie no IOVA 0 */
1708 ioc->res_map[0] = 0x1;
1709 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1711 #ifdef FULL_VALID_PDIR
1712 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1713 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1714 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1715 | prefetch_spill_page);
1718 DBG_INIT("%s() res_map %x %p\n", __func__,
1719 ioc->res_size, (void *) ioc->res_map);
1723 ioc_sac_init(struct ioc *ioc)
1725 struct pci_dev *sac = NULL;
1726 struct pci_controller *controller = NULL;
1729 * pci_alloc_coherent() must return a DMA address which is
1730 * SAC (single address cycle) addressable, so allocate a
1731 * pseudo-device to enforce that.
1733 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1735 panic(PFX "Couldn't allocate struct pci_dev");
1737 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1739 panic(PFX "Couldn't allocate struct pci_controller");
1741 controller->iommu = ioc;
1742 sac->sysdata = controller;
1743 sac->dma_mask = 0xFFFFFFFFUL;
1745 sac->dev.bus = &pci_bus_type;
1747 ioc->sac_only_dev = sac;
1751 ioc_zx1_init(struct ioc *ioc)
1753 unsigned long rope_config;
1756 if (ioc->rev < 0x20)
1757 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1759 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1760 ioc->dma_mask = (0x1UL << 39) - 1;
1763 ** Clear ROPE(N)_CONFIG AO bit.
1764 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1765 ** Overrides bit 1 in DMA Hint Sets.
1766 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1768 for (i=0; i<(8*8); i+=8) {
1769 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1770 rope_config &= ~IOC_ROPE_AO;
1771 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1775 typedef void (initfunc)(struct ioc *);
1783 static struct ioc_iommu ioc_iommu_info[] __initdata = {
1784 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1785 { ZX2_IOC_ID, "zx2", NULL },
1786 { SX1000_IOC_ID, "sx1000", NULL },
1787 { SX2000_IOC_ID, "sx2000", NULL },
1790 static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
1792 struct ioc_iommu *info;
1794 ioc->next = ioc_list;
1797 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1799 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1800 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1801 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1803 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1804 if (ioc->func_id == info->func_id) {
1805 ioc->name = info->name;
1811 iovp_size = (1 << iovp_shift);
1812 iovp_mask = ~(iovp_size - 1);
1814 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1815 PAGE_SIZE >> 10, iovp_size >> 10);
1818 ioc->name = kmalloc(24, GFP_KERNEL);
1820 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1821 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1823 ioc->name = "Unknown";
1827 ioc_resource_init(ioc);
1830 printk(KERN_INFO PFX
1831 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1832 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1833 hpa, ioc->iov_size >> 20, ioc->ibase);
1838 /**************************************************************************
1840 ** SBA initialization code (HW and SW)
1842 ** o identify SBA chip itself
1843 ** o FIXME: initialize DMA hints for reasonable defaults
1845 **************************************************************************/
1847 #ifdef CONFIG_PROC_FS
1849 ioc_start(struct seq_file *s, loff_t *pos)
1854 for (ioc = ioc_list; ioc; ioc = ioc->next)
1862 ioc_next(struct seq_file *s, void *v, loff_t *pos)
1864 struct ioc *ioc = v;
1871 ioc_stop(struct seq_file *s, void *v)
1876 ioc_show(struct seq_file *s, void *v)
1878 struct ioc *ioc = v;
1879 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1882 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1883 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1885 if (ioc->node != NUMA_NO_NODE)
1886 seq_printf(s, "NUMA node : %d\n", ioc->node);
1888 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1889 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1891 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1892 used += hweight64(*res_ptr);
1894 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1895 seq_printf(s, "PDIR used : %d entries\n", used);
1897 #ifdef PDIR_SEARCH_TIMING
1899 unsigned long i = 0, avg = 0, min, max;
1900 min = max = ioc->avg_search[0];
1901 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1902 avg += ioc->avg_search[i];
1903 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1904 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1906 avg /= SBA_SEARCH_SAMPLE;
1907 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1911 #ifndef ALLOW_IOV_BYPASS
1912 seq_printf(s, "IOVA bypass disabled\n");
1917 static const struct seq_operations ioc_seq_ops = {
1927 struct proc_dir_entry *dir;
1929 dir = proc_mkdir("bus/mckinley", NULL);
1933 proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops);
1938 sba_connect_bus(struct pci_bus *bus)
1940 acpi_handle handle, parent;
1944 if (!PCI_CONTROLLER(bus))
1945 panic(PFX "no sysdata on bus %d!\n", bus->number);
1947 if (PCI_CONTROLLER(bus)->iommu)
1950 handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
1955 * The IOC scope encloses PCI root bridges in the ACPI
1956 * namespace, so work our way out until we find an IOC we
1957 * claimed previously.
1960 for (ioc = ioc_list; ioc; ioc = ioc->next)
1961 if (ioc->handle == handle) {
1962 PCI_CONTROLLER(bus)->iommu = ioc;
1966 status = acpi_get_parent(handle, &parent);
1968 } while (ACPI_SUCCESS(status));
1970 printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
1974 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
1979 node = acpi_get_node(handle);
1980 if (node != NUMA_NO_NODE && !node_online(node))
1981 node = NUMA_NO_NODE;
1987 static void __init acpi_sba_ioc_add(struct ioc *ioc)
1989 acpi_handle handle = ioc->handle;
1992 struct acpi_device_info *adi;
1994 ioc_found = ioc->next;
1995 status = hp_acpi_csr_space(handle, &hpa, &length);
1996 if (ACPI_FAILURE(status))
1999 status = acpi_get_object_info(handle, &adi);
2000 if (ACPI_FAILURE(status))
2004 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
2005 * root bridges, and its CSR space includes the IOC function.
2007 if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
2008 hpa += ZX1_IOC_OFFSET;
2009 /* zx1 based systems default to kernel page size iommu pages */
2011 iovp_shift = min(PAGE_SHIFT, 16);
2016 * default anything not caught above or specified on cmdline to 4k
2023 /* setup NUMA node association */
2024 sba_map_ioc_to_node(ioc, handle);
2031 static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2037 static int acpi_sba_ioc_attach(struct acpi_device *device,
2038 const struct acpi_device_id *not_used)
2042 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2046 ioc->next = ioc_found;
2048 ioc->handle = device->handle;
2053 static struct acpi_scan_handler acpi_sba_ioc_handler = {
2054 .ids = hp_ioc_iommu_device_ids,
2055 .attach = acpi_sba_ioc_attach,
2058 static int __init acpi_sba_ioc_init_acpi(void)
2060 return acpi_scan_add_handler(&acpi_sba_ioc_handler);
2062 /* This has to run before acpi_scan_init(). */
2063 arch_initcall(acpi_sba_ioc_init_acpi);
2068 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2071 #if defined(CONFIG_IA64_GENERIC)
2072 /* If we are booting a kdump kernel, the sba_iommu will
2073 * cause devices that were not shut down properly to MCA
2074 * as soon as they are turned back on. Our only option for
2075 * a successful kdump kernel boot is to use the swiotlb.
2077 if (is_kdump_kernel()) {
2079 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2080 panic("Unable to initialize software I/O TLB:"
2081 " Try machvec=dig boot option");
2082 machvec_init("dig");
2088 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
2089 * routine, but that only happens if acpi_scan_init() has already run.
2092 acpi_sba_ioc_add(ioc_found);
2095 #ifdef CONFIG_IA64_GENERIC
2097 * If we didn't find something sba_iommu can claim, we
2098 * need to set up the swiotlb and switch to the dig machvec.
2101 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2102 panic("Unable to find SBA IOMMU or initialize "
2103 "software I/O TLB: Try machvec=dig boot option");
2104 machvec_init("dig");
2106 panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2111 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2113 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2114 * buffer setup to support devices with smaller DMA masks than
2115 * sba_iommu can handle.
2117 if (ia64_platform_is("hpzx1_swiotlb")) {
2118 extern void hwsw_init(void);
2126 struct pci_bus *b = NULL;
2127 while ((b = pci_find_next_bus(b)) != NULL)
2132 #ifdef CONFIG_PROC_FS
2138 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2141 nosbagart(char *str)
2143 reserve_sba_gart = 0;
2147 static int sba_dma_supported (struct device *dev, u64 mask)
2149 /* make sure it's at least 32bit capable */
2150 return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2153 __setup("nosbagart", nosbagart);
2156 sba_page_override(char *str)
2158 unsigned long page_size;
2160 page_size = memparse(str, &str);
2161 switch (page_size) {
2166 iovp_shift = ffs(page_size) - 1;
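/* e.g. sbapagesize=64k gives page_size 65536, so iovp_shift == 16 */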
2169 printk("%s: unknown/unsupported iommu page size %ld\n",
2170 __func__, page_size);
2176 __setup("sbapagesize=",sba_page_override);
2178 const struct dma_map_ops sba_dma_ops = {
2179 .alloc = sba_alloc_coherent,
2180 .free = sba_free_coherent,
2181 .map_page = sba_map_page,
2182 .unmap_page = sba_unmap_page,
2183 .map_sg = sba_map_sg_attrs,
2184 .unmap_sg = sba_unmap_sg_attrs,
2185 .dma_supported = sba_dma_supported,
2186 .mmap = dma_common_mmap,
2187 .get_sgtable = dma_common_get_sgtable,
2190 void sba_dma_init(void)
2192 dma_ops = &sba_dma_ops;