// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

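/* Worked example (illustrative, not from the original source): with
   Alpha's 8 KB pages (PAGE_SHIFT == 13), a page-aligned physical
   address of 0x01206000 gives

	mk_iommu_pte(0x01206000) == (0x01206000 >> 12) | 1 == 0x1207

   i.e. the page frame number (0x903) shifted left by one bit, with
   bit 0 serving as the valid bit that the window hardware tests.  */
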
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

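/* Example (illustrative): on a machine with 96 MB of low memory,
   size_for_memory(256 MB) rounds 96 MB up to the next power of two
   and returns 128 MB, while size_for_memory(64 MB) leaves the cap
   untouched and returns 64 MB.  */
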
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

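/* Sizing sketch (illustrative): an 8 MB window with 8 KB pages covers
   8 MB / 8 KB = 1024 translations, so mem_size works out to
   8 MB / (8 KB / 8) = 8 KB of pte storage, and the table is aligned
   to at least that 8 KB because of the concatenation rule above.  */
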
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes = arena->ptes;
	unsigned long base, boundary_size;
	long i, p, nent;
	int pass = 0;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i]) {
			p = ALIGN(p + i + 1, mask + 1);
			i = 0;
		} else {
			i = i + 1;
		}
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		}
		return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

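/* Search behaviour in brief: the scan starts at `next_entry' rounded up
   to the requested alignment, so e.g. mask == 7 restricts candidate
   starts to every 8th pte (a 64 KB boundary with 8 KB pages).  If the
   first pass runs off the end of the table, the whole window is flushed
   from the chipset TLB and the scan restarts once from pte 0; only then
   does the routine give up and return -1.  */
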
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

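/* Typical caller pattern (sketch, mirroring pci_map_single_1 below):

	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs >= 0)
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

   The slots come back as IOMMU_INVALID_PTE, so the window cannot load
   a stale translation before the caller writes the real ptes.  */
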
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

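/* DAC (dual address cycle) sketch: if the platform advertises a
   pci_dac_offset -- say a hypothetical 1UL << 40 -- a 64-bit capable
   device may simply add that offset to the physical address instead of
   consuming iommu window entries; the test above only requires that
   the device's dma_mask covers every bit of the offset.  */
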
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

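/* How this is reached (sketch): a driver that calls, for example,

	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, bus))
		goto fail;

   ends up in alpha_pci_map_page() below via the dma_map_ops table at
   the bottom of this file, and from there in pci_map_single_1().  The
   device uses `bus'; the cpu keeps using `buf'.  */
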
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

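/* Driver-side view (sketch): a call such as

	void *ring = dma_alloc_coherent(&pdev->dev, size, &ring_dma, GFP_KERNEL);

   lands here through the .alloc hook; on machines with neither an
   iommu nor a mapping that fits the device's mask, the GFP_DMA retry
   above is what pulls the buffer low enough to satisfy the mask.  */
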
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

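/* Classification example (illustrative): take a three-entry list where
   entry 1 begins at the physical address where entry 0 ends, entry 2
   begins mid-page at an unrelated address, and the max_seg_size limit
   is never hit.  The pass above then leaves:

	sg[0].dma_address == 0    (leader; followers physically adjacent)
	sg[0].dma_length  == sg[0].length + sg[1].length
	sg[1].dma_address == -1   (merged into the previous leader)
	sg[2].dma_address == 0    (new leader)
	sg[2].dma_length  == sg[2].length
   */
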
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		return 1;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0) {
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
		return -ENOMEM;
	}
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		dma_unmap_sg(&pdev->dev, start, out - start, dir);
	return -ENOMEM;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

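/* Example (illustrative): with a direct map at __direct_map_base == 1 GB
   covering __direct_map_size == 1 GB, a device advertising a 32-bit
   dma_mask passes the first test because 0x40000000 + 0x40000000 - 1
   <= 0xffffffff; a 24-bit ISA mask fails it and must instead be
   covered by one of the scatter-gather arenas checked next.  */
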
/*
 * AGP GART extensions to the IOMMU
 */
long
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

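/* AGP GART usage sketch: a chipset AGP driver is expected to carve out
   window space first and only later attach pages, roughly

	pg = iommu_reserve(arena, pg_count, align_mask);
	...
	iommu_bind(arena, pg, pg_count, pages);
	...
	iommu_unbind(arena, pg, pg_count);
	iommu_release(arena, pg, pg_count);

   Reserved-but-unbound slots hold IOMMU_RESERVED_PTE, so they are never
   handed out by iommu_arena_alloc() in the meantime.  */
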
long
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);