/* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/sbus.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

struct resource *_sparc_find_resource(struct resource *r, unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

/* This tracks the next virtual address available for DVMA mappings. */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This marks the start of I/O mappings and is visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char *)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}

/*
 * Map a region of an SBus device given its parent resource plus offset.
 */
void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
    unsigned long size, char *name)
{
	return _sparc_alloc_io(phyres->flags & 0xF,
	    phyres->start + offset, size, name);
}

/*
 * Complementary to sbus_ioremap(); the size argument is unused here.
 */
void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
{
	iounmap(addr);
}

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 * Does the real work of _sparc_alloc_io(): grabs a chunk of the
 * sparc_iomap virtual space and wires up the page tables.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct sbus_dev *sdev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}

extern unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq);
void __init sbus_fill_device_irq(struct sbus_dev *sdev)
{
	struct linux_prom_irqs irqs[PROMINTR_MAX];
	int len;

	len = prom_getproperty(sdev->prom_node, "intr",
	    (char *)irqs, sizeof(irqs));
	if (len != -1) {
		sdev->num_irqs = len / 8;	/* 8 == sizeof(struct linux_prom_irqs) */
		if (sdev->num_irqs == 0) {
			sdev->irqs[0] = 0;
		} else if (sparc_cpu_model == sun4d) {
			for (len = 0; len < sdev->num_irqs; len++)
				sdev->irqs[len] =
				    sun4d_build_irq(sdev, irqs[len].pri);
		} else {
			for (len = 0; len < sdev->num_irqs; len++)
				sdev->irqs[len] = irqs[len].pri;
		}
	} else {
		int interrupts[PROMINTR_MAX];

		/* No "intr" property found -- check for an "interrupts"
		 * property.  That one contains SBus interrupt levels, not
		 * IPLs as in "intr", and no vector values.  We convert
		 * SBus interrupt levels to PILs (platform specific).
		 */
		len = prom_getproperty(sdev->prom_node, "interrupts",
		    (char *)interrupts, sizeof(interrupts));
		if (len == -1) {
			sdev->irqs[0] = 0;
			sdev->num_irqs = 0;
		} else {
			sdev->num_irqs = len / sizeof(int);
			for (len = 0; len < sdev->num_irqs; len++) {
				sdev->irqs[len] =
				    sbint_to_irq(sdev, interrupts[len]);
			}
		}
	}
}

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 *
 * XXX Some clever people know that sdev is not used and supply NULL. Watch.
 */
void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;
	memset((char*)res, 0, sizeof(struct resource));

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	/* Set the resource name, if known. */
	if (sdev) {
		res->name = sdev->prom_name;
	}

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);	/* the pages must be freed even when the
				 * resource kmalloc fails, hence this order */
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = mmu_translate_dvma(ba);
	mmu_unmap_dma_area(ba, n);

	__free_pages(pgv, get_order(n));
}

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(va, len, sdev->bus);
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(ba, n, sdev->bus);
}

int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(sg, n, sdev->bus);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(sg, n, sdev->bus);
}

/*
 * Sync a streaming mapping before/after CPU access.  Both variants are
 * currently no-ops on sparc32; the disabled code is kept for reference.
 */
void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
{
#if 0
	unsigned long va;
	struct resource *res;

	/* We do not need the resource, just print a message if invalid. */
	res = _sparc_find_resource(&_sparc_dvma, ba);
	if (res == NULL)
		panic("sbus_dma_sync_single: 0x%x\n", ba);

	va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
	/*
	 * XXX This bogosity will be fixed with the iommu rewrite coming soon
	 * to a kernel near you. - Anton
	 */
	/* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
{
#if 0
	unsigned long va;
	struct resource *res;

	/* We do not need the resource, just print a message if invalid. */
	res = _sparc_find_resource(&_sparc_dvma, ba);
	if (res == NULL)
		panic("sbus_dma_sync_single: 0x%x\n", ba);

	va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
	/*
	 * XXX This bogosity will be fixed with the iommu rewrite coming soon
	 * to a kernel near you. - Anton
	 */
	/* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
}

/* Support code for sbus_init(). */
/*
 * XXX This function appears to be a distorted version of
 * prom_sbus_ranges_init(), with all sun4d stuff cut away.
 * Ask DaveM what is going on here, how is sun4d supposed to work... XXX
 */
/* added back sun4d patch from Thomas Bogendoerfer - should be OK (crn) */
void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
	int parent_node = pn->node;

	if (sparc_cpu_model == sun4d) {
		struct linux_prom_ranges iounit_ranges[PROMREG_MAX];
		int num_iounit_ranges, len;

		len = prom_getproperty(parent_node, "ranges",
		    (char *) iounit_ranges,
		    sizeof(iounit_ranges));
		if (len != -1) {
			num_iounit_ranges =
			    (len / sizeof(struct linux_prom_ranges));
			prom_adjust_ranges(sbus->sbus_ranges,
			    sbus->num_sbus_ranges,
			    iounit_ranges, num_iounit_ranges);
		}
	}
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
	struct device_node *parent = dp->parent;

	if (sparc_cpu_model != sun4d &&
	    parent != NULL &&
	    !strcmp(parent->name, "iommu")) {
		extern void iommu_init(int iommu_node, struct sbus_bus *sbus);

		iommu_init(parent->node, sbus);
	}

	if (sparc_cpu_model == sun4d) {
		extern void iounit_init(int sbi_node, int iounit_node,
		    struct sbus_bus *sbus);

		iounit_init(dp->node, parent->node, sbus);
	}
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
	if (sparc_cpu_model == sun4d) {
		struct device_node *parent = dp->parent;

		sbus->devid = of_getintprop_default(parent, "device-id", 0);
		sbus->board = of_getintprop_default(parent, "board#", 0);
	}
}

int __init sbus_arch_preinit(void)
{
	extern void register_proc_sparc_ioport(void);

	register_proc_sparc_ioport();

#ifdef CONFIG_SUN4
	{
		extern void sun4_dvma_init(void);
		sun4_dvma_init();
	}
	return 1;
#else
	return 0;
#endif
}

void __init sbus_arch_postinit(void)
{
	if (sparc_cpu_model == sun4d) {
		extern void sun4d_init_sbi_irq(void);
		sun4d_init_sbi_irq();
	}
}
#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}
	memset((char*)res, 0, sizeof(struct resource));

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
    unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

void pci_unmap_page(struct pci_dev *hwdev,
    dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
    int direction)
{
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for (n = 0; n < nents; n++) {
		BUG_ON(page_address(sg->page) == NULL);
		sg->dvma_address = virt_to_phys(page_address(sg->page));
		sg->dvma_length = sg->length;
		sg++;
	}
	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
    int direction)
{
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
			BUG_ON(page_address(sg->page) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg->page),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device(), and then
 * the device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
			BUG_ON(page_address(sg->page) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg->page),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
	}
}

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
			BUG_ON(page_address(sg->page) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg->page),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
	}
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
		    (unsigned long long)r->start,
		    (unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow.  Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
struct resource *
_sparc_find_resource(struct resource *root, unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}