Commit | Line | Data |
---|---|---|
1a59d1b8 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 LT |
2 | /* |
3 | * Scatter-Gather buffer | |
4 | * | |
5 | * Copyright (c) by Takashi Iwai <tiwai@suse.de> | |
1da177e4 LT |
6 | */ |
7 | ||
1da177e4 LT |
8 | #include <linux/slab.h> |
9 | #include <linux/mm.h> | |
10 | #include <linux/vmalloc.h> | |
9d069dc0 | 11 | #include <linux/export.h> |
42e748a0 | 12 | #include <asm/pgtable.h> |
1da177e4 LT |
13 | #include <sound/memalloc.h> |
14 | ||
15 | ||
16 | /* table entries are align to 32 */ | |
17 | #define SGBUF_TBL_ALIGN 32 | |
7ab39926 | 18 | #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN) |
1da177e4 LT |
19 | |
20 | int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab) | |
21 | { | |
22 | struct snd_sg_buf *sgbuf = dmab->private_data; | |
23 | struct snd_dma_buffer tmpb; | |
24 | int i; | |
25 | ||
26 | if (! sgbuf) | |
27 | return -EINVAL; | |
28 | ||
d712eaf2 | 29 | vunmap(dmab->area); |
6af845e4 TI |
30 | dmab->area = NULL; |
31 | ||
1da177e4 | 32 | tmpb.dev.type = SNDRV_DMA_TYPE_DEV; |
42e748a0 TI |
33 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) |
34 | tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC; | |
1da177e4 LT |
35 | tmpb.dev.dev = sgbuf->dev; |
36 | for (i = 0; i < sgbuf->pages; i++) { | |
51e9f2e6 TI |
37 | if (!(sgbuf->table[i].addr & ~PAGE_MASK)) |
38 | continue; /* continuous pages */ | |
1da177e4 | 39 | tmpb.area = sgbuf->table[i].buf; |
51e9f2e6 TI |
40 | tmpb.addr = sgbuf->table[i].addr & PAGE_MASK; |
41 | tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; | |
1da177e4 LT |
42 | snd_dma_free_pages(&tmpb); |
43 | } | |
1da177e4 LT |
44 | |
45 | kfree(sgbuf->table); | |
46 | kfree(sgbuf->page_table); | |
47 | kfree(sgbuf); | |
48 | dmab->private_data = NULL; | |
49 | ||
50 | return 0; | |
51 | } | |
52 | ||
51e9f2e6 TI |
53 | #define MAX_ALLOC_PAGES 32 |
54 | ||
1da177e4 LT |
/*
 * snd_malloc_sgbuf_pages - allocate a scatter-gather DMA buffer
 * @device: the device pointer used for the underlying DMA allocations
 * @size: requested buffer size in bytes
 * @dmab: buffer record to fill; private_data gets the snd_sg_buf
 * @res_size: if non-NULL, receives the actually allocated size, and a
 *	partial allocation is accepted instead of failing outright
 *
 * Allocates the buffer as multiple DMA chunks (at most MAX_ALLOC_PAGES
 * pages each), records each page in sgbuf->table / sgbuf->page_table,
 * and maps all pages into one contiguous kernel virtual area via vmap().
 *
 * Returns the mapped area on success, NULL on failure.  On failure all
 * partially acquired resources are released through snd_free_sgbuf_pages().
 */
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	/* private_data is set before any failure path so that the
	 * _failed cleanup via snd_free_sgbuf_pages() can find it
	 */
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (! sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		/* uncached variant: allocate UC pages and map non-cached */
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	/* table size is rounded up to SGBUF_TBL_ALIGN entries */
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;	/* pages -> bytes for the allocator */
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			/* nothing allocated at all: hard failure */
			if (!sgbuf->pages)
				goto _failed;
			/* caller didn't allow a shorter buffer: fail too */
			if (!res_size)
				goto _failed;
			/* accept the partial allocation */
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		/* the fallback allocator may return less than requested */
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			/* the first page of a chunk stores the chunk's page
			 * count in the low (sub-page) bits of addr; the free
			 * path uses this to find chunk heads
			 */
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		/* shrink the next request to what we actually got */
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	/* map all collected pages into one contiguous virtual area */
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (! dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
9d069dc0 TI |
136 | |
137 | /* | |
138 | * compute the max chunk size with continuous pages on sg-buffer | |
139 | */ | |
140 | unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, | |
141 | unsigned int ofs, unsigned int size) | |
142 | { | |
143 | struct snd_sg_buf *sg = dmab->private_data; | |
144 | unsigned int start, end, pg; | |
145 | ||
146 | start = ofs >> PAGE_SHIFT; | |
147 | end = (ofs + size - 1) >> PAGE_SHIFT; | |
148 | /* check page continuity */ | |
149 | pg = sg->table[start].addr >> PAGE_SHIFT; | |
150 | for (;;) { | |
151 | start++; | |
152 | if (start > end) | |
153 | break; | |
154 | pg++; | |
155 | if ((sg->table[start].addr >> PAGE_SHIFT) != pg) | |
156 | return (start << PAGE_SHIFT) - ofs; | |
157 | } | |
158 | /* ok, all on continuous pages */ | |
159 | return size; | |
160 | } | |
161 | EXPORT_SYMBOL(snd_sgbuf_get_chunk_size); |