// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
9 | ||
57c8a661 | 10 | #include <linux/memblock.h> |
a4df02a2 | 11 | #include <linux/init.h> |
2e811488 | 12 | #include <linux/module.h> |
1da177e4 | 13 | #include <linux/kernel.h> |
5a0e3ad6 | 14 | #include <linux/gfp.h> |
1da177e4 LT |
15 | #include <linux/mm.h> |
16 | #include <linux/list.h> | |
17 | ||
18 | #include <asm/page.h> | |
19 | #include <asm/pgtable.h> | |
20 | #include <asm/dvma.h> | |
21 | ||
#undef DVMA_DEBUG

/*
 * sun3x has a real IOMMU whose unmap routine lives elsewhere; plain
 * sun3 has no per-entry IOMMU teardown, so the hook is a no-op there.
 */
#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

/* one slot per DVMA page: records the byte length of the allocation
 * that starts on that page, or 0 when the page is not an allocation
 * start.  Allocated in dvma_init(). */
static unsigned long *iommu_use;

/* index of a DVMA bus address into iommu_use[] */
#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

/* length bookkeeping for the allocation starting at baddr */
#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

/* a free region [start, end) of DVMA bus space; size == end - start */
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;	/* free DVMA regions */
static struct list_head hole_cache;	/* spare, unused hole descriptors */
static struct hole initholes[64];	/* static backing store for the cache */
52 | ||
#ifdef DVMA_DEBUG

/* cumulative statistics, bumped by get_baddr() and free_baddr() */
static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

/* Dump every in-use DVMA entry plus the cumulative counters. */
static void print_use(void)
{
	int used = 0;
	int idx;

	pr_info("dvma entry usage:\n");

	for (idx = 0; idx < IOMMU_TOTAL_ENTRIES; idx++) {
		if (!iommu_use[idx])
			continue;

		used++;

		pr_info("dvma entry: %08x len %08lx\n",
			(idx << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[idx]);
	}

	pr_info("%d entries in use total\n", used);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

/* Walk a hole list and print each hole, skipping all-zero entries. */
static void print_holes(struct list_head *holes)
{
	struct hole *hole;

	pr_info("listing dvma holes\n");

	list_for_each_entry(hole, holes, list) {
		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */
105 | ||
/*
 * Coalesce adjacent holes on hole_list, returning the descriptors of
 * absorbed holes to hole_cache.  Returns the number of descriptors
 * recovered (0 if nothing could be merged).
 *
 * NOTE(review): "prev" is only ever set to the first hole on the list
 * and never advances, so only holes that directly abut that first
 * hole are merged -- confirm this cheap heuristic is intentional.
 */
static inline int refill(void)
{

	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(!prev) {
			prev = hole;
			continue;
		}

		/* hole ends exactly where prev starts: absorb prev */
		if(hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}
133 | ||
134 | static inline struct hole *rmcache(void) | |
135 | { | |
136 | struct hole *ret; | |
137 | ||
138 | if(list_empty(&hole_cache)) { | |
139 | if(!refill()) { | |
56bbd862 | 140 | pr_crit("out of dvma hole cache!\n"); |
1da177e4 LT |
141 | BUG(); |
142 | } | |
143 | } | |
144 | ||
145 | ret = list_entry(hole_cache.next, struct hole, list); | |
146 | list_del(&(ret->list)); | |
147 | ||
148 | return ret; | |
149 | ||
150 | } | |
151 | ||
/*
 * Carve @len bytes (callers pass a multiple of DVMA_PAGE_SIZE) out of
 * the free hole list and return the bus address of the allocation.
 * @align is a power of two >= DVMA_PAGE_SIZE; extra slack is taken
 * from the hole so the returned address lands on an @align boundary.
 * Records the allocation length in iommu_use[] and BUGs when no hole
 * is large enough.
 */
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		/* allocations come off the TOP of a hole, so pad the
		 * request until hole->end - newlen is @align-aligned */
		if(align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			/* shrink the hole from the top */
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			/* exact fit: retire the descriptor to the cache */
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}
202 | ||
203 | static inline int free_baddr(unsigned long baddr) | |
204 | { | |
205 | ||
206 | unsigned long len; | |
207 | struct hole *hole; | |
208 | struct list_head *cur; | |
209 | unsigned long orig_baddr; | |
210 | ||
211 | orig_baddr = baddr; | |
212 | len = dvma_entry_use(baddr); | |
213 | dvma_entry_use(baddr) = 0; | |
214 | baddr &= DVMA_PAGE_MASK; | |
215 | dvma_unmap_iommu(baddr, len); | |
216 | ||
217 | #ifdef DVMA_DEBUG | |
218 | dvma_frees++; | |
219 | dvma_free_bytes += len; | |
220 | #endif | |
221 | ||
222 | list_for_each(cur, &hole_list) { | |
223 | hole = list_entry(cur, struct hole, list); | |
224 | ||
225 | if(hole->end == baddr) { | |
226 | hole->end += len; | |
227 | hole->size += len; | |
228 | return 0; | |
229 | } else if(hole->start == (baddr + len)) { | |
230 | hole->start = baddr; | |
231 | hole->size += len; | |
232 | return 0; | |
233 | } | |
234 | ||
235 | } | |
236 | ||
237 | hole = rmcache(); | |
238 | ||
239 | hole->start = baddr; | |
240 | hole->end = baddr + len; | |
241 | hole->size = len; | |
242 | ||
243 | // list_add_tail(&(hole->list), cur); | |
244 | list_add(&(hole->list), cur); | |
245 | ||
246 | return 0; | |
247 | ||
248 | } | |
249 | ||
a4df02a2 | 250 | void __init dvma_init(void) |
1da177e4 LT |
251 | { |
252 | ||
253 | struct hole *hole; | |
254 | int i; | |
255 | ||
256 | INIT_LIST_HEAD(&hole_list); | |
257 | INIT_LIST_HEAD(&hole_cache); | |
258 | ||
259 | /* prepare the hole cache */ | |
260 | for(i = 0; i < 64; i++) | |
261 | list_add(&(initholes[i].list), &hole_cache); | |
262 | ||
263 | hole = rmcache(); | |
264 | hole->start = DVMA_START; | |
265 | hole->end = DVMA_END; | |
266 | hole->size = DVMA_SIZE; | |
267 | ||
268 | list_add(&(hole->list), &hole_list); | |
269 | ||
2a5bda5a | 270 | iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), |
7e1c4e27 | 271 | SMP_CACHE_BYTES); |
1da177e4 LT |
272 | |
273 | dvma_unmap_iommu(DVMA_START, DVMA_SIZE); | |
274 | ||
275 | #ifdef CONFIG_SUN3 | |
276 | sun3_dvma_init(); | |
277 | #endif | |
278 | ||
279 | } | |
280 | ||
/*
 * Map @len bytes of kernel memory at @kaddr into DVMA space with at
 * least @align alignment (rounded up to DVMA_PAGE_SIZE).  Returns the
 * bus address, preserving the sub-page offset of @kaddr, or 0 for a
 * NULL @kaddr.  BUGs if the IOMMU mapping itself fails.
 */
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	/* zero-length requests fall back to a 0x800-byte default */
	if(!len)
		len = 0x800;

	/* NOTE(review): len can no longer be 0 at this point, so this
	 * condition only rejects a NULL kaddr */
	if(!kaddr || !len) {
		// pr_err("error: kaddr %lx len %x\n", kaddr, len);
		// *(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	/* NOTE(review): offset computed with DVMA_PAGE_MASK but the base
	 * truncated with PAGE_MASK -- consistent only if the CPU and
	 * DVMA page sizes agree; confirm for sun3/sun3x */
	kaddr &= PAGE_MASK;
	len += off;
	/* round the length up to whole DVMA pages */
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if(align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
	// pr_info("using baddr %lx\n", baddr);

	if(!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);
1da177e4 LT |
319 | |
/*
 * Undo a dvma_map_align() mapping given its bus address, normalizing
 * VME-style addresses into the 0xfxxxxx DVMA window first.
 */
void dvma_unmap(void *baddr)
{
	unsigned long addr = (unsigned long)baddr;

	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);
1da177e4 LT |
335 | |
336 | void *dvma_malloc_align(unsigned long len, unsigned long align) | |
337 | { | |
338 | unsigned long kaddr; | |
339 | unsigned long baddr; | |
340 | unsigned long vaddr; | |
341 | ||
342 | if(!len) | |
343 | return NULL; | |
344 | ||
56bbd862 | 345 | pr_debug("dvma_malloc request %lx bytes\n", len); |
1da177e4 LT |
346 | len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); |
347 | ||
348 | if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0) | |
349 | return NULL; | |
350 | ||
351 | if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) { | |
352 | free_pages(kaddr, get_order(len)); | |
353 | return NULL; | |
354 | } | |
355 | ||
356 | vaddr = dvma_btov(baddr); | |
357 | ||
358 | if(dvma_map_cpu(kaddr, vaddr, len) < 0) { | |
359 | dvma_unmap((void *)baddr); | |
360 | free_pages(kaddr, get_order(len)); | |
361 | return NULL; | |
362 | } | |
363 | ||
56bbd862 GU |
364 | pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr, |
365 | baddr); | |
1da177e4 LT |
366 | |
367 | return (void *)vaddr; | |
368 | ||
369 | } | |
2e811488 | 370 | EXPORT_SYMBOL(dvma_malloc_align); |
1da177e4 LT |
371 | |
/*
 * Free a buffer obtained from dvma_malloc_align().
 *
 * Deliberately a no-op: this allocator never reclaims DVMA-backed
 * memory.  NOTE(review): callers therefore leak both the DVMA range
 * and the underlying pages -- confirm this is acceptable for the
 * long-lived users of this API.
 */
void dvma_free(void *vaddr)
{

	return;

}
EXPORT_SYMBOL(dvma_free);