Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /** |
b5e89ed5 | 2 | * \file drm_bufs.c |
1da177e4 | 3 | * Generic buffer template |
b5e89ed5 | 4 | * |
1da177e4 LT |
5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
6 | * \author Gareth Hughes <gareth@valinux.com> | |
7 | */ | |
8 | ||
9 | /* | |
10 | * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com | |
11 | * | |
12 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | |
13 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |
14 | * All Rights Reserved. | |
15 | * | |
16 | * Permission is hereby granted, free of charge, to any person obtaining a | |
17 | * copy of this software and associated documentation files (the "Software"), | |
18 | * to deal in the Software without restriction, including without limitation | |
19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
20 | * and/or sell copies of the Software, and to permit persons to whom the | |
21 | * Software is furnished to do so, subject to the following conditions: | |
22 | * | |
23 | * The above copyright notice and this permission notice (including the next | |
24 | * paragraph) shall be included in all copies or substantial portions of the | |
25 | * Software. | |
26 | * | |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
33 | * OTHER DEALINGS IN THE SOFTWARE. | |
34 | */ | |
35 | ||
36 | #include <linux/vmalloc.h> | |
f1a2a9b6 DM |
37 | #include <linux/log2.h> |
38 | #include <asm/shmparam.h> | |
1da177e4 LT |
39 | #include "drmP.h" |
40 | ||
d883f7f1 | 41 | resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource) |
1da177e4 | 42 | { |
836cf046 DA |
43 | return pci_resource_start(dev->pdev, resource); |
44 | } | |
45 | EXPORT_SYMBOL(drm_get_resource_start); | |
1da177e4 | 46 | |
d883f7f1 | 47 | resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource) |
836cf046 DA |
48 | { |
49 | return pci_resource_len(dev->pdev, resource); | |
50 | } | |
b5e89ed5 | 51 | |
836cf046 | 52 | EXPORT_SYMBOL(drm_get_resource_len); |
1da177e4 | 53 | |
55910517 | 54 | static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, |
f77d390c | 55 | struct drm_local_map *map) |
836cf046 | 56 | { |
55910517 | 57 | struct drm_map_list *entry; |
bd1b331f | 58 | list_for_each_entry(entry, &dev->maplist, head) { |
41c2e75e BH |
59 | /* |
60 | * Because the kernel-userspace ABI is fixed at a 32-bit offset | |
61 | * while PCI resources may live above that, we ignore the map | |
62 | * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS. | |
63 | * It is assumed that each driver will have only one resource of | |
64 | * each type. | |
65 | */ | |
66 | if (!entry->map || | |
67 | map->type != entry->map->type || | |
68 | entry->master != dev->primary->master) | |
69 | continue; | |
70 | switch (map->type) { | |
71 | case _DRM_SHM: | |
72 | if (map->flags != _DRM_CONTAINS_LOCK) | |
73 | break; | |
74 | case _DRM_REGISTERS: | |
75 | case _DRM_FRAME_BUFFER: | |
89625eb1 | 76 | return entry; |
41c2e75e BH |
77 | default: /* Make gcc happy */ |
78 | ; | |
836cf046 | 79 | } |
41c2e75e BH |
80 | if (entry->map->offset == map->offset) |
81 | return entry; | |
836cf046 DA |
82 | } |
83 | ||
84 | return NULL; | |
1da177e4 | 85 | } |
1da177e4 | 86 | |
e0be428e | 87 | static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, |
f1a2a9b6 | 88 | unsigned long user_token, int hashed_handle, int shm) |
d1f2b55a | 89 | { |
f1a2a9b6 DM |
90 | int use_hashed_handle, shift; |
91 | unsigned long add; | |
92 | ||
c2604ce0 | 93 | #if (BITS_PER_LONG == 64) |
8d153f71 TH |
94 | use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); |
95 | #elif (BITS_PER_LONG == 32) | |
96 | use_hashed_handle = hashed_handle; | |
97 | #else | |
98 | #error Unsupported long size. Neither 64 nor 32 bits. | |
99 | #endif | |
d1f2b55a | 100 | |
e08870c8 TH |
101 | if (!use_hashed_handle) { |
102 | int ret; | |
1545085a | 103 | hash->key = user_token >> PAGE_SHIFT; |
e08870c8 TH |
104 | ret = drm_ht_insert_item(&dev->map_hash, hash); |
105 | if (ret != -EINVAL) | |
106 | return ret; | |
d1f2b55a | 107 | } |
f1a2a9b6 DM |
108 | |
109 | shift = 0; | |
110 | add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; | |
111 | if (shm && (SHMLBA > PAGE_SIZE)) { | |
112 | int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; | |
113 | ||
114 | /* For shared memory, we have to preserve the SHMLBA | |
115 | * bits of the eventual vma->vm_pgoff value during | |
116 | * mmap(). Otherwise we run into cache aliasing problems | |
117 | * on some platforms. On these platforms, the pgoff of | |
118 | * a mmap() request is used to pick a suitable virtual | |
119 | * address for the mmap() region such that it will not | |
120 | * cause cache aliasing problems. | |
121 | * | |
122 | * Therefore, make sure the SHMLBA relevant bits of the | |
123 | * hash value we use are equal to those in the original | |
124 | * kernel virtual address. | |
125 | */ | |
126 | shift = bits; | |
127 | add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); | |
128 | } | |
129 | ||
e08870c8 TH |
130 | return drm_ht_just_insert_please(&dev->map_hash, hash, |
131 | user_token, 32 - PAGE_SHIFT - 3, | |
f1a2a9b6 | 132 | shift, add); |
d1f2b55a | 133 | } |
9a186645 | 134 | |
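The shift/add computation in drm_map_handle() is easier to see with concrete numbers. Below is a small standalone sketch (not kernel code) of the same SHMLBA bit-preserving math; PAGE_SHIFT, the SHMLBA value and the sample token are assumptions chosen only for illustration, and ilog2_ul() is a local stand-in for the kernel's ilog2().

```c
/* Standalone illustration of the SHMLBA-preserving handle math above. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define SHMLBA     (4 * PAGE_SIZE)          /* assumed: an ARM-like 16 KiB SHMLBA */
#define DRM_MAP_HASH_OFFSET 0x10000000UL

static int ilog2_ul(unsigned long v)        /* stand-in for the kernel's ilog2() */
{
	int r = -1;
	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long user_token = 0xbf803000UL;   /* example kernel map address */
	int bits = ilog2_ul(SHMLBA >> PAGE_SHIFT) + 1;
	unsigned long add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;

	/* carry the SHMLBA-relevant low bits of the original page offset
	 * into the handle, as drm_map_handle() does for _DRM_SHM maps */
	add |= (user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL);

	printf("bits=%d add=0x%lx\n", bits, add);
	return 0;
}
```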
1da177e4 | 135 | /** |
f77d390c BH |
136 | * Core function to create a range of memory available for mapping by a |
137 | * non-root process. | |
1da177e4 LT |
138 | * |
139 | * Adjusts the memory offset to its absolute value according to the mapping | |
140 | * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where |
141 | * applicable and if supported by the kernel. | |
142 | */ | |
41c2e75e | 143 | static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, |
c60ce623 | 144 | unsigned int size, enum drm_map_type type, |
55910517 DA |
145 | enum drm_map_flags flags, |
146 | struct drm_map_list ** maplist) | |
1da177e4 | 147 | { |
f77d390c | 148 | struct drm_local_map *map; |
55910517 | 149 | struct drm_map_list *list; |
9c8da5eb | 150 | drm_dma_handle_t *dmah; |
8d153f71 TH |
151 | unsigned long user_token; |
152 | int ret; | |
1da177e4 | 153 | |
b5e89ed5 DA |
154 | map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); |
155 | if (!map) | |
1da177e4 LT |
156 | return -ENOMEM; |
157 | ||
7ab98401 DA |
158 | map->offset = offset; |
159 | map->size = size; | |
160 | map->flags = flags; | |
161 | map->type = type; | |
1da177e4 LT |
162 | |
163 | /* Only allow shared memory to be removable since we only keep enough | |
164 | * book keeping information about shared memory to allow for removal | |
165 | * when processes fork. | |
166 | */ | |
b5e89ed5 DA |
167 | if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { |
168 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
1da177e4 LT |
169 | return -EINVAL; |
170 | } | |
41c2e75e BH |
171 | DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", |
172 | (unsigned long long)map->offset, map->size, map->type); | |
b6741377 BH |
173 | |
174 | /* page-align _DRM_SHM maps. They are allocated here so there is no security | |
175 | * hole created by that and it works around various broken drivers that use | |
176 | * a non-aligned quantity to map the SAREA. --BenH | |
177 | */ | |
178 | if (map->type == _DRM_SHM) | |
179 | map->size = PAGE_ALIGN(map->size); | |
180 | ||
41c2e75e | 181 | if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { |
b5e89ed5 | 182 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
1da177e4 LT |
183 | return -EINVAL; |
184 | } | |
b5e89ed5 | 185 | map->mtrr = -1; |
1da177e4 LT |
186 | map->handle = NULL; |
187 | ||
b5e89ed5 | 188 | switch (map->type) { |
1da177e4 LT |
189 | case _DRM_REGISTERS: |
190 | case _DRM_FRAME_BUFFER: | |
88f399cd | 191 | #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) |
8d2ea625 | 192 | if (map->offset + (map->size-1) < map->offset || |
b5e89ed5 DA |
193 | map->offset < virt_to_phys(high_memory)) { |
194 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
1da177e4 LT |
195 | return -EINVAL; |
196 | } | |
197 | #endif | |
198 | #ifdef __alpha__ | |
199 | map->offset += dev->hose->mem_space->start; | |
200 | #endif | |
836cf046 DA |
201 | /* Some drivers preinitialize some maps, without the X Server |
202 | * needing to be aware of it. Therefore, we just return success | |
203 | * when the server tries to create a duplicate map. | |
204 | */ | |
89625eb1 DA |
205 | list = drm_find_matching_map(dev, map); |
206 | if (list != NULL) { | |
207 | if (list->map->size != map->size) { | |
836cf046 | 208 | DRM_DEBUG("Matching maps of type %d with " |
b5e89ed5 DA |
209 | "mismatched sizes, (%ld vs %ld)\n", |
210 | map->type, map->size, | |
211 | list->map->size); | |
89625eb1 | 212 | list->map->size = map->size; |
836cf046 DA |
213 | } |
214 | ||
215 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
89625eb1 | 216 | *maplist = list; |
836cf046 DA |
217 | return 0; |
218 | } | |
219 | ||
1da177e4 | 220 | if (drm_core_has_MTRR(dev)) { |
b5e89ed5 DA |
221 | if (map->type == _DRM_FRAME_BUFFER || |
222 | (map->flags & _DRM_WRITE_COMBINING)) { | |
223 | map->mtrr = mtrr_add(map->offset, map->size, | |
224 | MTRR_TYPE_WRCOMB, 1); | |
1da177e4 LT |
225 | } |
226 | } | |
0769d39c | 227 | if (map->type == _DRM_REGISTERS) { |
004a7727 | 228 | map->handle = ioremap(map->offset, map->size); |
0769d39c ST |
229 | if (!map->handle) { |
230 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
231 | return -ENOMEM; | |
232 | } | |
233 | } | |
bc5f4523 | 234 | |
1da177e4 | 235 | break; |
1da177e4 | 236 | case _DRM_SHM: |
54ba2f76 DA |
237 | list = drm_find_matching_map(dev, map); |
238 | if (list != NULL) { | |
239 | if(list->map->size != map->size) { | |
240 | DRM_DEBUG("Matching maps of type %d with " | |
241 | "mismatched sizes, (%ld vs %ld)\n", | |
242 | map->type, map->size, list->map->size); | |
243 | list->map->size = map->size; | |
244 | } | |
245 | ||
246 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
247 | *maplist = list; | |
248 | return 0; | |
249 | } | |
f239b7b0 | 250 | map->handle = vmalloc_user(map->size); |
b5e89ed5 DA |
251 | DRM_DEBUG("%lu %d %p\n", |
252 | map->size, drm_order(map->size), map->handle); | |
253 | if (!map->handle) { | |
254 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
1da177e4 LT |
255 | return -ENOMEM; |
256 | } | |
257 | map->offset = (unsigned long)map->handle; | |
b5e89ed5 | 258 | if (map->flags & _DRM_CONTAINS_LOCK) { |
1da177e4 | 259 | /* Prevent a 2nd X Server from creating a 2nd lock */ |
7c1c2871 | 260 | if (dev->primary->master->lock.hw_lock != NULL) { |
b5e89ed5 DA |
261 | vfree(map->handle); |
262 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
1da177e4 LT |
263 | return -EBUSY; |
264 | } | |
7c1c2871 | 265 | dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ |
1da177e4 LT |
266 | } |
267 | break; | |
54ba2f76 | 268 | case _DRM_AGP: { |
55910517 | 269 | struct drm_agp_mem *entry; |
54ba2f76 DA |
270 | int valid = 0; |
271 | ||
272 | if (!drm_core_has_AGP(dev)) { | |
273 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
274 | return -EINVAL; | |
275 | } | |
1da177e4 | 276 | #ifdef __alpha__ |
54ba2f76 | 277 | map->offset += dev->hose->mem_space->start; |
1da177e4 | 278 | #endif |
47a184a8 EA |
279 | /* In some cases (i810 driver), user space may have already |
280 | * added the AGP base itself, because dev->agp->base previously | |
281 | * only got set during AGP enable. So, only add the base | |
282 | * address if the map's offset isn't already within the | |
283 | * aperture. | |
54ba2f76 | 284 | */ |
47a184a8 EA |
285 | if (map->offset < dev->agp->base || |
286 | map->offset > dev->agp->base + | |
287 | dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { | |
288 | map->offset += dev->agp->base; | |
289 | } | |
54ba2f76 DA |
290 | map->mtrr = dev->agp->agp_mtrr; /* for getmap */ |
291 | ||
292 | /* This assumes the DRM is in total control of AGP space. | |
293 | * That's not always the case, as AGP can be under the control |
294 | * of user space (e.g. the i810 driver), in which case this loop is |
295 | * skipped; we then double-check that dev->agp->memory is actually |
296 | * non-empty and the offset really invalid before returning -EPERM. |
297 | */ | |
bd1b331f | 298 | list_for_each_entry(entry, &dev->agp->memory, head) { |
54ba2f76 DA |
299 | if ((map->offset >= entry->bound) && |
300 | (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { | |
301 | valid = 1; | |
302 | break; | |
303 | } | |
1da177e4 | 304 | } |
bd1b331f | 305 | if (!list_empty(&dev->agp->memory) && !valid) { |
54ba2f76 DA |
306 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
307 | return -EPERM; | |
308 | } | |
41c2e75e BH |
309 | DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", |
310 | (unsigned long long)map->offset, map->size); | |
54ba2f76 | 311 | |
a2c0a97b JB |
312 | break; |
313 | case _DRM_GEM: | |
314 | DRM_ERROR("tried to rmmap GEM object\n"); | |
1da177e4 | 315 | break; |
54ba2f76 | 316 | } |
1da177e4 LT |
317 | case _DRM_SCATTER_GATHER: |
318 | if (!dev->sg) { | |
319 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
320 | return -EINVAL; | |
321 | } | |
d1f2b55a | 322 | map->offset += (unsigned long)dev->sg->virtual; |
1da177e4 | 323 | break; |
b5e89ed5 | 324 | case _DRM_CONSISTENT: |
2d0f9eaf | 325 | /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, |
9c8da5eb | 326 | * As we're limiting the address to 2^32-1 (or less), |
2d0f9eaf DA |
327 | * casting it down to 32 bits is no problem, but we |
328 | * need to point to a 64bit variable first. */ | |
9c8da5eb DA |
329 | dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); |
330 | if (!dmah) { | |
2d0f9eaf DA |
331 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
332 | return -ENOMEM; | |
333 | } | |
9c8da5eb DA |
334 | map->handle = dmah->vaddr; |
335 | map->offset = (unsigned long)dmah->busaddr; | |
336 | kfree(dmah); | |
2d0f9eaf | 337 | break; |
1da177e4 | 338 | default: |
b5e89ed5 | 339 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
1da177e4 LT |
340 | return -EINVAL; |
341 | } | |
342 | ||
343 | list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); | |
b5e89ed5 | 344 | if (!list) { |
85abb3f9 | 345 | if (map->type == _DRM_REGISTERS) |
004a7727 | 346 | iounmap(map->handle); |
1da177e4 LT |
347 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
348 | return -EINVAL; | |
349 | } | |
350 | memset(list, 0, sizeof(*list)); | |
351 | list->map = map; | |
352 | ||
30e2fb18 | 353 | mutex_lock(&dev->struct_mutex); |
bd1b331f | 354 | list_add(&list->head, &dev->maplist); |
8d153f71 | 355 | |
d1f2b55a | 356 | /* Assign a 32-bit handle */ |
30e2fb18 | 357 | /* We do it here so that dev->struct_mutex protects the increment */ |
8d153f71 TH |
358 | user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : |
359 | map->offset; | |
f1a2a9b6 DM |
360 | ret = drm_map_handle(dev, &list->hash, user_token, 0, |
361 | (map->type == _DRM_SHM)); | |
8d153f71 | 362 | if (ret) { |
85abb3f9 | 363 | if (map->type == _DRM_REGISTERS) |
004a7727 | 364 | iounmap(map->handle); |
8d153f71 TH |
365 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
366 | drm_free(list, sizeof(*list), DRM_MEM_MAPS); | |
367 | mutex_unlock(&dev->struct_mutex); | |
368 | return ret; | |
369 | } | |
370 | ||
1545085a | 371 | list->user_token = list->hash.key << PAGE_SHIFT; |
30e2fb18 | 372 | mutex_unlock(&dev->struct_mutex); |
1da177e4 | 373 | |
27704a16 | 374 | list->master = dev->primary->master; |
89625eb1 | 375 | *maplist = list; |
7ab98401 | 376 | return 0; |
54ba2f76 | 377 | } |
89625eb1 | 378 | |
41c2e75e | 379 | int drm_addmap(struct drm_device * dev, resource_size_t offset, |
c60ce623 | 380 | unsigned int size, enum drm_map_type type, |
f77d390c | 381 | enum drm_map_flags flags, struct drm_local_map ** map_ptr) |
89625eb1 | 382 | { |
55910517 | 383 | struct drm_map_list *list; |
89625eb1 DA |
384 | int rc; |
385 | ||
386 | rc = drm_addmap_core(dev, offset, size, type, flags, &list); | |
387 | if (!rc) | |
388 | *map_ptr = list->map; | |
389 | return rc; | |
390 | } | |
b5e89ed5 | 391 | |
7ab98401 DA |
392 | EXPORT_SYMBOL(drm_addmap); |
393 | ||
f77d390c BH |
394 | /** |
395 | * Ioctl to specify a range of memory that is available for mapping by a | |
396 | * non-root process. | |
397 | * | |
398 | * \param inode device inode. | |
399 | * \param file_priv DRM file private. | |
400 | * \param cmd command. | |
401 | * \param arg pointer to a drm_map structure. | |
402 | * \return zero on success or a negative value on error. | |
403 | * | |
404 | */ | |
c153f45f EA |
405 | int drm_addmap_ioctl(struct drm_device *dev, void *data, |
406 | struct drm_file *file_priv) | |
7ab98401 | 407 | { |
c153f45f | 408 | struct drm_map *map = data; |
55910517 | 409 | struct drm_map_list *maplist; |
7ab98401 DA |
410 | int err; |
411 | ||
7c1c2871 | 412 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) |
d985c108 DA |
413 | return -EPERM; |
414 | ||
c153f45f EA |
415 | err = drm_addmap_core(dev, map->offset, map->size, map->type, |
416 | map->flags, &maplist); | |
7ab98401 | 417 | |
b5e89ed5 | 418 | if (err) |
7ab98401 | 419 | return err; |
d1f2b55a | 420 | |
67e1a014 | 421 | /* avoid a warning on 64-bit; this cast isn't very nice, but the API was settled long ago, so it's too late to change */ |
c153f45f | 422 | map->handle = (void *)(unsigned long)maplist->user_token; |
1da177e4 | 423 | return 0; |
88f399cd | 424 | } |
1da177e4 | 425 | |
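For context, this is roughly how userspace exercises the ioctl handled by drm_addmap_ioctl() above. The device path, size and flags are illustrative assumptions (a real client, typically the X server via libdrm, picks them per driver); _DRM_SHM is used because, per the handler, it does not require CAP_SYS_ADMIN.

```c
/* Userspace-side sketch of the addmap ioctl; assumes libdrm/kernel uapi
 * headers and a legacy DRM node at /dev/dri/card0. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_map map = {
		.offset = 0,                  /* kernel allocates the SHM backing */
		.size   = 4096,               /* one page of shared memory */
		.type   = _DRM_SHM,
		.flags  = _DRM_CONTAINS_LOCK, /* SAREA carrying the hardware lock */
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_ADD_MAP, &map) < 0) {
		perror("DRM_IOCTL_ADD_MAP");
		return 1;
	}
	/* map.handle now holds the 32-bit user token to pass to mmap() */
	printf("user token: %p\n", map.handle);
	return 0;
}
```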
1da177e4 LT |
426 | /** |
427 | * Remove a map from the maplist and deallocate its resources if the mapping |
428 | * isn't in use. | |
429 | * | |
1da177e4 LT |
430 | * Searches for the map on drm_device::maplist, removes it from the list, sees |
431 | * whether it's being used, and frees any associated resources (such as MTRRs) |
432 | * if it's not in use. |
433 | * | |
7ab98401 | 434 | * \sa drm_addmap |
1da177e4 | 435 | */ |
f77d390c | 436 | int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) |
1da177e4 | 437 | { |
55910517 | 438 | struct drm_map_list *r_list = NULL, *list_t; |
836cf046 | 439 | drm_dma_handle_t dmah; |
bd1b331f | 440 | int found = 0; |
7c1c2871 | 441 | struct drm_master *master; |
1da177e4 | 442 | |
836cf046 | 443 | /* Find the list entry for the map and remove it */ |
bd1b331f | 444 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { |
836cf046 | 445 | if (r_list->map == map) { |
7c1c2871 | 446 | master = r_list->master; |
bd1b331f | 447 | list_del(&r_list->head); |
1545085a TH |
448 | drm_ht_remove_key(&dev->map_hash, |
449 | r_list->user_token >> PAGE_SHIFT); | |
bd1b331f DA |
450 | drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); |
451 | found = 1; | |
836cf046 DA |
452 | break; |
453 | } | |
1da177e4 LT |
454 | } |
455 | ||
bd1b331f | 456 | if (!found) |
1da177e4 | 457 | return -EINVAL; |
1da177e4 | 458 | |
836cf046 DA |
459 | switch (map->type) { |
460 | case _DRM_REGISTERS: | |
004a7727 | 461 | iounmap(map->handle); |
836cf046 DA |
462 | /* FALLTHROUGH */ |
463 | case _DRM_FRAME_BUFFER: | |
464 | if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { | |
465 | int retcode; | |
b5e89ed5 DA |
466 | retcode = mtrr_del(map->mtrr, map->offset, map->size); |
467 | DRM_DEBUG("mtrr_del=%d\n", retcode); | |
1da177e4 | 468 | } |
836cf046 DA |
469 | break; |
470 | case _DRM_SHM: | |
471 | vfree(map->handle); | |
7c1c2871 DA |
472 | if (master) { |
473 | if (dev->sigdata.lock == master->lock.hw_lock) | |
474 | dev->sigdata.lock = NULL; | |
475 | master->lock.hw_lock = NULL; /* SHM removed */ | |
476 | master->lock.file_priv = NULL; | |
171901d1 | 477 | wake_up_interruptible_all(&master->lock.lock_queue); |
7c1c2871 | 478 | } |
836cf046 DA |
479 | break; |
480 | case _DRM_AGP: | |
481 | case _DRM_SCATTER_GATHER: | |
482 | break; | |
483 | case _DRM_CONSISTENT: | |
484 | dmah.vaddr = map->handle; | |
485 | dmah.busaddr = map->offset; | |
486 | dmah.size = map->size; | |
487 | __drm_pci_free(dev, &dmah); | |
488 | break; | |
a2c0a97b JB |
489 | case _DRM_GEM: |
490 | DRM_ERROR("tried to rmmap GEM object\n"); | |
491 | break; | |
1da177e4 | 492 | } |
836cf046 DA |
493 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
494 | ||
1da177e4 LT |
495 | return 0; |
496 | } | |
4e74f36d | 497 | EXPORT_SYMBOL(drm_rmmap_locked); |
836cf046 | 498 | |
f77d390c | 499 | int drm_rmmap(struct drm_device *dev, struct drm_local_map *map) |
836cf046 DA |
500 | { |
501 | int ret; | |
502 | ||
30e2fb18 | 503 | mutex_lock(&dev->struct_mutex); |
836cf046 | 504 | ret = drm_rmmap_locked(dev, map); |
30e2fb18 | 505 | mutex_unlock(&dev->struct_mutex); |
836cf046 DA |
506 | |
507 | return ret; | |
508 | } | |
ba8bbcf6 | 509 | EXPORT_SYMBOL(drm_rmmap); |
7ab98401 | 510 | |
836cf046 DA |
511 | /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on |
512 | * the last close of the device, and this is necessary for cleanup when things | |
513 | * exit uncleanly. Therefore, having userland manually remove mappings seems | |
514 | * like a pointless exercise since they're going away anyway. | |
515 | * | |
516 | * One use case might be after addmap is allowed for normal users for SHM and | |
517 | * gets used by drivers that the server doesn't need to care about. This seems | |
518 | * unlikely. | |
f77d390c BH |
519 | * |
520 | * \param inode device inode. | |
521 | * \param file_priv DRM file private. | |
522 | * \param cmd command. | |
523 | * \param arg pointer to a struct drm_map structure. | |
524 | * \return zero on success or a negative value on error. | |
836cf046 | 525 | */ |
c153f45f EA |
526 | int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
527 | struct drm_file *file_priv) | |
7ab98401 | 528 | { |
c153f45f | 529 | struct drm_map *request = data; |
f77d390c | 530 | struct drm_local_map *map = NULL; |
55910517 | 531 | struct drm_map_list *r_list; |
836cf046 | 532 | int ret; |
7ab98401 | 533 | |
30e2fb18 | 534 | mutex_lock(&dev->struct_mutex); |
bd1b331f | 535 | list_for_each_entry(r_list, &dev->maplist, head) { |
836cf046 | 536 | if (r_list->map && |
c153f45f | 537 | r_list->user_token == (unsigned long)request->handle && |
836cf046 DA |
538 | r_list->map->flags & _DRM_REMOVABLE) { |
539 | map = r_list->map; | |
540 | break; | |
541 | } | |
542 | } | |
543 | ||
544 | /* List has wrapped around to the head pointer, or it's empty and we didn't |
545 | * find anything. |
546 | */ | |
bd1b331f | 547 | if (list_empty(&dev->maplist) || !map) { |
30e2fb18 | 548 | mutex_unlock(&dev->struct_mutex); |
836cf046 DA |
549 | return -EINVAL; |
550 | } | |
551 | ||
836cf046 DA |
552 | /* Register and framebuffer maps are permanent */ |
553 | if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { | |
30e2fb18 | 554 | mutex_unlock(&dev->struct_mutex); |
836cf046 DA |
555 | return 0; |
556 | } | |
557 | ||
558 | ret = drm_rmmap_locked(dev, map); | |
559 | ||
30e2fb18 | 560 | mutex_unlock(&dev->struct_mutex); |
836cf046 DA |
561 | |
562 | return ret; | |
7ab98401 | 563 | } |
1da177e4 LT |
564 | |
565 | /** | |
566 | * Cleanup after an error on one of the addbufs() functions. | |
567 | * | |
836cf046 | 568 | * \param dev DRM device. |
1da177e4 LT |
569 | * \param entry buffer entry where the error occurred. |
570 | * | |
571 | * Frees any pages and buffers associated with the given entry. | |
572 | */ | |
cdd55a29 DA |
573 | static void drm_cleanup_buf_error(struct drm_device * dev, |
574 | struct drm_buf_entry * entry) | |
1da177e4 LT |
575 | { |
576 | int i; | |
577 | ||
578 | if (entry->seg_count) { | |
579 | for (i = 0; i < entry->seg_count; i++) { | |
580 | if (entry->seglist[i]) { | |
ddf19b97 | 581 | drm_pci_free(dev, entry->seglist[i]); |
1da177e4 LT |
582 | } |
583 | } | |
584 | drm_free(entry->seglist, | |
b5e89ed5 DA |
585 | entry->seg_count * |
586 | sizeof(*entry->seglist), DRM_MEM_SEGS); | |
1da177e4 LT |
587 | |
588 | entry->seg_count = 0; | |
589 | } | |
590 | ||
b5e89ed5 DA |
591 | if (entry->buf_count) { |
592 | for (i = 0; i < entry->buf_count; i++) { | |
1da177e4 LT |
593 | if (entry->buflist[i].dev_private) { |
594 | drm_free(entry->buflist[i].dev_private, | |
b5e89ed5 DA |
595 | entry->buflist[i].dev_priv_size, |
596 | DRM_MEM_BUFS); | |
1da177e4 LT |
597 | } |
598 | } | |
599 | drm_free(entry->buflist, | |
b5e89ed5 DA |
600 | entry->buf_count * |
601 | sizeof(*entry->buflist), DRM_MEM_BUFS); | |
1da177e4 LT |
602 | |
603 | entry->buf_count = 0; | |
604 | } | |
605 | } | |
606 | ||
607 | #if __OS_HAS_AGP | |
608 | /** | |
d59431bf | 609 | * Add AGP buffers for DMA transfers. |
1da177e4 | 610 | * |
84b1fd10 | 611 | * \param dev struct drm_device to which the buffers are to be added. |
c60ce623 | 612 | * \param request pointer to a struct drm_buf_desc describing the request. |
1da177e4 | 613 | * \return zero on success or a negative number on failure. |
b5e89ed5 | 614 | * |
1da177e4 LT |
615 | * After some sanity checks creates a drm_buf structure for each buffer and |
616 | * reallocates the buffer list of the same size order to accommodate the new | |
617 | * buffers. | |
618 | */ | |
84b1fd10 | 619 | int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) |
1da177e4 | 620 | { |
cdd55a29 DA |
621 | struct drm_device_dma *dma = dev->dma; |
622 | struct drm_buf_entry *entry; | |
55910517 | 623 | struct drm_agp_mem *agp_entry; |
056219e2 | 624 | struct drm_buf *buf; |
1da177e4 LT |
625 | unsigned long offset; |
626 | unsigned long agp_offset; | |
627 | int count; | |
628 | int order; | |
629 | int size; | |
630 | int alignment; | |
631 | int page_order; | |
632 | int total; | |
633 | int byte_count; | |
54ba2f76 | 634 | int i, valid; |
056219e2 | 635 | struct drm_buf **temp_buflist; |
1da177e4 | 636 | |
b5e89ed5 DA |
637 | if (!dma) |
638 | return -EINVAL; | |
1da177e4 | 639 | |
d59431bf DA |
640 | count = request->count; |
641 | order = drm_order(request->size); | |
1da177e4 LT |
642 | size = 1 << order; |
643 | ||
b5e89ed5 DA |
644 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
645 | ? PAGE_ALIGN(size) : size; | |
1da177e4 LT |
646 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
647 | total = PAGE_SIZE << page_order; | |
648 | ||
649 | byte_count = 0; | |
d59431bf | 650 | agp_offset = dev->agp->base + request->agp_start; |
1da177e4 | 651 | |
b5e89ed5 DA |
652 | DRM_DEBUG("count: %d\n", count); |
653 | DRM_DEBUG("order: %d\n", order); | |
654 | DRM_DEBUG("size: %d\n", size); | |
d985c108 | 655 | DRM_DEBUG("agp_offset: %lx\n", agp_offset); |
b5e89ed5 DA |
656 | DRM_DEBUG("alignment: %d\n", alignment); |
657 | DRM_DEBUG("page_order: %d\n", page_order); | |
658 | DRM_DEBUG("total: %d\n", total); | |
1da177e4 | 659 | |
b5e89ed5 DA |
660 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
661 | return -EINVAL; | |
662 | if (dev->queue_count) | |
663 | return -EBUSY; /* Not while in use */ | |
1da177e4 | 664 | |
54ba2f76 DA |
665 | /* Make sure buffers are located in AGP memory that we own */ |
666 | valid = 0; | |
bd1b331f | 667 | list_for_each_entry(agp_entry, &dev->agp->memory, head) { |
54ba2f76 DA |
668 | if ((agp_offset >= agp_entry->bound) && |
669 | (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { | |
670 | valid = 1; | |
671 | break; | |
672 | } | |
673 | } | |
bd1b331f | 674 | if (!list_empty(&dev->agp->memory) && !valid) { |
54ba2f76 DA |
675 | DRM_DEBUG("zone invalid\n"); |
676 | return -EINVAL; | |
677 | } | |
b5e89ed5 DA |
678 | spin_lock(&dev->count_lock); |
679 | if (dev->buf_use) { | |
680 | spin_unlock(&dev->count_lock); | |
1da177e4 LT |
681 | return -EBUSY; |
682 | } | |
b5e89ed5 DA |
683 | atomic_inc(&dev->buf_alloc); |
684 | spin_unlock(&dev->count_lock); | |
1da177e4 | 685 | |
30e2fb18 | 686 | mutex_lock(&dev->struct_mutex); |
1da177e4 | 687 | entry = &dma->bufs[order]; |
b5e89ed5 | 688 | if (entry->buf_count) { |
30e2fb18 | 689 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 DA |
690 | atomic_dec(&dev->buf_alloc); |
691 | return -ENOMEM; /* May only call once for each order */ | |
1da177e4 LT |
692 | } |
693 | ||
694 | if (count < 0 || count > 4096) { | |
30e2fb18 | 695 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 696 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
697 | return -EINVAL; |
698 | } | |
699 | ||
b5e89ed5 DA |
700 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), |
701 | DRM_MEM_BUFS); | |
702 | if (!entry->buflist) { | |
30e2fb18 | 703 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 704 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
705 | return -ENOMEM; |
706 | } | |
b5e89ed5 | 707 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); |
1da177e4 LT |
708 | |
709 | entry->buf_size = size; | |
710 | entry->page_order = page_order; | |
711 | ||
712 | offset = 0; | |
713 | ||
b5e89ed5 DA |
714 | while (entry->buf_count < count) { |
715 | buf = &entry->buflist[entry->buf_count]; | |
716 | buf->idx = dma->buf_count + entry->buf_count; | |
717 | buf->total = alignment; | |
718 | buf->order = order; | |
719 | buf->used = 0; | |
1da177e4 | 720 | |
b5e89ed5 | 721 | buf->offset = (dma->byte_count + offset); |
1da177e4 LT |
722 | buf->bus_address = agp_offset + offset; |
723 | buf->address = (void *)(agp_offset + offset); | |
b5e89ed5 | 724 | buf->next = NULL; |
1da177e4 LT |
725 | buf->waiting = 0; |
726 | buf->pending = 0; | |
b5e89ed5 | 727 | init_waitqueue_head(&buf->dma_wait); |
6c340eac | 728 | buf->file_priv = NULL; |
1da177e4 LT |
729 | |
730 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
b5e89ed5 DA |
731 | buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); |
732 | if (!buf->dev_private) { | |
1da177e4 LT |
733 | /* Set count correctly so we free the proper amount. */ |
734 | entry->buf_count = count; | |
b5e89ed5 | 735 | drm_cleanup_buf_error(dev, entry); |
30e2fb18 | 736 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 737 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
738 | return -ENOMEM; |
739 | } | |
b5e89ed5 | 740 | memset(buf->dev_private, 0, buf->dev_priv_size); |
1da177e4 | 741 | |
b5e89ed5 | 742 | DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); |
1da177e4 LT |
743 | |
744 | offset += alignment; | |
745 | entry->buf_count++; | |
746 | byte_count += PAGE_SIZE << page_order; | |
747 | } | |
748 | ||
b5e89ed5 | 749 | DRM_DEBUG("byte_count: %d\n", byte_count); |
1da177e4 | 750 | |
b5e89ed5 DA |
751 | temp_buflist = drm_realloc(dma->buflist, |
752 | dma->buf_count * sizeof(*dma->buflist), | |
753 | (dma->buf_count + entry->buf_count) | |
754 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | |
755 | if (!temp_buflist) { | |
1da177e4 | 756 | /* Free the entry because it isn't valid */ |
b5e89ed5 | 757 | drm_cleanup_buf_error(dev, entry); |
30e2fb18 | 758 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 759 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
760 | return -ENOMEM; |
761 | } | |
762 | dma->buflist = temp_buflist; | |
763 | ||
b5e89ed5 | 764 | for (i = 0; i < entry->buf_count; i++) { |
1da177e4 LT |
765 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
766 | } | |
767 | ||
768 | dma->buf_count += entry->buf_count; | |
d985c108 DA |
769 | dma->seg_count += entry->seg_count; |
770 | dma->page_count += byte_count >> PAGE_SHIFT; | |
1da177e4 LT |
771 | dma->byte_count += byte_count; |
772 | ||
b5e89ed5 DA |
773 | DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); |
774 | DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); | |
1da177e4 | 775 | |
30e2fb18 | 776 | mutex_unlock(&dev->struct_mutex); |
1da177e4 | 777 | |
d59431bf DA |
778 | request->count = entry->buf_count; |
779 | request->size = size; | |
1da177e4 LT |
780 | |
781 | dma->flags = _DRM_DMA_USE_AGP; | |
782 | ||
b5e89ed5 | 783 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
784 | return 0; |
785 | } | |
d84f76d3 | 786 | EXPORT_SYMBOL(drm_addbufs_agp); |
b5e89ed5 | 787 | #endif /* __OS_HAS_AGP */ |
1da177e4 | 788 | |
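The count/order/alignment arithmetic at the top of drm_addbufs_agp() (and repeated in the PCI, SG and FB variants below) can be checked in isolation. The following plain userspace sketch reproduces that math for an assumed 64 KiB request; order_of() is only a stand-in for drm_order(), which is defined elsewhere in the DRM core.

```c
/* Isolated check of the size/order arithmetic shared by the drm_addbufs_*() paths. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int order_of(unsigned long size)   /* smallest order with 1 << order >= size */
{
	int order = 0;
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long request_size = 65536;           /* example: 64 KiB buffers */
	int order = order_of(request_size);
	unsigned long size = 1UL << order;
	unsigned long alignment = PAGE_ALIGN(size);   /* _DRM_PAGE_ALIGN requested */
	int page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	unsigned long total = PAGE_SIZE << page_order;

	printf("order=%d size=%lu alignment=%lu page_order=%d total=%lu\n",
	       order, size, alignment, page_order, total);
	return 0;
}
```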
84b1fd10 | 789 | int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) |
1da177e4 | 790 | { |
cdd55a29 | 791 | struct drm_device_dma *dma = dev->dma; |
1da177e4 LT |
792 | int count; |
793 | int order; | |
794 | int size; | |
795 | int total; | |
796 | int page_order; | |
cdd55a29 | 797 | struct drm_buf_entry *entry; |
ddf19b97 | 798 | drm_dma_handle_t *dmah; |
056219e2 | 799 | struct drm_buf *buf; |
1da177e4 LT |
800 | int alignment; |
801 | unsigned long offset; | |
802 | int i; | |
803 | int byte_count; | |
804 | int page_count; | |
805 | unsigned long *temp_pagelist; | |
056219e2 | 806 | struct drm_buf **temp_buflist; |
1da177e4 | 807 | |
b5e89ed5 DA |
808 | if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) |
809 | return -EINVAL; | |
d985c108 | 810 | |
b5e89ed5 DA |
811 | if (!dma) |
812 | return -EINVAL; | |
1da177e4 | 813 | |
d985c108 DA |
814 | if (!capable(CAP_SYS_ADMIN)) |
815 | return -EPERM; | |
816 | ||
d59431bf DA |
817 | count = request->count; |
818 | order = drm_order(request->size); | |
1da177e4 LT |
819 | size = 1 << order; |
820 | ||
b5e89ed5 DA |
821 | DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", |
822 | request->count, request->size, size, order, dev->queue_count); | |
1da177e4 | 823 | |
b5e89ed5 DA |
824 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
825 | return -EINVAL; | |
826 | if (dev->queue_count) | |
827 | return -EBUSY; /* Not while in use */ | |
1da177e4 | 828 | |
d59431bf | 829 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
b5e89ed5 | 830 | ? PAGE_ALIGN(size) : size; |
1da177e4 LT |
831 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
832 | total = PAGE_SIZE << page_order; | |
833 | ||
b5e89ed5 DA |
834 | spin_lock(&dev->count_lock); |
835 | if (dev->buf_use) { | |
836 | spin_unlock(&dev->count_lock); | |
1da177e4 LT |
837 | return -EBUSY; |
838 | } | |
b5e89ed5 DA |
839 | atomic_inc(&dev->buf_alloc); |
840 | spin_unlock(&dev->count_lock); | |
1da177e4 | 841 | |
30e2fb18 | 842 | mutex_lock(&dev->struct_mutex); |
1da177e4 | 843 | entry = &dma->bufs[order]; |
b5e89ed5 | 844 | if (entry->buf_count) { |
30e2fb18 | 845 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 846 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
847 | return -ENOMEM; /* May only call once for each order */ |
848 | } | |
849 | ||
850 | if (count < 0 || count > 4096) { | |
30e2fb18 | 851 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 852 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
853 | return -EINVAL; |
854 | } | |
855 | ||
b5e89ed5 DA |
856 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), |
857 | DRM_MEM_BUFS); | |
858 | if (!entry->buflist) { | |
30e2fb18 | 859 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 860 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
861 | return -ENOMEM; |
862 | } | |
b5e89ed5 DA |
863 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); |
864 | ||
865 | entry->seglist = drm_alloc(count * sizeof(*entry->seglist), | |
866 | DRM_MEM_SEGS); | |
867 | if (!entry->seglist) { | |
868 | drm_free(entry->buflist, | |
869 | count * sizeof(*entry->buflist), DRM_MEM_BUFS); | |
30e2fb18 | 870 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 871 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
872 | return -ENOMEM; |
873 | } | |
b5e89ed5 | 874 | memset(entry->seglist, 0, count * sizeof(*entry->seglist)); |
1da177e4 LT |
875 | |
876 | /* Keep the original pagelist until we know all the allocations | |
877 | * have succeeded | |
878 | */ | |
b5e89ed5 DA |
879 | temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) |
880 | * sizeof(*dma->pagelist), DRM_MEM_PAGES); | |
1da177e4 | 881 | if (!temp_pagelist) { |
b5e89ed5 DA |
882 | drm_free(entry->buflist, |
883 | count * sizeof(*entry->buflist), DRM_MEM_BUFS); | |
884 | drm_free(entry->seglist, | |
885 | count * sizeof(*entry->seglist), DRM_MEM_SEGS); | |
30e2fb18 | 886 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 887 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
888 | return -ENOMEM; |
889 | } | |
890 | memcpy(temp_pagelist, | |
b5e89ed5 DA |
891 | dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); |
892 | DRM_DEBUG("pagelist: %d entries\n", | |
893 | dma->page_count + (count << page_order)); | |
1da177e4 | 894 | |
b5e89ed5 | 895 | entry->buf_size = size; |
1da177e4 LT |
896 | entry->page_order = page_order; |
897 | byte_count = 0; | |
898 | page_count = 0; | |
899 | ||
b5e89ed5 | 900 | while (entry->buf_count < count) { |
bc5f4523 | 901 | |
ddf19b97 | 902 | dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); |
bc5f4523 | 903 | |
ddf19b97 | 904 | if (!dmah) { |
1da177e4 LT |
905 | /* Set count correctly so we free the proper amount. */ |
906 | entry->buf_count = count; | |
907 | entry->seg_count = count; | |
908 | drm_cleanup_buf_error(dev, entry); | |
b5e89ed5 DA |
909 | drm_free(temp_pagelist, |
910 | (dma->page_count + (count << page_order)) | |
911 | * sizeof(*dma->pagelist), DRM_MEM_PAGES); | |
30e2fb18 | 912 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 913 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
914 | return -ENOMEM; |
915 | } | |
ddf19b97 | 916 | entry->seglist[entry->seg_count++] = dmah; |
b5e89ed5 DA |
917 | for (i = 0; i < (1 << page_order); i++) { |
918 | DRM_DEBUG("page %d @ 0x%08lx\n", | |
919 | dma->page_count + page_count, | |
ddf19b97 | 920 | (unsigned long)dmah->vaddr + PAGE_SIZE * i); |
1da177e4 | 921 | temp_pagelist[dma->page_count + page_count++] |
ddf19b97 | 922 | = (unsigned long)dmah->vaddr + PAGE_SIZE * i; |
1da177e4 | 923 | } |
b5e89ed5 DA |
924 | for (offset = 0; |
925 | offset + size <= total && entry->buf_count < count; | |
926 | offset += alignment, ++entry->buf_count) { | |
927 | buf = &entry->buflist[entry->buf_count]; | |
928 | buf->idx = dma->buf_count + entry->buf_count; | |
929 | buf->total = alignment; | |
930 | buf->order = order; | |
931 | buf->used = 0; | |
932 | buf->offset = (dma->byte_count + byte_count + offset); | |
ddf19b97 DA |
933 | buf->address = (void *)(dmah->vaddr + offset); |
934 | buf->bus_address = dmah->busaddr + offset; | |
b5e89ed5 | 935 | buf->next = NULL; |
1da177e4 LT |
936 | buf->waiting = 0; |
937 | buf->pending = 0; | |
b5e89ed5 | 938 | init_waitqueue_head(&buf->dma_wait); |
6c340eac | 939 | buf->file_priv = NULL; |
1da177e4 LT |
940 | |
941 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
b5e89ed5 DA |
942 | buf->dev_private = drm_alloc(buf->dev_priv_size, |
943 | DRM_MEM_BUFS); | |
944 | if (!buf->dev_private) { | |
1da177e4 LT |
945 | /* Set count correctly so we free the proper amount. */ |
946 | entry->buf_count = count; | |
947 | entry->seg_count = count; | |
b5e89ed5 DA |
948 | drm_cleanup_buf_error(dev, entry); |
949 | drm_free(temp_pagelist, | |
950 | (dma->page_count + | |
951 | (count << page_order)) | |
952 | * sizeof(*dma->pagelist), | |
953 | DRM_MEM_PAGES); | |
30e2fb18 | 954 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 955 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
956 | return -ENOMEM; |
957 | } | |
b5e89ed5 | 958 | memset(buf->dev_private, 0, buf->dev_priv_size); |
1da177e4 | 959 | |
b5e89ed5 DA |
960 | DRM_DEBUG("buffer %d @ %p\n", |
961 | entry->buf_count, buf->address); | |
1da177e4 LT |
962 | } |
963 | byte_count += PAGE_SIZE << page_order; | |
964 | } | |
965 | ||
b5e89ed5 DA |
966 | temp_buflist = drm_realloc(dma->buflist, |
967 | dma->buf_count * sizeof(*dma->buflist), | |
968 | (dma->buf_count + entry->buf_count) | |
969 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | |
1da177e4 LT |
970 | if (!temp_buflist) { |
971 | /* Free the entry because it isn't valid */ | |
b5e89ed5 DA |
972 | drm_cleanup_buf_error(dev, entry); |
973 | drm_free(temp_pagelist, | |
974 | (dma->page_count + (count << page_order)) | |
975 | * sizeof(*dma->pagelist), DRM_MEM_PAGES); | |
30e2fb18 | 976 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 977 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
978 | return -ENOMEM; |
979 | } | |
980 | dma->buflist = temp_buflist; | |
981 | ||
b5e89ed5 | 982 | for (i = 0; i < entry->buf_count; i++) { |
1da177e4 LT |
983 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
984 | } | |
985 | ||
986 | /* No allocations failed, so now we can replace the original pagelist |
987 | * with the new one. | |
988 | */ | |
989 | if (dma->page_count) { | |
990 | drm_free(dma->pagelist, | |
b5e89ed5 DA |
991 | dma->page_count * sizeof(*dma->pagelist), |
992 | DRM_MEM_PAGES); | |
1da177e4 LT |
993 | } |
994 | dma->pagelist = temp_pagelist; | |
995 | ||
996 | dma->buf_count += entry->buf_count; | |
997 | dma->seg_count += entry->seg_count; | |
998 | dma->page_count += entry->seg_count << page_order; | |
999 | dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); | |
1000 | ||
30e2fb18 | 1001 | mutex_unlock(&dev->struct_mutex); |
1da177e4 | 1002 | |
d59431bf DA |
1003 | request->count = entry->buf_count; |
1004 | request->size = size; | |
1da177e4 | 1005 | |
3417f33e GS |
1006 | if (request->flags & _DRM_PCI_BUFFER_RO) |
1007 | dma->flags = _DRM_DMA_USE_PCI_RO; | |
1008 | ||
b5e89ed5 | 1009 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1010 | return 0; |
1011 | ||
1012 | } | |
d84f76d3 | 1013 | EXPORT_SYMBOL(drm_addbufs_pci); |
1da177e4 | 1014 | |
84b1fd10 | 1015 | static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request) |
1da177e4 | 1016 | { |
cdd55a29 DA |
1017 | struct drm_device_dma *dma = dev->dma; |
1018 | struct drm_buf_entry *entry; | |
056219e2 | 1019 | struct drm_buf *buf; |
1da177e4 LT |
1020 | unsigned long offset; |
1021 | unsigned long agp_offset; | |
1022 | int count; | |
1023 | int order; | |
1024 | int size; | |
1025 | int alignment; | |
1026 | int page_order; | |
1027 | int total; | |
1028 | int byte_count; | |
1029 | int i; | |
056219e2 | 1030 | struct drm_buf **temp_buflist; |
1da177e4 | 1031 | |
b5e89ed5 DA |
1032 | if (!drm_core_check_feature(dev, DRIVER_SG)) |
1033 | return -EINVAL; | |
1034 | ||
1035 | if (!dma) | |
1036 | return -EINVAL; | |
1da177e4 | 1037 | |
d985c108 DA |
1038 | if (!capable(CAP_SYS_ADMIN)) |
1039 | return -EPERM; | |
1040 | ||
d59431bf DA |
1041 | count = request->count; |
1042 | order = drm_order(request->size); | |
1da177e4 LT |
1043 | size = 1 << order; |
1044 | ||
b5e89ed5 DA |
1045 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
1046 | ? PAGE_ALIGN(size) : size; | |
1da177e4 LT |
1047 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
1048 | total = PAGE_SIZE << page_order; | |
1049 | ||
1050 | byte_count = 0; | |
d59431bf | 1051 | agp_offset = request->agp_start; |
1da177e4 | 1052 | |
b5e89ed5 DA |
1053 | DRM_DEBUG("count: %d\n", count); |
1054 | DRM_DEBUG("order: %d\n", order); | |
1055 | DRM_DEBUG("size: %d\n", size); | |
1056 | DRM_DEBUG("agp_offset: %lu\n", agp_offset); | |
1057 | DRM_DEBUG("alignment: %d\n", alignment); | |
1058 | DRM_DEBUG("page_order: %d\n", page_order); | |
1059 | DRM_DEBUG("total: %d\n", total); | |
1da177e4 | 1060 | |
b5e89ed5 DA |
1061 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
1062 | return -EINVAL; | |
1063 | if (dev->queue_count) | |
1064 | return -EBUSY; /* Not while in use */ | |
1da177e4 | 1065 | |
b5e89ed5 DA |
1066 | spin_lock(&dev->count_lock); |
1067 | if (dev->buf_use) { | |
1068 | spin_unlock(&dev->count_lock); | |
1da177e4 LT |
1069 | return -EBUSY; |
1070 | } | |
b5e89ed5 DA |
1071 | atomic_inc(&dev->buf_alloc); |
1072 | spin_unlock(&dev->count_lock); | |
1da177e4 | 1073 | |
30e2fb18 | 1074 | mutex_lock(&dev->struct_mutex); |
1da177e4 | 1075 | entry = &dma->bufs[order]; |
b5e89ed5 | 1076 | if (entry->buf_count) { |
30e2fb18 | 1077 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 DA |
1078 | atomic_dec(&dev->buf_alloc); |
1079 | return -ENOMEM; /* May only call once for each order */ | |
1da177e4 LT |
1080 | } |
1081 | ||
1082 | if (count < 0 || count > 4096) { | |
30e2fb18 | 1083 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 1084 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1085 | return -EINVAL; |
1086 | } | |
1087 | ||
b5e89ed5 DA |
1088 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), |
1089 | DRM_MEM_BUFS); | |
1090 | if (!entry->buflist) { | |
30e2fb18 | 1091 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 1092 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1093 | return -ENOMEM; |
1094 | } | |
b5e89ed5 | 1095 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); |
1da177e4 LT |
1096 | |
1097 | entry->buf_size = size; | |
1098 | entry->page_order = page_order; | |
1099 | ||
1100 | offset = 0; | |
1101 | ||
b5e89ed5 DA |
1102 | while (entry->buf_count < count) { |
1103 | buf = &entry->buflist[entry->buf_count]; | |
1104 | buf->idx = dma->buf_count + entry->buf_count; | |
1105 | buf->total = alignment; | |
1106 | buf->order = order; | |
1107 | buf->used = 0; | |
1da177e4 | 1108 | |
b5e89ed5 | 1109 | buf->offset = (dma->byte_count + offset); |
1da177e4 | 1110 | buf->bus_address = agp_offset + offset; |
b5e89ed5 | 1111 | buf->address = (void *)(agp_offset + offset |
d1f2b55a | 1112 | + (unsigned long)dev->sg->virtual); |
b5e89ed5 | 1113 | buf->next = NULL; |
1da177e4 LT |
1114 | buf->waiting = 0; |
1115 | buf->pending = 0; | |
b5e89ed5 | 1116 | init_waitqueue_head(&buf->dma_wait); |
6c340eac | 1117 | buf->file_priv = NULL; |
1da177e4 LT |
1118 | |
1119 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
b5e89ed5 DA |
1120 | buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); |
1121 | if (!buf->dev_private) { | |
1da177e4 LT |
1122 | /* Set count correctly so we free the proper amount. */ |
1123 | entry->buf_count = count; | |
b5e89ed5 | 1124 | drm_cleanup_buf_error(dev, entry); |
30e2fb18 | 1125 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 1126 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1127 | return -ENOMEM; |
1128 | } | |
1129 | ||
b5e89ed5 | 1130 | memset(buf->dev_private, 0, buf->dev_priv_size); |
1da177e4 | 1131 | |
b5e89ed5 | 1132 | DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); |
1da177e4 LT |
1133 | |
1134 | offset += alignment; | |
1135 | entry->buf_count++; | |
1136 | byte_count += PAGE_SIZE << page_order; | |
1137 | } | |
1138 | ||
b5e89ed5 | 1139 | DRM_DEBUG("byte_count: %d\n", byte_count); |
1da177e4 | 1140 | |
b5e89ed5 DA |
1141 | temp_buflist = drm_realloc(dma->buflist, |
1142 | dma->buf_count * sizeof(*dma->buflist), | |
1143 | (dma->buf_count + entry->buf_count) | |
1144 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | |
1145 | if (!temp_buflist) { | |
1da177e4 | 1146 | /* Free the entry because it isn't valid */ |
b5e89ed5 | 1147 | drm_cleanup_buf_error(dev, entry); |
30e2fb18 | 1148 | mutex_unlock(&dev->struct_mutex); |
b5e89ed5 | 1149 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1150 | return -ENOMEM; |
1151 | } | |
1152 | dma->buflist = temp_buflist; | |
1153 | ||
b5e89ed5 | 1154 | for (i = 0; i < entry->buf_count; i++) { |
1da177e4 LT |
1155 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
1156 | } | |
1157 | ||
1158 | dma->buf_count += entry->buf_count; | |
d985c108 DA |
1159 | dma->seg_count += entry->seg_count; |
1160 | dma->page_count += byte_count >> PAGE_SHIFT; | |
1da177e4 LT |
1161 | dma->byte_count += byte_count; |
1162 | ||
b5e89ed5 DA |
1163 | DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); |
1164 | DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); | |
1da177e4 | 1165 | |
30e2fb18 | 1166 | mutex_unlock(&dev->struct_mutex); |
1da177e4 | 1167 | |
d59431bf DA |
1168 | request->count = entry->buf_count; |
1169 | request->size = size; | |
1da177e4 LT |
1170 | |
1171 | dma->flags = _DRM_DMA_USE_SG; | |
1172 | ||
b5e89ed5 | 1173 | atomic_dec(&dev->buf_alloc); |
1da177e4 LT |
1174 | return 0; |
1175 | } | |
1176 | ||
84b1fd10 | 1177 | static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request) |
b84397d6 | 1178 | { |
cdd55a29 DA |
1179 | struct drm_device_dma *dma = dev->dma; |
1180 | struct drm_buf_entry *entry; | |
056219e2 | 1181 | struct drm_buf *buf; |
b84397d6 DA |
1182 | unsigned long offset; |
1183 | unsigned long agp_offset; | |
1184 | int count; | |
1185 | int order; | |
1186 | int size; | |
1187 | int alignment; | |
1188 | int page_order; | |
1189 | int total; | |
1190 | int byte_count; | |
1191 | int i; | |
056219e2 | 1192 | struct drm_buf **temp_buflist; |
b84397d6 DA |
1193 | |
1194 | if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) | |
1195 | return -EINVAL; | |
b5e89ed5 | 1196 | |
b84397d6 DA |
1197 | if (!dma) |
1198 | return -EINVAL; | |
1199 | ||
d985c108 DA |
1200 | if (!capable(CAP_SYS_ADMIN)) |
1201 | return -EPERM; | |
1202 | ||
d59431bf DA |
1203 | count = request->count; |
1204 | order = drm_order(request->size); | |
b84397d6 DA |
1205 | size = 1 << order; |
1206 | ||
d59431bf | 1207 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
b84397d6 DA |
1208 | ? PAGE_ALIGN(size) : size; |
1209 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | |
1210 | total = PAGE_SIZE << page_order; | |
1211 | ||
1212 | byte_count = 0; | |
d59431bf | 1213 | agp_offset = request->agp_start; |
b84397d6 DA |
1214 | |
1215 | DRM_DEBUG("count: %d\n", count); | |
1216 | DRM_DEBUG("order: %d\n", order); | |
1217 | DRM_DEBUG("size: %d\n", size); | |
1218 | DRM_DEBUG("agp_offset: %lu\n", agp_offset); | |
1219 | DRM_DEBUG("alignment: %d\n", alignment); | |
1220 | DRM_DEBUG("page_order: %d\n", page_order); | |
1221 | DRM_DEBUG("total: %d\n", total); | |
1222 | ||
1223 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) | |
1224 | return -EINVAL; | |
1225 | if (dev->queue_count) | |
1226 | return -EBUSY; /* Not while in use */ | |
1227 | ||
1228 | spin_lock(&dev->count_lock); | |
1229 | if (dev->buf_use) { | |
1230 | spin_unlock(&dev->count_lock); | |
1231 | return -EBUSY; | |
1232 | } | |
1233 | atomic_inc(&dev->buf_alloc); | |
1234 | spin_unlock(&dev->count_lock); | |
1235 | ||
30e2fb18 | 1236 | mutex_lock(&dev->struct_mutex); |
b84397d6 DA |
1237 | entry = &dma->bufs[order]; |
1238 | if (entry->buf_count) { | |
30e2fb18 | 1239 | mutex_unlock(&dev->struct_mutex); |
b84397d6 DA |
1240 | atomic_dec(&dev->buf_alloc); |
1241 | return -ENOMEM; /* May only call once for each order */ | |
1242 | } | |
1243 | ||
1244 | if (count < 0 || count > 4096) { | |
30e2fb18 | 1245 | mutex_unlock(&dev->struct_mutex); |
b84397d6 DA |
1246 | atomic_dec(&dev->buf_alloc); |
1247 | return -EINVAL; | |
1248 | } | |
1249 | ||
1250 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), | |
1251 | DRM_MEM_BUFS); | |
1252 | if (!entry->buflist) { | |
30e2fb18 | 1253 | mutex_unlock(&dev->struct_mutex); |
b84397d6 DA |
1254 | atomic_dec(&dev->buf_alloc); |
1255 | return -ENOMEM; | |
1256 | } | |
1257 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); | |
1258 | ||
1259 | entry->buf_size = size; | |
1260 | entry->page_order = page_order; | |
1261 | ||
1262 | offset = 0; | |
1263 | ||
1264 | while (entry->buf_count < count) { | |
1265 | buf = &entry->buflist[entry->buf_count]; | |
1266 | buf->idx = dma->buf_count + entry->buf_count; | |
1267 | buf->total = alignment; | |
1268 | buf->order = order; | |
1269 | buf->used = 0; | |
1270 | ||
1271 | buf->offset = (dma->byte_count + offset); | |
1272 | buf->bus_address = agp_offset + offset; | |
1273 | buf->address = (void *)(agp_offset + offset); | |
1274 | buf->next = NULL; | |
1275 | buf->waiting = 0; | |
1276 | buf->pending = 0; | |
1277 | init_waitqueue_head(&buf->dma_wait); | |
6c340eac | 1278 | buf->file_priv = NULL; |
b84397d6 DA |
1279 | |
1280 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
1281 | buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); | |
1282 | if (!buf->dev_private) { | |
1283 | /* Set count correctly so we free the proper amount. */ | |
1284 | entry->buf_count = count; | |
1285 | drm_cleanup_buf_error(dev, entry); | |
30e2fb18 | 1286 | mutex_unlock(&dev->struct_mutex); |
b84397d6 DA |
1287 | atomic_dec(&dev->buf_alloc); |
1288 | return -ENOMEM; | |
1289 | } | |
1290 | memset(buf->dev_private, 0, buf->dev_priv_size); | |
1291 | ||
1292 | DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); | |
1293 | ||
1294 | offset += alignment; | |
1295 | entry->buf_count++; | |
1296 | byte_count += PAGE_SIZE << page_order; | |
1297 | } | |
1298 | ||
1299 | DRM_DEBUG("byte_count: %d\n", byte_count); | |
1300 | ||
1301 | temp_buflist = drm_realloc(dma->buflist, | |
1302 | dma->buf_count * sizeof(*dma->buflist), | |
1303 | (dma->buf_count + entry->buf_count) | |
1304 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | |
1305 | if (!temp_buflist) { | |
1306 | /* Free the entry because it isn't valid */ | |
1307 | drm_cleanup_buf_error(dev, entry); | |
30e2fb18 | 1308 | mutex_unlock(&dev->struct_mutex); |
b84397d6 DA |
1309 | atomic_dec(&dev->buf_alloc); |
1310 | return -ENOMEM; | |
1311 | } | |
1312 | dma->buflist = temp_buflist; | |
1313 | ||
1314 | for (i = 0; i < entry->buf_count; i++) { | |
1315 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | |
1316 | } | |
1317 | ||
1318 | dma->buf_count += entry->buf_count; | |
d985c108 DA |
1319 | dma->seg_count += entry->seg_count; |
1320 | dma->page_count += byte_count >> PAGE_SHIFT; | |
b84397d6 DA |
1321 | dma->byte_count += byte_count; |
1322 | ||
1323 | DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); | |
1324 | DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); | |
1325 | ||
30e2fb18 | 1326 | mutex_unlock(&dev->struct_mutex); |
b84397d6 | 1327 | |
d59431bf DA |
1328 | request->count = entry->buf_count; |
1329 | request->size = size; | |
b84397d6 DA |
1330 | |
1331 | dma->flags = _DRM_DMA_USE_FB; | |
1332 | ||
1333 | atomic_dec(&dev->buf_alloc); | |
1334 | return 0; | |
1335 | } | |
d985c108 | 1336 | |
b84397d6 | 1337 | |
1da177e4 LT |
1338 | /** |
1339 | * Add buffers for DMA transfers (ioctl). | |
1340 | * | |
1341 | * \param inode device inode. | |
6c340eac | 1342 | * \param file_priv DRM file private. |
1da177e4 | 1343 | * \param cmd command. |
c60ce623 | 1344 | * \param arg pointer to a struct drm_buf_desc request. |
1da177e4 LT |
1345 | * \return zero on success or a negative number on failure. |
1346 | * | |
1347 | * According to the memory type specified in drm_buf_desc::flags and the |
1348 | * build options, it dispatches the call either to addbufs_agp(), | |
1349 | * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent | |
1350 | * PCI memory respectively. | |
1351 | */ | |
c153f45f EA |
1352 | int drm_addbufs(struct drm_device *dev, void *data, |
1353 | struct drm_file *file_priv) | |
1da177e4 | 1354 | { |
c153f45f | 1355 | struct drm_buf_desc *request = data; |
d59431bf | 1356 | int ret; |
b5e89ed5 | 1357 | |
1da177e4 LT |
1358 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1359 | return -EINVAL; | |
1360 | ||
1da177e4 | 1361 | #if __OS_HAS_AGP |
c153f45f EA |
1362 | if (request->flags & _DRM_AGP_BUFFER) |
1363 | ret = drm_addbufs_agp(dev, request); | |
1da177e4 LT |
1364 | else |
1365 | #endif | |
c153f45f EA |
1366 | if (request->flags & _DRM_SG_BUFFER) |
1367 | ret = drm_addbufs_sg(dev, request); | |
1368 | else if (request->flags & _DRM_FB_BUFFER) | |
1369 | ret = drm_addbufs_fb(dev, request); | |
1da177e4 | 1370 | else |
c153f45f | 1371 | ret = drm_addbufs_pci(dev, request); |
d59431bf | 1372 | |
d59431bf | 1373 | return ret; |
1da177e4 LT |
1374 | } |
1375 | ||
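A rough userspace-side view of the dispatcher above: the flag bits in struct drm_buf_desc select the AGP, SG, FB or PCI path. The device node, count and size below are assumed examples, and the PCI path additionally requires CAP_SYS_ADMIN, as checked in drm_addbufs_pci().

```c
/* Userspace-side sketch of driving drm_addbufs(); values are examples only. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_buf_desc desc = {
		.count = 32,              /* ask for 32 buffers ... */
		.size  = 65536,           /* ... of 64 KiB each */
		.flags = _DRM_PAGE_ALIGN, /* no AGP/SG/FB flag: take the PCI path */
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0) {
		perror("DRM_IOCTL_ADD_BUFS");
		return 1;
	}
	/* the kernel writes back how many buffers of what size were created */
	printf("got %d buffers of %d bytes\n", desc.count, desc.size);
	return 0;
}
```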
1da177e4 LT |
1376 | /** |
1377 | * Get information about the buffer mappings. | |
1378 | * | |
1379 | * This was originally meant for debugging purposes, or for use by a sophisticated |
1380 | * client library to determine how best to use the available buffers (e.g., | |
1381 | * large buffers can be used for image transfer). | |
1382 | * | |
1383 | * \param inode device inode. | |
6c340eac | 1384 | * \param file_priv DRM file private. |
1da177e4 LT |
1385 | * \param cmd command. |
1386 | * \param arg pointer to a drm_buf_info structure. | |
1387 | * \return zero on success or a negative number on failure. | |
1388 | * | |
1389 | * Increments drm_device::buf_use while holding the drm_device::count_lock | |
1390 | * lock, preventing allocation of more buffers after this call. Information |
1391 | * about each requested buffer is then copied into user space. | |
1392 | */ | |
c153f45f EA |
1393 | int drm_infobufs(struct drm_device *dev, void *data, |
1394 | struct drm_file *file_priv) | |
1da177e4 | 1395 | { |
cdd55a29 | 1396 | struct drm_device_dma *dma = dev->dma; |
c153f45f | 1397 | struct drm_buf_info *request = data; |
1da177e4 LT |
1398 | int i; |
1399 | int count; | |
1400 | ||
1401 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1402 | return -EINVAL; | |
1403 | ||
b5e89ed5 DA |
1404 | if (!dma) |
1405 | return -EINVAL; | |
1da177e4 | 1406 | |
b5e89ed5 DA |
1407 | spin_lock(&dev->count_lock); |
1408 | if (atomic_read(&dev->buf_alloc)) { | |
1409 | spin_unlock(&dev->count_lock); | |
1da177e4 LT |
1410 | return -EBUSY; |
1411 | } | |
1412 | ++dev->buf_use; /* Can't allocate more after this call */ | |
b5e89ed5 | 1413 | spin_unlock(&dev->count_lock); |
1da177e4 | 1414 | |
b5e89ed5 DA |
1415 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1416 | if (dma->bufs[i].buf_count) | |
1417 | ++count; | |
1da177e4 LT |
1418 | } |
1419 | ||
b5e89ed5 | 1420 | DRM_DEBUG("count = %d\n", count); |
1da177e4 | 1421 | |
c153f45f | 1422 | if (request->count >= count) { |
b5e89ed5 DA |
1423 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1424 | if (dma->bufs[i].buf_count) { | |
c60ce623 | 1425 | struct drm_buf_desc __user *to = |
c153f45f | 1426 | &request->list[count]; |
cdd55a29 DA |
1427 | struct drm_buf_entry *from = &dma->bufs[i]; |
1428 | struct drm_freelist *list = &dma->bufs[i].freelist; | |
b5e89ed5 DA |
1429 | if (copy_to_user(&to->count, |
1430 | &from->buf_count, | |
1431 | sizeof(from->buf_count)) || | |
1432 | copy_to_user(&to->size, | |
1433 | &from->buf_size, | |
1434 | sizeof(from->buf_size)) || | |
1435 | copy_to_user(&to->low_mark, | |
1436 | &list->low_mark, | |
1437 | sizeof(list->low_mark)) || | |
1438 | copy_to_user(&to->high_mark, | |
1439 | &list->high_mark, | |
1440 | sizeof(list->high_mark))) | |
1da177e4 LT |
1441 | return -EFAULT; |
1442 | ||
b5e89ed5 DA |
1443 | DRM_DEBUG("%d %d %d %d %d\n", |
1444 | i, | |
1445 | dma->bufs[i].buf_count, | |
1446 | dma->bufs[i].buf_size, | |
1447 | dma->bufs[i].freelist.low_mark, | |
1448 | dma->bufs[i].freelist.high_mark); | |
1da177e4 LT |
1449 | ++count; |
1450 | } | |
1451 | } | |
1452 | } | |
c153f45f | 1453 | request->count = count; |
1da177e4 LT |
1454 | |
1455 | return 0; | |
1456 | } | |
1457 | ||
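A sketch of the usual two-pass pattern for this ioctl, assuming fd is an open, authenticated DRM descriptor on which buffers were added earlier: the first call with count == 0 only reports how many per-order pools are populated, the second fills one drm_buf_desc per pool.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int dump_buffer_pools(int fd)
{
	struct drm_buf_info info = { .count = 0, .list = NULL };
	struct drm_buf_desc *descs;
	int i;

	/* Pass 1: learn how many populated size pools there are. */
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0)
		return -1;
	if (info.count == 0)
		return 0;

	descs = calloc(info.count, sizeof(*descs));
	if (!descs)
		return -1;

	/* Pass 2: fetch one descriptor per populated pool. */
	info.list = descs;
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0) {
		free(descs);
		return -1;
	}

	for (i = 0; i < info.count; i++)
		printf("pool %d: %d buffers of %d bytes, marks %d/%d\n",
		       i, descs[i].count, descs[i].size,
		       descs[i].low_mark, descs[i].high_mark);

	free(descs);
	return 0;
}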
1458 | /** | |
1459 | * Specifies a low and high water mark for buffer allocation. | |
1460 | * | |
1461 | * \param inode device inode. | |
6c340eac | 1462 | * \param file_priv DRM file private. |
1da177e4 LT |
1463 | * \param cmd command. |
1464 | * \param arg a pointer to a drm_buf_desc structure. | |
1465 | * \return zero on success or a negative number on failure. | |
1466 | * | |
1467 | * Verifies that the size order is bounded between the admissible orders and | |
1468 | * updates the respective drm_device_dma::bufs entry low and high water marks. | |
1469 | * | |
1470 | * \note This ioctl is deprecated and rarely, if ever, used. | |
1471 | */ | |
c153f45f EA |
1472 | int drm_markbufs(struct drm_device *dev, void *data, |
1473 | struct drm_file *file_priv) | |
1da177e4 | 1474 | { |
cdd55a29 | 1475 | struct drm_device_dma *dma = dev->dma; |
c153f45f | 1476 | struct drm_buf_desc *request = data; |
1da177e4 | 1477 | int order; |
cdd55a29 | 1478 | struct drm_buf_entry *entry; |
1da177e4 LT |
1479 | |
1480 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1481 | return -EINVAL; | |
1482 | ||
b5e89ed5 DA |
1483 | if (!dma) |
1484 | return -EINVAL; | |
1da177e4 | 1485 | |
b5e89ed5 | 1486 | DRM_DEBUG("%d, %d, %d\n", |
c153f45f EA |
1487 | request->size, request->low_mark, request->high_mark); |
1488 | order = drm_order(request->size); | |
b5e89ed5 DA |
1489 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
1490 | return -EINVAL; | |
1da177e4 LT |
1491 | entry = &dma->bufs[order]; |
1492 | ||
c153f45f | 1493 | if (request->low_mark < 0 || request->low_mark > entry->buf_count) |
1da177e4 | 1494 | return -EINVAL; |
c153f45f | 1495 | if (request->high_mark < 0 || request->high_mark > entry->buf_count) |
1da177e4 LT |
1496 | return -EINVAL; |
1497 | ||
c153f45f EA |
1498 | entry->freelist.low_mark = request->low_mark; |
1499 | entry->freelist.high_mark = request->high_mark; | |
1da177e4 LT |
1500 | |
1501 | return 0; | |
1502 | } | |
1503 | ||
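A sketch of setting these marks for one pool; the 64 KiB size and the 4/24 marks are made-up values, and buffers of that size must already have been added, since drm_order(size) selects the drm_device_dma::bufs entry exactly as in the code above.

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Keep at least 4 buffers free in the 64 KiB pool and treat more than
 * 24 outstanding buffers as congestion. */
int tune_64k_pool(int fd)
{
	struct drm_buf_desc mark = {
		.size      = 64 * 1024,
		.low_mark  = 4,
		.high_mark = 24,
	};

	return ioctl(fd, DRM_IOCTL_MARK_BUFS, &mark);
}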
1504 | /** | |
b5e89ed5 | 1505 | * Unreserve the buffers in list, previously reserved using drmDMA. |
1da177e4 LT |
1506 | * |
1507 | * \param inode device inode. | |
6c340eac | 1508 | * \param file_priv DRM file private. |
1da177e4 LT |
1509 | * \param cmd command. |
1510 | * \param arg pointer to a drm_buf_free structure. | |
1511 | * \return zero on success or a negative number on failure. | |
b5e89ed5 | 1512 | * |
1da177e4 LT |
1513 | * Calls drm_free_buffer() for each used buffer. |
1514 | * This function is primarily used for debugging. | |
1515 | */ | |
c153f45f EA |
1516 | int drm_freebufs(struct drm_device *dev, void *data, |
1517 | struct drm_file *file_priv) | |
1da177e4 | 1518 | { |
cdd55a29 | 1519 | struct drm_device_dma *dma = dev->dma; |
c153f45f | 1520 | struct drm_buf_free *request = data; |
1da177e4 LT |
1521 | int i; |
1522 | int idx; | |
056219e2 | 1523 | struct drm_buf *buf; |
1da177e4 LT |
1524 | |
1525 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1526 | return -EINVAL; | |
1527 | ||
b5e89ed5 DA |
1528 | if (!dma) |
1529 | return -EINVAL; | |
1da177e4 | 1530 | |
c153f45f EA |
1531 | DRM_DEBUG("%d\n", request->count); |
1532 | for (i = 0; i < request->count; i++) { | |
1533 | if (copy_from_user(&idx, &request->list[i], sizeof(idx))) | |
1da177e4 | 1534 | return -EFAULT; |
b5e89ed5 DA |
1535 | if (idx < 0 || idx >= dma->buf_count) { |
1536 | DRM_ERROR("Index %d (of %d max)\n", | |
1537 | idx, dma->buf_count - 1); | |
1da177e4 LT |
1538 | return -EINVAL; |
1539 | } | |
1540 | buf = dma->buflist[idx]; | |
6c340eac | 1541 | if (buf->file_priv != file_priv) { |
b5e89ed5 | 1542 | DRM_ERROR("Process %d freeing buffer not owned\n", |
ba25f9dc | 1543 | task_pid_nr(current)); |
1da177e4 LT |
1544 | return -EINVAL; |
1545 | } | |
b5e89ed5 | 1546 | drm_free_buffer(dev, buf); |
1da177e4 LT |
1547 | } |
1548 | ||
1549 | return 0; | |
1550 | } | |
1551 | ||
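A sketch of handing buffers back; the indices are hypothetical and would normally come from an earlier drmDMA()/DRM_IOCTL_DMA reservation made on the same file descriptor, since the ownership check above rejects buffers reserved through a different file.

#include <sys/ioctl.h>
#include <drm/drm.h>

int release_two_buffers(int fd)
{
	int idx[2] = { 5, 7 };		/* made-up buffer indices */
	struct drm_buf_free req = {
		.count = 2,
		.list  = idx,
	};

	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
}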
1552 | /** | |
1553 | * Maps all of the DMA buffers into client-virtual space (ioctl). | |
1554 | * | |
1555 | * \param inode device inode. | |
6c340eac | 1556 | * \param file_priv DRM file private. |
1da177e4 LT |
1557 | * \param cmd command. |
1558 | * \param arg pointer to a drm_buf_map structure. | |
1559 | * \return zero on success or a negative number on failure. | |
1560 | * | |
3417f33e GS |
1561 | * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information |
1562 | * about each buffer into user space. For PCI buffers, it calls do_mmap() with | |
1563 | * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls | |
1564 | * drm_mmap_dma(). | |
1da177e4 | 1565 | */ |
c153f45f EA |
1566 | int drm_mapbufs(struct drm_device *dev, void *data, |
1567 | struct drm_file *file_priv) | |
1da177e4 | 1568 | { |
cdd55a29 | 1569 | struct drm_device_dma *dma = dev->dma; |
1da177e4 LT |
1570 | int retcode = 0; |
1571 | const int zero = 0; | |
1572 | unsigned long virtual; | |
1573 | unsigned long address; | |
c153f45f | 1574 | struct drm_buf_map *request = data; |
1da177e4 LT |
1575 | int i; |
1576 | ||
1577 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1578 | return -EINVAL; | |
1579 | ||
b5e89ed5 DA |
1580 | if (!dma) |
1581 | return -EINVAL; | |
1da177e4 | 1582 | |
b5e89ed5 DA |
1583 | spin_lock(&dev->count_lock); |
1584 | if (atomic_read(&dev->buf_alloc)) { | |
1585 | spin_unlock(&dev->count_lock); | |
1da177e4 LT |
1586 | return -EBUSY; |
1587 | } | |
1588 | dev->buf_use++; /* Can't allocate more after this call */ | |
b5e89ed5 | 1589 | spin_unlock(&dev->count_lock); |
1da177e4 | 1590 | |
c153f45f | 1591 | if (request->count >= dma->buf_count) { |
b84397d6 | 1592 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) |
b5e89ed5 | 1593 | || (drm_core_check_feature(dev, DRIVER_SG) |
b84397d6 DA |
1594 | && (dma->flags & _DRM_DMA_USE_SG)) |
1595 | || (drm_core_check_feature(dev, DRIVER_FB_DMA) | |
1596 | && (dma->flags & _DRM_DMA_USE_FB))) { | |
f77d390c | 1597 | struct drm_local_map *map = dev->agp_buffer_map; |
d1f2b55a | 1598 | unsigned long token = dev->agp_buffer_token; |
1da177e4 | 1599 | |
b5e89ed5 | 1600 | if (!map) { |
1da177e4 LT |
1601 | retcode = -EINVAL; |
1602 | goto done; | |
1603 | } | |
b5e89ed5 | 1604 | down_write(¤t->mm->mmap_sem); |
6c340eac | 1605 | virtual = do_mmap(file_priv->filp, 0, map->size, |
b5e89ed5 | 1606 | PROT_READ | PROT_WRITE, |
c153f45f EA |
1607 | MAP_SHARED, |
1608 | token); | |
b5e89ed5 | 1609 | up_write(¤t->mm->mmap_sem); |
1da177e4 | 1610 | } else { |
b5e89ed5 | 1611 | down_write(¤t->mm->mmap_sem); |
6c340eac | 1612 | virtual = do_mmap(file_priv->filp, 0, dma->byte_count, |
b5e89ed5 DA |
1613 | PROT_READ | PROT_WRITE, |
1614 | MAP_SHARED, 0); | |
1615 | up_write(¤t->mm->mmap_sem); | |
1da177e4 | 1616 | } |
b5e89ed5 | 1617 | if (virtual > -1024UL) { |
1da177e4 LT |
1618 | /* Real error */ |
1619 | retcode = (signed long)virtual; | |
1620 | goto done; | |
1621 | } | |
c153f45f | 1622 | request->virtual = (void __user *)virtual; |
1da177e4 | 1623 | |
b5e89ed5 | 1624 | for (i = 0; i < dma->buf_count; i++) { |
c153f45f | 1625 | if (copy_to_user(&request->list[i].idx, |
b5e89ed5 | 1626 | &dma->buflist[i]->idx, |
c153f45f | 1627 | sizeof(request->list[0].idx))) { |
1da177e4 LT |
1628 | retcode = -EFAULT; |
1629 | goto done; | |
1630 | } | |
c153f45f | 1631 | if (copy_to_user(&request->list[i].total, |
b5e89ed5 | 1632 | &dma->buflist[i]->total, |
c153f45f | 1633 | sizeof(request->list[0].total))) { |
1da177e4 LT |
1634 | retcode = -EFAULT; |
1635 | goto done; | |
1636 | } | |
c153f45f | 1637 | if (copy_to_user(&request->list[i].used, |
b5e89ed5 | 1638 | &zero, sizeof(zero))) { |
1da177e4 LT |
1639 | retcode = -EFAULT; |
1640 | goto done; | |
1641 | } | |
b5e89ed5 | 1642 | address = virtual + dma->buflist[i]->offset; /* *** */ |
c153f45f | 1643 | if (copy_to_user(&request->list[i].address, |
b5e89ed5 | 1644 | &address, sizeof(address))) { |
1da177e4 LT |
1645 | retcode = -EFAULT; |
1646 | goto done; | |
1647 | } | |
1648 | } | |
1649 | } | |
b5e89ed5 | 1650 | done: |
c153f45f EA |
1651 | request->count = dma->buf_count; |
1652 | DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); | |
1da177e4 LT |
1653 | |
1654 | return retcode; | |
1655 | } | |
1656 | ||
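A sketch of the client side, roughly what libdrm's drmMapBufs() does, assuming an authenticated fd on which buffers were added earlier. The first pass with count == 0 only learns the total buffer count (the mapping branch above is skipped); the second pass triggers the mmap and fills in the per-buffer records.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

struct drm_buf_pub *map_all_buffers(int fd, int *count_out)
{
	struct drm_buf_map req = { .count = 0, .list = NULL };
	struct drm_buf_pub *list;

	/* Pass 1: request->count < dma->buf_count, so only the count comes back. */
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0 || req.count == 0)
		return NULL;

	list = calloc(req.count, sizeof(*list));
	if (!list)
		return NULL;

	/* Pass 2: the kernel mmap()s the buffer region into this process and
	 * fills in idx, total size and client-virtual address for each buffer. */
	req.list = list;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0) {
		free(list);
		return NULL;
	}

	*count_out = req.count;
	return list;
}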
836cf046 DA |
1657 | /** |
1658 | * Compute size order. Returns the exponent of the smallest power of two | |
1659 | * which is greater than or equal to the given number. | |
b5e89ed5 | 1660 | * |
836cf046 DA |
1661 | * \param size size. |
1662 | * \return order. | |
1663 | * | |
1664 | * \todo Can be made faster. | |
1665 | */ | |
b5e89ed5 | 1666 | int drm_order(unsigned long size) |
836cf046 DA |
1667 | { |
1668 | int order; | |
1669 | unsigned long tmp; | |
1670 | ||
b5e89ed5 | 1671 | for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; |
836cf046 DA |
1672 | |
1673 | if (size & (size - 1)) | |
1674 | ++order; | |
1675 | ||
1676 | return order; | |
1677 | } | |
1678 | EXPORT_SYMBOL(drm_order); |
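A few worked values, which follow directly from the loop above and are easy to re-check by hand:

/*
 *	drm_order(1)     == 0	(2^0 = 1)
 *	drm_order(4096)  == 12	(4096 is already a power of two)
 *	drm_order(4097)  == 13	(rounded up to 2^13 = 8192)
 *	drm_order(65536) == 16
 */

For non-zero sizes this should be equivalent to order_base_2() from <linux/log2.h>, which this file already includes.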