/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"


static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

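/*
 * Compute a 32-bit handle that userspace can later pass to mmap() to reach
 * this map.  Tokens that already fit in 32 bits are inserted into the hash
 * table verbatim; otherwise drm_ht_just_insert_please() picks a free key,
 * and for _DRM_SHM maps the SHMLBA-relevant low bits of the kernel address
 * are preserved so the resulting vm_pgoff avoids cache aliasing.
 */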
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 * (For example, with 4 KiB pages and a 16 KiB SHMLBA,
		 * bits = ilog2(4) + 1 = 3, so the low three page-offset
		 * bits of the kernel address are carried over.)
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA.  --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. the i810 driver).  So this loop will
		 * get skipped and we double check that dev->agp->memory is
		 * actually set, as well as the map being invalid, before
		 * EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first.
		 */
		map->handle = dma_alloc_coherent(dev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
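
/*
 * Usage sketch (hypothetical legacy driver, illustration only): register and
 * SAREA maps would typically be created from the driver's load/firstopen
 * path.  The PCI BAR index, the flags and "pdev" below are assumptions, not
 * taken from any particular driver:
 *
 *	struct drm_local_map *regs, *sarea;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (!ret)
 *		ret = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 *					_DRM_CONTAINS_LOCK, &sarea);
 */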

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the
	 * API is already set, so it's too late to change */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees
 * if it's being used, and frees any associated resources (such as MTRRs)
 * if it's not in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		fallthrough;
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(dev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This
 * seems unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	drm_dma_handle_t *dmah;
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				dmah = entry->seglist[i];
				dma_free_coherent(dev->dev,
						  dmah->size,
						  dmah->vaddr,
						  dmah->busaddr);
				kfree(dmah);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

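/*
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * Like drm_legacy_addbufs_agp() above, but each buffer is backed by
 * dma_alloc_coherent() pages, and the pagelist consumed by drm_mmap_dma()
 * is rebuilt alongside the buffer list.
 */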
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		dmah->size = total;
		dmah->vaddr = dma_alloc_coherent(dev->dev,
						 dmah->size,
						 &dmah->busaddr,
						 GFP_KERNEL);
		if (!dmah->vaddr) {
			kfree(dmah);

			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

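/*
 * Add scatter-gather buffers for DMA transfers.
 *
 * Like drm_legacy_addbufs_agp() above, but the buffers live in the
 * device's scatter-gather aperture: each buffer address is an offset
 * from dev->sg->virtual rather than a physical AGP address.
 */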
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];

			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;

	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/*
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies
 * information about each buffer into user space.  For PCI buffers, it calls
 * vm_mmap() with offset equal to 0, which drm_mmap() interprets as PCI
 * buffers and calls drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;

	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

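/*
 * Dispatch a DMA ioctl to the driver-specific dma_ioctl hook, if the
 * driver provides one.
 */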
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

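/*
 * Walk dev->maplist and return the first shared-memory map that contains
 * the hardware lock (i.e. the SAREA), or NULL if none has been added.
 */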
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);