drivers/gpu/drm/omapdrm/omap_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when pin_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
	 * the DMA address must be accessed through omap_gem_pin() to ensure
	 * that the mapping won't disappear unexpectedly. References must be
	 * released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * Number of users of the pinned mapping.
	 */
	refcount_t pin_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
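
/*
 * Illustrative sketch (not part of the driver): how the usergart
 * geometry fields relate for one format. Assuming PAGE_SIZE is 4096
 * and tiler_align() rounds a one-row, page-wide TILFMT_8BIT region up
 * to h = 64 rows (the actual slot height depends on the TILER
 * container geometry), omap_gem_init() below would compute:
 *
 *	height       = 64;		// rows per usergart slot
 *	height_shift = ilog2(64);	// = 6
 *	slot_shift   = ilog2((4096 / 64) >> 0);	// = 6, 64-byte wide slots
 *	stride_pfn   = tiler_stride(TILFMT_8BIT, 0) >> PAGE_SHIFT;
 *
 * omap_gem_fault_2d() then uses height/height_shift to round a faulting
 * page offset down to a slot boundary, and slot_shift to convert a
 * buffer width in bytes into a width in slots.
 */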

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC | OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
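
/*
 * Illustrative sketch (not part of the driver): one hypothetical
 * omap_gem_fault_2d() walk-through. Assume an 8-bit tiled buffer
 * 8192 bytes wide, so the virtual stride is m = 8192 / 4096 = 2
 * pages per row, with a usergart slot height of n = 64 (n_shift = 6),
 * and a fault at pgoff = 131:
 *
 *	base_pgoff = round_down(131, 2 << 6) = 128;	// slot-row start
 *	off        = 131 % 2 = 1;			// right half of the row
 *
 * The usergart entry is then repointed at the 64-row region starting
 * one page into that slot-row, and vmf_insert_mixed() is called up to
 * n = 64 times, stepping the pfn by stride_pfn and the user address
 * by PAGE_SIZE * m on each iteration.
 */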

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placeholder
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
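
/*
 * Illustrative userspace sketch (not part of the driver): the dumb
 * buffer path above is typically exercised with the generic DRM
 * ioctls like this; error handling is omitted for brevity.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *fb;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);   // -> omap_gem_dumb_map_offset()
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, fd, map.offset);      // faults land in omap_gem_fault()
 */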

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}

static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct tiler_block *block;
	int ret;

	BUG_ON(omap_obj->block);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
					 PAGE_SIZE);
	} else {
		block = tiler_reserve_1d(obj->size);
	}

	if (IS_ERR(block)) {
		ret = PTR_ERR(block);
		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
		goto fail;
	}

	/* TODO: enable async refill.. */
	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
	if (ret) {
		tiler_release(block);
		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
		goto fail;
	}

	omap_obj->dma_addr = tiler_ssptr(block);
	omap_obj->block = block;

	DBG("got dma address: %pad", &omap_obj->dma_addr);

fail:
	return ret;
}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj)) {
		if (refcount_read(&omap_obj->pin_cnt) == 0) {

			refcount_set(&omap_obj->pin_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_SCANOUT) {
				if (priv->has_dmm) {
					ret = omap_gem_pin_tiler(obj);
					if (ret)
						goto fail;
				}
			}
		} else {
			refcount_inc(&omap_obj->pin_cnt);
		}
	}

	if (dma_addr)
		*dma_addr = omap_obj->dma_addr;

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/**
 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	if (omap_gem_is_contiguous(omap_obj))
		return;

	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
		if (omap_obj->sgt) {
			sg_free_table(omap_obj->sgt);
			kfree(omap_obj->sgt);
			omap_obj->sgt = NULL;
		}
		if (!(omap_obj->flags & OMAP_BO_SCANOUT))
			return;
		if (priv->has_dmm) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}
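
/*
 * Illustrative sketch (not part of the driver): the balanced pin/unpin
 * pattern callers are expected to follow, per the kernel-doc above.
 * The omap_fb_example() name is hypothetical.
 *
 *	static int omap_fb_example(struct drm_gem_object *obj)
 *	{
 *		dma_addr_t dma_addr;
 *		int ret;
 *
 *		ret = omap_gem_pin(obj, &dma_addr);	// refcounted pin
 *		if (ret)
 *			return ret;
 *
 *		// ... program dma_addr into the scanout hardware ...
 *
 *		omap_gem_unpin(obj);			// balance the pin
 *		return 0;
 *	}
 */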

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
	    (omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	dma_addr_t addr;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int count, len, stride, i;
	int ret;

	ret = omap_gem_pin(obj, &addr);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&omap_obj->lock);

	sgt = omap_obj->sgt;
	if (sgt)
		goto out;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	if (addr) {
		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);

			len = omap_obj->width << (int)fmt;
			count = omap_obj->height;
			stride = tiler_stride(fmt, 0);
		} else {
			len = obj->size;
			count = 1;
			stride = 0;
		}
	} else {
		count = obj->size >> PAGE_SHIFT;
	}

	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
	if (ret)
		goto err_free;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	if (addr) {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, phys_to_page(addr), len,
				    offset_in_page(addr));
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = len;

			addr += stride;
		}
	} else {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = omap_obj->dma_addrs[i];
			sg_dma_len(sg) = PAGE_SIZE;
		}
	}

	omap_obj->sgt = sgt;
out:
	mutex_unlock(&omap_obj->lock);
	return sgt;

err_free:
	kfree(sgt);
err_unpin:
	mutex_unlock(&omap_obj->lock);
	omap_gem_unpin(obj);
	return ERR_PTR(ret);
}

void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (WARN_ON(omap_obj->sgt != sgt))
		return;

	omap_gem_unpin(obj);
}
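
/*
 * Illustrative sketch (not part of the driver): a consumer of the
 * scatterlist view, e.g. a dma-buf exporter map callback. Everything
 * other than the omap_gem_* calls is hypothetical.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = omap_gem_get_sg(obj, DMA_TO_DEVICE);	// pins the object
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		; // ... hand sg_dma_address(sg)/sg_dma_len(sg) to the device
 *
 *	omap_gem_put_sg(obj, sgt);	// drops the pin taken by get_sg
 */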

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr,
			refcount_read(&omap_obj->pin_cnt),
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

static void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj->lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}

static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;

	switch (flags & OMAP_BO_CACHE_MASK) {
	case OMAP_BO_CACHED:
	case OMAP_BO_WC:
	case OMAP_BO_CACHE_MASK:
		break;

	default:
		return false;
	}

	if (flags & OMAP_BO_TILED_MASK) {
		if (!priv->usergart)
			return false;

		switch (flags & OMAP_BO_TILED_MASK) {
		case OMAP_BO_TILED_8:
		case OMAP_BO_TILED_16:
		case OMAP_BO_TILED_32:
			break;

		default:
			return false;
		}
	}

	return true;
}

static const struct vm_operations_struct omap_gem_vm_ops = {
	.fault = omap_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs omap_gem_object_funcs = {
	.free = omap_gem_free_object,
	.export = omap_gem_prime_export,
	.vm_ops = &omap_gem_vm_ops,
};

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (!omap_gem_validate_flags(dev, flags))
		return NULL;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED | OMAP_BO_WC | OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * If we don't have DMM, we must allocate scanout buffers
		 * from contiguous DMA memory.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	obj->funcs = &omap_gem_object_funcs;

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct page **pages;
		unsigned int npages;
		int ret;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;
		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
		if (ret) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return 0;
}
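
/*
 * Illustrative sketch (not part of this file): omap_gem_new_handle()
 * is the backend for buffer-allocation ioctls. A caller building a
 * tiled 16-bit buffer might do something like the following (the
 * local variable names are hypothetical):
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	u32 handle;
 *	int ret;
 *
 *	ret = omap_gem_new_handle(dev, file, gsize,
 *				  OMAP_BO_TILED_16, &handle);
 *
 * omap_gem_new() then aligns the dimensions to TILER slot boundaries
 * and forces the shmem backing and cache flags as described above.
 */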

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
		TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
			    &entry->dma_addr,
			    usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}