Merge drm/drm-next into drm-misc-next
[linux-2.6-block.git] drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a960fc408f1042999e4c9b6c04baef6831b..9b0d540ff4a86a995c9f7250fac459759e068127 100644
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       WARN_ON(shmem->vmap_use_count);
+       drm_WARN_ON(obj->dev, shmem->vmap_use_count);
 
        if (obj->import_attach) {
                drm_prime_gem_destroy(obj, shmem->sgt);
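
Note: the conversions in this patch replace the generic WARN_ON() family with the device-aware drm_WARN_ON() variants, so a triggered backtrace names the DRM device that hit it, which matters on multi-GPU systems. A simplified sketch of the macro shape, based on include/drm/drm_print.h (the sketch_ names are mine, not the kernel's, and this is not the verbatim definition):

    #include <drm/drm_print.h>
    #include <linux/device.h>

    /*
     * Simplified sketch, not the verbatim kernel macros: the condition
     * is forwarded to WARN() with the driver and device name prepended.
     */
    #define sketch_drm_WARN(drm, condition, format, arg...)            \
            WARN(condition, "%s %s: " format,                          \
                 dev_driver_string((drm)->dev), dev_name((drm)->dev),  \
                 ##arg)

    #define sketch_drm_WARN_ON(drm, x) \
            sketch_drm_WARN((drm), (x), "%s", "drm_WARN_ON(" __stringify(x) ")")
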
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
                        drm_gem_shmem_put_pages(shmem);
        }
 
-       WARN_ON(shmem->pages_use_count);
+       drm_WARN_ON(obj->dev, shmem->pages_use_count);
 
        drm_gem_object_release(obj);
        mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 
        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages)) {
-               DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+               drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+                           PTR_ERR(pages));
                shmem->pages_use_count = 0;
                return PTR_ERR(pages);
        }
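
Note: DRM_DEBUG_KMS() likewise logs without device context; drm_dbg_kms() takes the drm_device and prefixes the message with the driver and device name, gated on the DRM_UT_KMS bit of the drm.debug module parameter. A hypothetical usage snippet (foo_commit and its message are illustrative only, not from this patch):

    #include <drm/drm_print.h>

    /* Illustrative only: the message is tagged with the originating
     * device, unlike the legacy DRM_DEBUG_KMS().
     */
    static void foo_commit(struct drm_device *dev, int err)
    {
            drm_dbg_kms(dev, "commit failed (%d)\n", err);
    }
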
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
  */
 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
+       struct drm_gem_object *obj = &shmem->base;
        int ret;
 
-       WARN_ON(shmem->base.import_attach);
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        ret = mutex_lock_interruptible(&shmem->pages_lock);
        if (ret)
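
Note: because the pages lock is taken with mutex_lock_interruptible(), drm_gem_shmem_get_pages() can return -EINTR when the waiting task is signalled. A hypothetical caller sketch (baz_populate is an invented name) showing the expected error propagation:

    #include <drm/drm_gem_shmem_helper.h>

    /* Hypothetical caller: errors, including -EINTR from the
     * interruptible lock, must be handed back so an ioctl path can
     * be restarted by userspace.
     */
    static int baz_populate(struct drm_gem_shmem_object *shmem)
    {
            int ret = drm_gem_shmem_get_pages(shmem);

            if (ret)        /* may be -EINTR, -ENOMEM, ... */
                    return ret;

            /* ... access shmem->pages ... */

            drm_gem_shmem_put_pages(shmem);
            return 0;
    }
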
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       if (WARN_ON_ONCE(!shmem->pages_use_count))
+       if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                return;
 
        if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
-       WARN_ON(shmem->base.import_attach);
+       struct drm_gem_object *obj = &shmem->base;
+
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        return drm_gem_shmem_get_pages(shmem);
 }
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
-       WARN_ON(shmem->base.import_attach);
+       struct drm_gem_object *obj = &shmem->base;
+
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        drm_gem_shmem_put_pages(shmem);
 }
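
Note: the pin/unpin pair only makes sense for objects backed by the helper's own shmem pages; an imported dma-buf stays resident under its exporter's control, which is what the drm_WARN_ON() on import_attach asserts. A hypothetical driver-side guard (bar_pin is an invented name):

    #include <drm/drm_gem_shmem_helper.h>
    #include <drm/drm_print.h>

    /* Illustrative only: never route imported dma-bufs through the
     * shmem pin path.
     */
    static int bar_pin(struct drm_gem_shmem_object *shmem)
    {
            if (drm_WARN_ON(shmem->base.dev, shmem->base.import_attach))
                    return -EINVAL;

            return drm_gem_shmem_pin(shmem);
    }
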
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
        struct drm_gem_object *obj = &shmem->base;
        int ret = 0;
 
-       if (shmem->vmap_use_count++ > 0) {
-               iosys_map_set_vaddr(map, shmem->vaddr);
-               return 0;
-       }
-
        if (obj->import_attach) {
                ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
                if (!ret) {
-                       if (WARN_ON(map->is_iomem)) {
+                       if (drm_WARN_ON(obj->dev, map->is_iomem)) {
                                dma_buf_vunmap(obj->import_attach->dmabuf, map);
-                               ret = -EIO;
-                               goto err_put_pages;
+                               return -EIO;
                        }
-                       shmem->vaddr = map->vaddr;
                }
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
+               if (shmem->vmap_use_count++ > 0) {
+                       iosys_map_set_vaddr(map, shmem->vaddr);
+                       return 0;
+               }
+
                ret = drm_gem_shmem_get_pages(shmem);
                if (ret)
                        goto err_zero_use;
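
Note: this hunk is the functional core of the patch. For imported dma-bufs the mapping is now delegated entirely to dma_buf_vmap(), which keeps its own vmapping refcount, so shmem->vmap_use_count and shmem->vaddr are only maintained for native shmem objects. A hypothetical caller sketch using the iosys_map interface (qux_upload is an invented name):

    #include <linux/iosys-map.h>
    #include <drm/drm_gem_shmem_helper.h>

    /* Illustrative only: iosys_map abstracts over system and I/O
     * memory, so the copy helper is safe for either backing store.
     */
    static int qux_upload(struct drm_gem_shmem_object *shmem,
                          const void *data, size_t size)
    {
            struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
            int ret;

            ret = drm_gem_shmem_vmap(shmem, &map);
            if (ret)
                    return ret;

            iosys_map_memcpy_to(&map, 0, data, size);
            drm_gem_shmem_vunmap(shmem, &map);
            return 0;
    }
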
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
        }
 
        if (ret) {
-               DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+               drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
                goto err_put_pages;
        }
 
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       if (WARN_ON_ONCE(!shmem->vmap_use_count))
-               return;
-
-       if (--shmem->vmap_use_count > 0)
-               return;
-
        if (obj->import_attach) {
                dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
+               if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+                       return;
+
+               if (--shmem->vmap_use_count > 0)
+                       return;
+
                vunmap(shmem->vaddr);
                drm_gem_shmem_put_pages(shmem);
        }
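
Note: vunmap is restructured to mirror vmap: the use-count bookkeeping moves inside the native-shmem branch, while imported buffers go straight to dma_buf_vunmap(). A hedged illustration of the resulting balancing rule (quux_nested_vmap is an invented name; it assumes dma-buf's internal vmap refcounting, which is not shown in this patch):

    #include <linux/iosys-map.h>
    #include <drm/drm_gem_shmem_helper.h>

    /* Illustrative only: nested vmaps of an imported object are now
     * balanced by dma-buf itself; shmem->vmap_use_count stays at zero
     * on this path.
     */
    static void quux_nested_vmap(struct drm_gem_shmem_object *shmem)
    {
            struct iosys_map a = IOSYS_MAP_INIT_VADDR(NULL);
            struct iosys_map b = IOSYS_MAP_INIT_VADDR(NULL);

            if (drm_gem_shmem_vmap(shmem, &a))
                    return;

            if (drm_gem_shmem_vmap(shmem, &b)) {
                    drm_gem_shmem_vunmap(shmem, &a);
                    return;
            }

            drm_gem_shmem_vunmap(shmem, &b);
            drm_gem_shmem_vunmap(shmem, &a);   /* last put unmaps */
    }
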
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
        struct drm_gem_object *obj = &shmem->base;
        struct drm_device *dev = obj->dev;
 
-       WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+       drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
        dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        mutex_lock(&shmem->pages_lock);
 
        if (page_offset >= num_pages ||
-           WARN_ON_ONCE(!shmem->pages) ||
+           drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
            shmem->madv < 0) {
                ret = VM_FAULT_SIGBUS;
        } else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-       WARN_ON(shmem->base.import_attach);
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        mutex_lock(&shmem->pages_lock);
 
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
         * mmap'd, vm_open() just grabs an additional reference for the new
         * mm the vma is getting copied into (ie. on fork()).
         */
-       if (!WARN_ON_ONCE(!shmem->pages_use_count))
+       if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                shmem->pages_use_count++;
 
        mutex_unlock(&shmem->pages_lock);
@@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
                              struct drm_printer *p, unsigned int indent)
 {
+       if (shmem->base.import_attach)
+               return;
+
        drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
        drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
        drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       WARN_ON(shmem->base.import_attach);
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
@@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
        if (shmem->sgt)
                return shmem->sgt;
 
-       WARN_ON(obj->import_attach);
+       drm_WARN_ON(obj->dev, obj->import_attach);
 
        ret = drm_gem_shmem_get_pages_locked(shmem);
        if (ret)
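
Note: drm_gem_shmem_get_pages_sgt_locked() lazily builds and caches the DMA-mapped sg_table on first use; the drm_WARN_ON() keeps imported objects off this path, since their sg_table comes from the dma-buf attachment at import time. A simplified sketch of the caching pattern (get_sgt_cached is an invented name, not the verbatim helper body):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>
    #include <drm/drm_gem_shmem_helper.h>

    /* Illustrative shape of the lazy sg_table caching. */
    static struct sg_table *get_sgt_cached(struct drm_gem_shmem_object *shmem)
    {
            struct drm_gem_object *obj = &shmem->base;
            struct sg_table *sgt;
            int ret;

            if (shmem->sgt)                 /* fast path: already mapped */
                    return shmem->sgt;

            sgt = drm_gem_shmem_get_sg_table(shmem);
            if (IS_ERR(sgt))
                    return sgt;

            ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
            if (ret) {
                    sg_free_table(sgt);
                    kfree(sgt);
                    return ERR_PTR(ret);
            }

            shmem->sgt = sgt;               /* cached for later calls */
            return sgt;
    }
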