drm/msm: prime support
[linux-2.6-block.git] / drivers / gpu / drm / msm / msm_gem.c
index 2bae46c66a30dd4c3eac623c07dbf55c70d32cb8..ea2c96f9459bf0095dd0ec7abf02831e7ac5630e 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
        }
 }
 
+struct page **msm_gem_get_pages(struct drm_gem_object *obj) /* locked wrapper around get_pages(), for prime export */
+{
+       struct drm_device *dev = obj->dev;
+       struct page **p;
+       mutex_lock(&dev->struct_mutex); /* get_pages() is called under struct_mutex elsewhere in this file */
+       p = get_pages(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return p; /* NOTE(review): presumably ERR_PTR on failure -- confirm against get_pages() */
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj) /* counterpart of msm_gem_get_pages(); intentionally a no-op for now */
+{
+       /* when we start tracking the pin count, then do something here */
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
 {
@@ -510,10 +526,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
        drm_gem_free_mmap_offset(obj);
 
-       if (msm_obj->vaddr)
-               vunmap(msm_obj->vaddr);
+       if (obj->import_attach) {
+               if (msm_obj->vaddr)
+                       dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+
+               /* Don't drop the pages for imported dmabuf, as they are not
+                * ours, just free the array we allocated:
+                */
+               if (msm_obj->pages)
+                       drm_free_large(msm_obj->pages);
 
-       put_pages(obj);
+       } else {
+               if (msm_obj->vaddr)
+                       vunmap(msm_obj->vaddr);
+               put_pages(obj);
+       }
 
        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);
@@ -549,17 +576,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        return ret;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-               uint32_t size, uint32_t flags)
+static int msm_gem_new_impl(struct drm_device *dev,
+               uint32_t size, uint32_t flags,
+               struct drm_gem_object **obj)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
-       struct drm_gem_object *obj = NULL;
-       int ret;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       size = PAGE_ALIGN(size);
 
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
@@ -569,21 +591,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
-               ret = -EINVAL;
-               goto fail;
+               return -EINVAL;
        }
 
        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
-       if (!msm_obj) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       obj = &msm_obj->base;
-
-       ret = drm_gem_object_init(dev, obj, size);
-       if (ret)
-               goto fail;
+       if (!msm_obj)
+               return -ENOMEM;
 
        msm_obj->flags = flags;
 
@@ -594,6 +607,67 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        INIT_LIST_HEAD(&msm_obj->inactive_work);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
+       *obj = &msm_obj->base;
+
+       return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, /* allocate a shmem-backed GEM bo; returns ERR_PTR on failure */
+               uint32_t size, uint32_t flags)
+{
+       struct drm_gem_object *obj = NULL; /* must init: first goto fail would otherwise test an uninitialized pointer */
+       int ret;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex)); /* caller must hold struct_mutex */
+
+       size = PAGE_ALIGN(size);
+
+       ret = msm_gem_new_impl(dev, size, flags, &obj); /* validates flags, allocs msm_obj, links into inactive_list */
+       if (ret)
+               goto fail;
+
+       ret = drm_gem_object_init(dev, obj, size);
+       if (ret)
+               goto fail;
+
+       return obj;
+
+fail:
+       if (obj)
+               drm_gem_object_unreference_unlocked(obj);
+
+       return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+               uint32_t size, struct sg_table *sgt)
+{
+       struct msm_gem_object *msm_obj;
+       struct drm_gem_object *obj;
+       int ret, npages;
+
+       size = PAGE_ALIGN(size);
+
+       ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+       if (ret)
+               goto fail;
+
+       drm_gem_private_object_init(dev, obj, size);
+
+       npages = size / PAGE_SIZE;
+
+       msm_obj = to_msm_bo(obj);
+       msm_obj->sgt = sgt;
+       msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (!msm_obj->pages) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+       if (ret)
+               goto fail;
+
        return obj;
 
 fail: