Merge drm/drm-fixes into drm-misc-fixes
[linux-block.git] / drivers / dma-buf / dma-buf.c
index eb6b59363c4f589a1100a5301bd99d4458793acb..e6528767efc7c340a51d6aa9e8bd05b6c3615936 100644 (file)
@@ -131,6 +131,7 @@ static struct file_system_type dma_buf_fs_type = {
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
        struct dma_buf *dmabuf;
+       int ret;
 
        if (!is_dma_buf_file(file))
                return -EINVAL;
@@ -146,7 +147,11 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
-       return dmabuf->ops->mmap(dmabuf, vma);
+       dma_resv_lock(dmabuf->resv, NULL);
+       ret = dmabuf->ops->mmap(dmabuf, vma);
+       dma_resv_unlock(dmabuf->resv);
+
+       return ret;
 }
 
 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -655,7 +660,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
        dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
-       mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);
 
        if (!resv) {
@@ -800,6 +804,70 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
        return sg_table;
 }
 
+/**
+ * DOC: locking convention
+ *
+ * In order to avoid deadlock situations between dma-buf exports and importers,
+ * all dma-buf API users must follow the common dma-buf locking convention.
+ *
+ * Convention for importers
+ *
+ * 1. Importers must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_pin()
+ *     - dma_buf_unpin()
+ *     - dma_buf_map_attachment()
+ *     - dma_buf_unmap_attachment()
+ *     - dma_buf_vmap()
+ *     - dma_buf_vunmap()
+ *
+ * 2. Importers must not hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_attach()
+ *     - dma_buf_dynamic_attach()
+ *     - dma_buf_detach()
+ *     - dma_buf_export()
+ *     - dma_buf_fd()
+ *     - dma_buf_get()
+ *     - dma_buf_put()
+ *     - dma_buf_mmap()
+ *     - dma_buf_begin_cpu_access()
+ *     - dma_buf_end_cpu_access()
+ *     - dma_buf_map_attachment_unlocked()
+ *     - dma_buf_unmap_attachment_unlocked()
+ *     - dma_buf_vmap_unlocked()
+ *     - dma_buf_vunmap_unlocked()
+ *
+ * Convention for exporters
+ *
+ * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
+ *    reservation and exporter can take the lock:
+ *
+ *     - &dma_buf_ops.attach()
+ *     - &dma_buf_ops.detach()
+ *     - &dma_buf_ops.release()
+ *     - &dma_buf_ops.begin_cpu_access()
+ *     - &dma_buf_ops.end_cpu_access()
+ *
+ * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
+ *    reservation and exporter can't take the lock:
+ *
+ *     - &dma_buf_ops.pin()
+ *     - &dma_buf_ops.unpin()
+ *     - &dma_buf_ops.map_dma_buf()
+ *     - &dma_buf_ops.unmap_dma_buf()
+ *     - &dma_buf_ops.mmap()
+ *     - &dma_buf_ops.vmap()
+ *     - &dma_buf_ops.vunmap()
+ *
+ * 3. Exporters must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_move_notify()
+ */
+
 /**
  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:            [in]    buffer to attach device to.
@@ -864,8 +932,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
            dma_buf_is_dynamic(dmabuf)) {
                struct sg_table *sgt;
 
+               dma_resv_lock(attach->dmabuf->resv, NULL);
                if (dma_buf_is_dynamic(attach->dmabuf)) {
-                       dma_resv_lock(attach->dmabuf->resv, NULL);
                        ret = dmabuf->ops->pin(attach);
                        if (ret)
                                goto err_unlock;
@@ -878,8 +946,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
                        ret = PTR_ERR(sgt);
                        goto err_unpin;
                }
-               if (dma_buf_is_dynamic(attach->dmabuf))
-                       dma_resv_unlock(attach->dmabuf->resv);
+               dma_resv_unlock(attach->dmabuf->resv);
                attach->sgt = sgt;
                attach->dir = DMA_BIDIRECTIONAL;
        }
@@ -895,8 +962,7 @@ err_unpin:
                dmabuf->ops->unpin(attach);
 
 err_unlock:
-       if (dma_buf_is_dynamic(attach->dmabuf))
-               dma_resv_unlock(attach->dmabuf->resv);
+       dma_resv_unlock(attach->dmabuf->resv);
 
        dma_buf_detach(dmabuf, attach);
        return ERR_PTR(ret);
@@ -939,24 +1005,22 @@ static void __unmap_dma_buf(struct dma_buf_attachment *attach,
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-       if (WARN_ON(!dmabuf || !attach))
+       if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
                return;
 
+       dma_resv_lock(dmabuf->resv, NULL);
+
        if (attach->sgt) {
-               if (dma_buf_is_dynamic(attach->dmabuf))
-                       dma_resv_lock(attach->dmabuf->resv, NULL);
 
                __unmap_dma_buf(attach, attach->sgt, attach->dir);
 
-               if (dma_buf_is_dynamic(attach->dmabuf)) {
+               if (dma_buf_is_dynamic(attach->dmabuf))
                        dmabuf->ops->unpin(attach);
-                       dma_resv_unlock(attach->dmabuf->resv);
-               }
        }
-
-       dma_resv_lock(dmabuf->resv, NULL);
        list_del(&attach->node);
+
        dma_resv_unlock(dmabuf->resv);
+
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);
 
@@ -1047,8 +1111,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);
 
-       if (dma_buf_attachment_is_dynamic(attach))
-               dma_resv_assert_held(attach->dmabuf->resv);
+       dma_resv_assert_held(attach->dmabuf->resv);
 
        if (attach->sgt) {
                /*
@@ -1063,7 +1126,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        }
 
        if (dma_buf_is_dynamic(attach->dmabuf)) {
-               dma_resv_assert_held(attach->dmabuf->resv);
                if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
                        r = attach->dmabuf->ops->pin(attach);
                        if (r)
@@ -1105,6 +1167,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
 
+/**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach:    [in]    attachment whose scatterlist is to be returned
+ * @direction: [in]    direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+                               enum dma_data_direction direction)
+{
+       struct sg_table *sg_table;
+
+       might_sleep();
+
+       if (WARN_ON(!attach || !attach->dmabuf))
+               return ERR_PTR(-EINVAL);
+
+       dma_resv_lock(attach->dmabuf->resv, NULL);
+       sg_table = dma_buf_map_attachment(attach, direction);
+       dma_resv_unlock(attach->dmabuf->resv);
+
+       return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+
 /**
  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
@@ -1124,15 +1214,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;
 
-       if (dma_buf_attachment_is_dynamic(attach))
-               dma_resv_assert_held(attach->dmabuf->resv);
+       dma_resv_assert_held(attach->dmabuf->resv);
 
        if (attach->sgt == sg_table)
                return;
 
-       if (dma_buf_is_dynamic(attach->dmabuf))
-               dma_resv_assert_held(attach->dmabuf->resv);
-
        __unmap_dma_buf(attach, sg_table, direction);
 
        if (dma_buf_is_dynamic(attach->dmabuf) &&
@@ -1141,6 +1227,31 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
 
+/**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
+ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach:    [in]    attachment to unmap buffer from
+ * @sg_table:  [in]    scatterlist info of the buffer to unmap
+ * @direction: [in]    direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+                                      struct sg_table *sg_table,
+                                      enum dma_data_direction direction)
+{
+       might_sleep();
+
+       if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+               return;
+
+       dma_resv_lock(attach->dmabuf->resv, NULL);
+       dma_buf_unmap_attachment(attach, sg_table, direction);
+       dma_resv_unlock(attach->dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+
 /**
  * dma_buf_move_notify - notify attachments that DMA-buf is moving
  *
@@ -1352,6 +1463,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
 {
+       int ret;
+
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;
 
@@ -1372,7 +1485,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
        vma_set_file(vma, dmabuf->file);
        vma->vm_pgoff = pgoff;
 
-       return dmabuf->ops->mmap(dmabuf, vma);
+       dma_resv_lock(dmabuf->resv, NULL);
+       ret = dmabuf->ops->mmap(dmabuf, vma);
+       dma_resv_unlock(dmabuf->resv);
+
+       return ret;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 
@@ -1395,41 +1512,67 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 {
        struct iosys_map ptr;
-       int ret = 0;
+       int ret;
 
        iosys_map_clear(map);
 
        if (WARN_ON(!dmabuf))
                return -EINVAL;
 
+       dma_resv_assert_held(dmabuf->resv);
+
        if (!dmabuf->ops->vmap)
                return -EINVAL;
 
-       mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
                *map = dmabuf->vmap_ptr;
-               goto out_unlock;
+               return 0;
        }
 
        BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
 
        ret = dmabuf->ops->vmap(dmabuf, &ptr);
        if (WARN_ON_ONCE(ret))
-               goto out_unlock;
+               return ret;
 
        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;
 
        *map = dmabuf->vmap_ptr;
 
-out_unlock:
-       mutex_unlock(&dmabuf->lock);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
 
+/**
+ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:    [in]    buffer to vmap
+ * @map:       [out]   returns the vmap pointer
+ *
+ * Unlocked version of dma_buf_vmap()
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+       int ret;
+
+       iosys_map_clear(map);
+
+       if (WARN_ON(!dmabuf))
+               return -EINVAL;
+
+       dma_resv_lock(dmabuf->resv, NULL);
+       ret = dma_buf_vmap(dmabuf, map);
+       dma_resv_unlock(dmabuf->resv);
+
+       return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+
 /**
  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
  * @dmabuf:    [in]    buffer to vunmap
@@ -1440,20 +1583,36 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
        if (WARN_ON(!dmabuf))
                return;
 
+       dma_resv_assert_held(dmabuf->resv);
+
        BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
 
-       mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, map);
                iosys_map_clear(&dmabuf->vmap_ptr);
        }
-       mutex_unlock(&dmabuf->lock);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
+/**
+ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:    [in]    buffer to vunmap
+ * @map:       [in]    vmap pointer to vunmap
+ */
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+       if (WARN_ON(!dmabuf))
+               return;
+
+       dma_resv_lock(dmabuf->resv, NULL);
+       dma_buf_vunmap(dmabuf, map);
+       dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {