|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->map_atomic
|| !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
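With the atomic callback gone from the sanity check, the exporter callbacks that dma_buf_export() still insists on are map_dma_buf, unmap_dma_buf, release, map and mmap. A minimal skeleton of an ops table that passes the check above might look as follows; every example_* callback is a hypothetical placeholder, not code from this patch:

static const struct dma_buf_ops example_dmabuf_ops = {
	.map_dma_buf	= example_map_dma_buf,
	.unmap_dma_buf	= example_unmap_dma_buf,
	.release	= example_release,
	.map		= example_map,		/* may simply return NULL */
	.unmap		= example_unmap,	/* optional counterpart */
	.mmap		= example_mmap,
};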
* void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
*
- * There are also atomic variants of these interfaces. Like for kmap they
- * facilitate non-blocking fast-paths. Neither the importer nor the exporter
- * (in the callback) is allowed to block when using these.
- *
- * Interfaces::
- * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- * For importers all the restrictions of using kmap apply, like the limited
- * supply of kmap_atomic slots. Hence an importer shall only hold onto at
- * max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ * Implementing these functions is optional for exporters; for importers, all
+ * the restrictions of using kmap apply.
*
* dma_buf kmap calls outside of the range specified in begin_cpu_access are
* undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
* the partial chunks at the beginning and end but may return stale or bogus
* data outside of the range (in these partial chunks).
*
- * Note that these calls need to always succeed. The exporter needs to
- * complete any preparations that might fail in begin_cpu_access.
- *
 * For some cases the overhead of kmap can be too high, so a vmap interface
 * is introduced. This interface should be used very carefully, as vmalloc
 * space is a limited resource on many architectures.
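To make the remaining kmap contract concrete, here is a minimal importer-side sketch; the helper name, the DMA_FROM_DEVICE direction and the error handling are illustrative assumptions, only the dma_buf_* calls come from the API documented above:

#include <linux/dma-buf.h>

/* Hypothetical importer: CPU-read one page of a dma-buf. All CPU access
 * must be bracketed by begin_cpu_access/end_cpu_access, and dma_buf_kmap()
 * may return NULL because many exporters stub out ->map. */
static int example_read_page(struct dma_buf *buf, unsigned long page_num)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(buf, page_num);
	if (vaddr) {
		/* ... read at most PAGE_SIZE bytes through vaddr ... */
		dma_buf_kunmap(buf, page_num, vaddr);
	}

	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}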
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
-/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- if (!dmabuf->ops->map_atomic)
- return NULL;
- return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap_atomic)
- dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map_atomic = armada_gem_dmabuf_no_kmap,
- .unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
-/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
- .map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
- .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map_atomic = omap_gem_dmabuf_kmap_atomic,
- .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
return 0;
}
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
- unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
- unsigned long page,
- void *addr)
-{
-}
-
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map_atomic = tegra_gem_prime_kmap_atomic,
- .unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
return NULL;
}
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
- .map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
- .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
{
}
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.map = vmw_prime_dmabuf_kmap,
- .map_atomic = vmw_prime_dmabuf_kmap_atomic,
.unmap = vmw_prime_dmabuf_kunmap,
- .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
.map = vb2_dc_dmabuf_ops_kmap,
- .map_atomic = vb2_dc_dmabuf_ops_kmap,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
.map = vb2_dma_sg_dmabuf_ops_kmap,
- .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
.map = vb2_vmalloc_dmabuf_ops_kmap,
- .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
.vmap = vb2_vmalloc_dmabuf_ops_vmap,
.mmap = vb2_vmalloc_dmabuf_ops_mmap,
.release = vb2_vmalloc_dmabuf_ops_release,
.detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
- .map_atomic = ion_dma_buf_kmap,
- .unmap_atomic = ion_dma_buf_kunmap,
.map = ion_dma_buf_kmap,
.unmap = ion_dma_buf_kunmap,
};
tee_shm_release(shm);
}
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- return NULL;
-}
-
static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
return NULL;
.map_dma_buf = tee_shm_op_map_dma_buf,
.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
.release = tee_shm_op_release,
- .map_atomic = tee_shm_op_map_atomic,
.map = tee_shm_op_map,
.mmap = tee_shm_op_mmap,
};
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr);
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
void *addr);
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*map)(struct dma_buf *, unsigned long);
void (*unmap)(struct dma_buf *, unsigned long, void *);
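For an exporter that does back its buffer with an array of pages, a hedged sketch of the surviving ->map/->unmap pair could look like this; struct example_buf and its pages[] member are invented for illustration:

#include <linux/dma-buf.h>
#include <linux/highmem.h>

struct example_buf {			/* hypothetical exporter state */
	struct page **pages;
};

static void *example_map(struct dma_buf *dmabuf, unsigned long page_num)
{
	struct example_buf *ebuf = dmabuf->priv;

	return kmap(ebuf->pages[page_num]);
}

static void example_unmap(struct dma_buf *dmabuf, unsigned long page_num,
			  void *vaddr)
{
	struct example_buf *ebuf = dmabuf->priv;

	/* kunmap() takes the page, not vaddr; the callback signature
	 * still hands us the address for exporters that need it. */
	kunmap(ebuf->pages[page_num]);
}

Exporters that cannot or do not want to provide CPU access can instead return NULL from ->map, as the i915, tegra, vmwgfx and tee stubs in this patch do.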
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);