// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

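/*
 * Reserve an I/O virtual address range for the object in the DRM MM range
 * allocator and map its scatter-gather table into the shared IOMMU domain.
 * The start of the reserved node becomes the object's DMA address.
 */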
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < (ssize_t)rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

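/* Tear down the IOMMU mapping and return the I/O virtual address node. */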
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

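/*
 * Allocate shmem-backed pages for the object and build a scatter-gather
 * table describing them; the table is later either mapped through the
 * IOMMU or handed to PRIME importers.
 */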
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

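/* Free the scatter-gather table and release the pages back to shmem. */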
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

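/*
 * IOMMU-backed allocation: the object is built from discontiguous shmem
 * pages, mapped through the IOMMU, and optionally given a write-combined
 * kernel mapping via vmap().
 */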
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

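/*
 * DMA-backed allocation for the no-IOMMU case: the buffer comes from the
 * DMA API and is physically contiguous. DMA_ATTR_NO_KERNEL_MAPPING skips
 * the kernel mapping when none was requested.
 */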
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

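/* Choose the backing-storage implementation based on the IOMMU domain. */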
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

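/*
 * Map the object's individual pages into userspace. vm_map_pages() uses
 * vma->vm_pgoff as an offset into the page array, which the caller has
 * already reset to zero.
 */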
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

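/*
 * Common mmap fixups shared by both backing types, followed by a dispatch
 * to the page-based or DMA API based implementation.
 */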
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * The buffer is backed by struct pages, so clear the VM_PFNMAP flag
	 * that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	return ret;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap = rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

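/* Allocate and initialize the GEM object itself, without backing storage. */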
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	/* drm_gem_object_init() can fail to allocate the backing shmem file. */
	ret = drm_gem_object_init(drm, obj, size);
	if (ret) {
		kfree(rk_obj);
		return ERR_PTR(ret);
	}

	return rk_obj;
}

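/*
 * rockchip_gem_create_object - allocate an object and its backing storage
 *
 * @drm: DRM device
 * @size: requested size in bytes, rounded up to the page size internally
 * @alloc_kmap: also set up a kernel virtual mapping for the buffer
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR() on
 * failure. A sketch of a typical call site ("fb_size" is a hypothetical
 * variable used only for illustration):
 *
 *	rk_obj = rockchip_gem_create_object(drm, fb_size, false);
 *	if (IS_ERR(rk_obj))
 *		return PTR_ERR(rk_obj);
 *	... scan out from rk_obj->dma_addr ...
 */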
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a GEM handle for it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	bool is_framebuffer;
	int ret;

	is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file;

	rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle carries that ID back to userspace.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align the pitch to 64 bytes, since the Mali GPU requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

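/*
 * Import helpers: with an IOMMU the imported scatter-gather table can be
 * mapped as-is; the DMA path additionally requires the exporter's buffer
 * to be contiguous in DMA address space.
 */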
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err;

	err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

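/*
 * rockchip_gem_prime_import_sg_table - (struct drm_driver)->gem_prime_import_sg_table
 * callback function
 */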
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

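/*
 * Create a kernel virtual mapping for the buffer: reuse an existing mapping
 * of page-backed objects (or vmap() one on demand) and hand out the DMA
 * API's kernel address otherwise.
 */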
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		void *vaddr;

		if (rk_obj->kvaddr)
			vaddr = rk_obj->kvaddr;
		else
			vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				     pgprot_writecombine(PAGE_KERNEL));

		if (!vaddr)
			return -ENOMEM;
		iosys_map_set_vaddr(map, vaddr);
		return 0;
	}

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	iosys_map_set_vaddr(map, rk_obj->kvaddr);

	return 0;
}

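/*
 * Undo rockchip_gem_prime_vmap(); only mappings that were created on demand
 * are torn down here.
 */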
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
			       struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		if (map->vaddr != rk_obj->kvaddr)
			vunmap(map->vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}