/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * This file contains functions for buffer object structure management.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>		/* for find_vma()/get_user_pages() */
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"
static unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;
}

static unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;
}

struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
	struct hmm_buffer_object *bo;

	bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
	if (!bo)
		dev_err(atomisp_dev, "%s: failed!\n", __func__);

	return bo;
}
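/*
 * Illustration (editor's note, not part of the driver logic): order_to_nr()
 * and nr_to_order_bottom() convert between a page count and a buddy-allocator
 * order, rounding the order down.  For example:
 *
 *	order_to_nr(3)        == 8	(2^3 pages)
 *	nr_to_order_bottom(8) == 3
 *	nr_to_order_bottom(7) == 2	(rounded down, since 2^2 = 4 <= 7)
 *
 * alloc_private_pages() below relies on this rounding-down behaviour when it
 * carves a pgnr-sized request into power-of-two blocks.
 */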
71 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
74 check_bodev_null_return(bdev, -EINVAL);
75 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
76 "hmm_bo_device not inited yet.\n");
77 /* prevent zero size buffer object */
79 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
83 memset(bo, 0, sizeof(*bo));
84 mutex_init(&bo->mutex);
86 /* init the bo->list HEAD as an element of entire_bo_list */
87 INIT_LIST_HEAD(&bo->list);
91 bo->status = HMM_BO_FREE;
92 bo->start = bdev->start;
94 bo->end = bo->start + pgnr_to_size(pgnr);
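/*
 * Note (summary of the conventions documented by the inline comments in this
 * file): free buffer objects live on bdev->free_rbtree, keyed by bo->pgnr;
 * equally sized free bos hang off the rbtree node in a linked list through
 * bo->next/bo->prev, and only the list head is an actual rbtree node (its
 * 'prev' is NULL).  Allocated buffer objects live on bdev->allocated_rbtree,
 * keyed by bo->start.  All buffer objects, free or allocated, are also chained
 * on bdev->entire_bo_list in address order, which is what makes neighbour
 * merging in hmm_bo_release() possible.
 */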
struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
				struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
		(this->pgnr > pgnr && this->node.rb_left == NULL)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on the free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *	yes: erase 'this' node and rebalance the rbtree, return 'this'.
	 */
	if (this->next == NULL) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *	yes: change the related 'next/prev' pointers,
	 *	return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;

	return temp_bo;
}
struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
					      ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}

struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
							unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}
static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
					struct hmm_buffer_object *bo,
					unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = __bo_alloc(bdev->bo_cache);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}
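/*
 * Example of a break-up (illustration only): a free bo covering
 * [0x1000000, 0x1040000) (64 pages) asked to satisfy a 16-page allocation is
 * split so that new_bo covers [0x1000000, 0x1010000) (16 pages, handed to the
 * caller) while the original bo shrinks to [0x1010000, 0x1040000) (48 pages)
 * and goes back onto the free rbtree.
 */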
static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from free rbtree:
	 * 1. if bo->next == NULL && bo->prev == NULL, bo is a rbtree node
	 *	and does not have a linked list after bo, to take off this bo,
	 *	we just need to erase bo directly and rebalance the free rbtree
	 */
	if (bo->prev == NULL && bo->next == NULL) {
		rb_erase(&bo->node, &bdev->free_rbtree);
	/* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
	 *	and has a linked list, to take off this bo we need to erase bo
	 *	first, then insert bo->next into the free rbtree and rebalance
	 */
	} else if (bo->prev == NULL && bo->next != NULL) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
	/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
	 *	node, bo is the last element of the linked list after the rbtree
	 *	node, to take off this bo, we just need to set the "prev/next"
	 *	pointers to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev != NULL && bo->next == NULL) {
		bo->prev->next = NULL;
		bo->prev = NULL;
	/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
	 *	node, bo is in the middle of the linked list after the rbtree
	 *	node, to take off this bo, we just need to set the "prev/next"
	 *	pointers to NULL, the free rbtree stays unchanged
	 */
	} else {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}
struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
				     struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}
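/*
 * Example of a merge (illustration only): if a bo covering
 * [0x1010000, 0x1020000) is released and its list neighbour
 * [0x1000000, 0x1010000) is already free, __bo_merge(prev_bo, bo) extends the
 * released bo downwards so it covers [0x1000000, 0x1020000) and frees the
 * absorbed prev_bo back to the bo_cache.
 */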
/*
 * hmm_bo_device functions.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
				sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = __bo_alloc(bdev->bo_cache);
	if (!bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}
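/*
 * Usage sketch (illustration only, with a hypothetical 'bdev' instance; the
 * real call sites live in the hmm layer built on top of this file):
 *
 *	ret = hmm_bo_device_init(bdev, mmu_driver, vaddr_start, size);
 *	...
 *	hmm_bo_device_exit(bdev);
 *
 * After a successful init, the whole [vaddr_start, vaddr_start + size) ISP
 * virtual range is represented by a single free bo on the free rbtree.
 */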
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}
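/*
 * Typical buffer object lifecycle as suggested by the API in this file
 * (sketch only, error handling omitted):
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);
 *	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, cached);
 *	ret = hmm_bo_bind(bo);
 *	...
 *	hmm_bo_unbind(bo);
 *	hmm_bo_free_pages(bo);
 *	hmm_bo_unref(bo);
 *
 * Dropping the last reference ends up in hmm_bo_release(), which returns the
 * ISP virtual range to the free rbtree and merges it with free neighbours.
 */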
void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIX ME:
	 *
	 * how to destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in the mmap
	 * operation hmm_bo_ref will eventually be called.
	 * so, if this happens, something has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}
void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos even if they are still in use
	 * and all bos will be merged into one big bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
			rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}
int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}
struct hmm_buffer_object *hmm_bo_device_search_start(
	struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
	struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
	struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* skip bo which has no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;

found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}
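/*
 * Note: the three search helpers above differ in their keys.
 * hmm_bo_device_search_start() expects 'vaddr' to be exactly a bo's start
 * address, hmm_bo_device_search_in_range() accepts any address that falls
 * inside a bo, and hmm_bo_device_search_vmap_start() looks a bo up by its
 * kernel vmap address rather than by its ISP virtual address.
 */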
static void free_private_bo_pages(struct hmm_buffer_object *bo,
				  struct hmm_pool *dypool,
				  struct hmm_pool *repool,
				  int free_pgnr)
{
	int i, ret;

	for (i = 0; i < free_pgnr; i++) {
		switch (bo->page_obj[i].type) {
		case HMM_PAGE_TYPE_RESERVED:
			if (repool->pops
			    && repool->pops->pool_free_pages) {
				repool->pops->pool_free_pages(repool->pool_info,
							      &bo->page_obj[i]);
				hmm_mem_stat.res_cnt--;
			}
			break;
		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
		 * memory, so when freeing them, they should be put into the
		 * dynamic pool.
		 */
		case HMM_PAGE_TYPE_DYNAMIC:
		case HMM_PAGE_TYPE_GENERAL:
			if (dypool->pops
			    && dypool->pops->pool_inited
			    && dypool->pops->pool_inited(dypool->pool_info)) {
				if (dypool->pops->pool_free_pages)
					dypool->pops->pool_free_pages(
							dypool->pool_info,
							&bo->page_obj[i]);
				break;
			}

			/*
			 * if the dynamic memory pool doesn't exist, need to
			 * free pages to the system directly.
			 */
		default:
			ret = set_pages_wb(bo->page_obj[i].page, 1);
			if (ret)
				dev_err(atomisp_dev,
					"set page to WB err ...ret = %d\n",
					ret);
			/*
			 * W/A: set_pages_wb() occasionally returns -EFAULT,
			 * indicating that the page address is not in the valid
			 * range (0xffff880000000000~0xffffc7ffffffffff);
			 * __free_pages() would then panic. It is not known why
			 * the page address becomes invalid; it may be memory
			 * corruption caused by low memory.
			 */
			if (!ret) {
				__free_pages(bo->page_obj[i].page, 0);
				hmm_mem_stat.sys_size--;
			}
			break;
		}
	}
}
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem,
			       bool cached,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = atomisp_kernel_malloc(
				sizeof(struct hmm_page_object) * pgnr);
	if (unlikely(!bo->page_obj)) {
		dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
		return -ENOMEM;
	}

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							bo->page_obj, pgnr,
							cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							&bo->page_obj[i], pgnr,
							cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if we are short of memory, set order to 0 every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and fallbacks
		 * to HMM_MIN_ORDER or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purpose.
		 *
		 * REVISIT: why __GFP_FS is necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_RECLAIM | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in the low memory case, if page allocation fails,
			 * we turn to try if an order=0 allocation could
			 * succeed. if order=0 fails too, that means there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					__func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if allocation fails twice in a row, assume we are
			 * short of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");
					__free_pages(pages, order);
					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * the reduce_order flag.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;

cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	atomisp_kernel_free(bo->page_obj);

	return -ENOMEM;
}
static void free_private_pages(struct hmm_buffer_object *bo,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	free_private_bo_pages(bo, dypool, repool, bo->pgnr);

	atomisp_kernel_free(bo->page_obj);
}
/*
 * Hacked from the kernel function __get_user_pages() in mm/memory.c
 *
 * Handles buffers allocated by another kernel space driver and mmapped into
 * user space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA
 * structure.
 *
 * Gets physical pages from a user space virtual address and fills the page
 * list.
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			dev_err(atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				dev_err(atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				dev_err(atomisp_dev, "follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;
				get_page(page);
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, int nr_pages,
			    int write, int force,
			    struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
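/*
 * Illustration (editor's note; the exact flag setup above is partly an
 * assumption about elided lines): with write = 1, force = 0 and a non-NULL
 * 'pages' array, the gup_flags passed to __get_pfnmap_pages() would be
 * FOLL_TOUCH | FOLL_GET | FOLL_WRITE, which satisfies the
 * VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)) check at its top.
 */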
/*
 * Convert a user space virtual address into a pages list
 */
static int alloc_user_pages(struct hmm_buffer_object *bo,
			    void *userptr, bool cached)
{
	int page_nr;
	int i;
	struct vm_area_struct *vma;
	struct page **pages;

	pages = atomisp_kernel_malloc(sizeof(struct page *) * bo->pgnr);
	if (unlikely(!pages)) {
		dev_err(atomisp_dev, "out of memory for pages...\n");
		return -ENOMEM;
	}

	bo->page_obj = atomisp_kernel_malloc(
				sizeof(struct hmm_page_object) * bo->pgnr);
	if (unlikely(!bo->page_obj)) {
		dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
		atomisp_kernel_free(pages);
		return -ENOMEM;
	}

	mutex_unlock(&bo->mutex);
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)userptr);
	up_read(&current->mm->mmap_sem);
	if (vma == NULL) {
		dev_err(atomisp_dev, "find_vma failed\n");
		atomisp_kernel_free(bo->page_obj);
		atomisp_kernel_free(pages);
		mutex_lock(&bo->mutex);
		return -EFAULT;
	}
	mutex_lock(&bo->mutex);
	/*
	 * Handle frame buffers allocated in another kernel space driver
	 * and mapped to user space
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		page_nr = get_pfnmap_pages(current, current->mm,
					   (unsigned long)userptr,
					   (int)(bo->pgnr), 1, 0,
					   pages, NULL);
		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
	} else {
		/* Handle frame buffers allocated in user space */
		mutex_unlock(&bo->mutex);
		down_read(&current->mm->mmap_sem);
		page_nr = get_user_pages((unsigned long)userptr,
					 (int)(bo->pgnr), 1, pages, NULL);
		up_read(&current->mm->mmap_sem);
		mutex_lock(&bo->mutex);
		bo->mem_type = HMM_BO_MEM_TYPE_USER;
	}

	/* can be written by caller, not forced */
	if (page_nr != bo->pgnr) {
		dev_err(atomisp_dev,
			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
			bo->pgnr, page_nr);
		goto out_of_mem;
	}

	for (i = 0; i < bo->pgnr; i++) {
		bo->page_obj[i].page = pages[i];
		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
	}
	hmm_mem_stat.usr_size += bo->pgnr;
	atomisp_kernel_free(pages);

	return 0;

out_of_mem:
	for (i = 0; i < page_nr; i++)
		put_page(pages[i]);
	atomisp_kernel_free(pages);
	atomisp_kernel_free(bo->page_obj);

	return -ENOMEM;
}
static void free_user_pages(struct hmm_buffer_object *bo)
{
	int i;

	for (i = 0; i < bo->pgnr; i++)
		put_page(bo->page_obj[i].page);
	hmm_mem_stat.usr_size -= bo->pgnr;

	atomisp_kernel_free(bo->page_obj);
}
/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages come from. currently we have 3 types
 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 *
 * from_highmem is only valid when type is HMM_BO_PRIVATE; if set, it will
 * try to alloc memory from highmem.
 *
 * userptr is only valid when type is HMM_BO_USER; it indicates
 * the start address in the user space task.
 *
 * from_highmem and userptr will both be ignored when type is
 * HMM_BO_SHARE.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       void *userptr, bool cached)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	/*
	 * TO DO:
	 * add HMM_BO_USER type
	 */
	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo, from_highmem,
				cached, &dynamic_pool, &reserved_pool);
	} else if (type == HMM_BO_USER) {
		ret = alloc_user_pages(bo, userptr, cached);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;
	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
	return -EINVAL;
}
/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_pages(bo, &dynamic_pool, &reserved_pool);
	else if (bo->type == HMM_BO_USER)
		free_user_pages(bo);
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
}
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}

/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr)
{
	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	*page_obj = bo->page_obj;
	*pgnr = bo->pgnr;

	mutex_unlock(&bo->mutex);

	return 0;

status_err:
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
	mutex_unlock(&bo->mutex);
	return -EINVAL;
}
/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret = isp_mmu_map(&bdev->mmu, virt,
				  page_to_phys(bo->page_obj[i].page), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB here.
	 *
	 * theoretically, we do not need to flush the TLB as we did not change
	 * any existing address mappings, but for Silicon Hive's MMU there is
	 * really a bug here. I guess when fetching PTEs (page table entries)
	 * into the TLB, the MMU fetches additional INVALID PTEs automatically
	 * for performance reasons. E.g. we only set up 1 page address mapping,
	 * meaning updating 1 PTE, but the MMU fetches 4 PTEs at one time,
	 * so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages from the related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or page not allocated.\n");
	return -EINVAL;
}
/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or page not allocated or not bound yet.\n");
}
int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);
	ret = bo->status & HMM_BO_BINDED;
	mutex_unlock(&bo->mutex);

	return ret;
}
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	struct page **pages;
	int i;

	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* the cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	pages = atomisp_kernel_malloc(sizeof(*pages) * bo->pgnr);
	if (unlikely(!pages)) {
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "out of memory for pages...\n");
		return NULL;
	}

	for (i = 0; i < bo->pgnr; i++)
		pages[i] = bo->page_obj[i].page;

	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		atomisp_kernel_free(pages);
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	atomisp_kernel_free(pages);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}
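/*
 * Usage sketch for the kernel-side mapping helpers (illustration only,
 * hypothetical 'addr', 'data' and 'len'):
 *
 *	addr = hmm_bo_vmap(bo, true);		cached kernel mapping
 *	memcpy(addr, data, len);
 *	hmm_bo_flush_vmap(bo);			write back the CPU cache
 *	hmm_bo_vunmap(bo);
 *
 * hmm_bo_flush_vmap() only does work for cached mappings; for an uncached
 * mapping (cached == false) it returns early without flushing.
 */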
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}
void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}

static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}
static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};
/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * check vma's virtual address space size and buffer object's size.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->page_obj[i].page);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}
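/*
 * Sketch of how hmm_bo_mmap() is meant to be wired up (illustration only,
 * with a hypothetical file_operations mmap handler and lookup helper):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo = my_lookup_bo(file, vma);
 *
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 *
 * hmm_bo_mmap() installs hmm_bo_vm_ops, so later vma open/close events keep
 * the bo reference count and the HMM_BO_MMAPED status in sync.
 */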