2 * Support for Medfield PNW Camera Imaging ISP subsystem.
4 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
6 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
20 * This file contains functions for buffer object structure management
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/gfp.h> /* for GFP_ATOMIC */
26 #include <linux/mm_types.h>
27 #include <linux/hugetlb.h>
28 #include <linux/highmem.h>
29 #include <linux/slab.h> /* for kmalloc */
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/string.h>
33 #include <linux/list.h>
34 #include <linux/errno.h>
36 #include <asm/current.h>
37 #include <linux/sched/signal.h>
38 #include <linux/file.h>
40 #include <asm/set_memory.h>
42 #include "atomisp_internal.h"
43 #include "hmm/hmm_common.h"
44 #include "hmm/hmm_pool.h"
45 #include "hmm/hmm_bo.h"
47 static unsigned int order_to_nr(unsigned int order)
52 static unsigned int nr_to_order_bottom(unsigned int nr)
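/*
 * Initialize a buffer object describing 'pgnr' pages of ISP virtual address
 * space: zero the structure, set up its mutex and list head, and mark it as
 * a free range starting at the device's base address (bdev->start).
 */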
57 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
60 check_bodev_null_return(bdev, -EINVAL);
61 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
62 "hmm_bo_device not inited yet.\n");
63 /* prevent zero size buffer object */
65 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
69 memset(bo, 0, sizeof(*bo));
70 mutex_init(&bo->mutex);
72 /* init the bo->list HEAD as an element of entire_bo_list */
73 INIT_LIST_HEAD(&bo->list);
77 bo->status = HMM_BO_FREE;
78 bo->start = bdev->start;
80 bo->end = bo->start + pgnr_to_size(pgnr);
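/*
 * Best-fit search in the free rbtree: find a free bo of at least 'pgnr'
 * pages (preferring an exact match), detach it from the tree or from the
 * list of equally sized bos hanging off a tree node, and return it; NULL
 * is returned if no bo is large enough.
 */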
87 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
88 struct rb_node *node, unsigned int pgnr)
90 struct hmm_buffer_object *this, *ret_bo, *temp_bo;
92 this = rb_entry(node, struct hmm_buffer_object, node);
93 if (this->pgnr == pgnr ||
94 (this->pgnr > pgnr && !this->node.rb_left)) {
95 goto remove_bo_and_return;
97 if (this->pgnr < pgnr) {
98 if (!this->node.rb_right)
100 ret_bo = __bo_search_and_remove_from_free_rbtree(
101 this->node.rb_right, pgnr);
103 ret_bo = __bo_search_and_remove_from_free_rbtree(
104 this->node.rb_left, pgnr);
107 if (this->pgnr > pgnr)
108 goto remove_bo_and_return;
115 remove_bo_and_return:
116 /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
117 * 1. check if 'this->next' is NULL:
118 * yes: erase 'this' node and rebalance rbtree, return 'this'.
121 rb_erase(&this->node, &this->bdev->free_rbtree);
124 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
125 * 2. check if 'this->next->next' is NULL:
126 * yes: change the related 'next/prev' pointer,
127 * return 'this->next' but the rbtree stays unchanged.
129 temp_bo = this->next;
130 this->next = temp_bo->next;
132 temp_bo->next->prev = this;
133 temp_bo->next = NULL;
134 temp_bo->prev = NULL;
138 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
141 struct rb_node *n = root->rb_node;
142 struct hmm_buffer_object *bo;
145 bo = rb_entry(n, struct hmm_buffer_object, node);
147 if (bo->start > start) {
151 } else if (bo->start < start) {
163 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
164 struct rb_root *root, unsigned int start)
166 struct rb_node *n = root->rb_node;
167 struct hmm_buffer_object *bo;
170 bo = rb_entry(n, struct hmm_buffer_object, node);
172 if (bo->start > start) {
188 static void __bo_insert_to_free_rbtree(struct rb_root *root,
189 struct hmm_buffer_object *bo)
191 struct rb_node **new = &root->rb_node;
192 struct rb_node *parent = NULL;
193 struct hmm_buffer_object *this;
194 unsigned int pgnr = bo->pgnr;
198 this = container_of(*new, struct hmm_buffer_object, node);
200 if (pgnr < this->pgnr) {
201 new = &((*new)->rb_left);
202 } else if (pgnr > this->pgnr) {
203 new = &((*new)->rb_right);
206 bo->next = this->next;
208 this->next->prev = bo;
210 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
215 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
217 rb_link_node(&bo->node, parent, new);
218 rb_insert_color(&bo->node, root);
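/*
 * Insert a bo into the allocated rbtree, keyed by its ISP virtual start
 * address; the reference count is initialized and the status switches to
 * HMM_BO_ALLOCED.
 */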
221 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
222 struct hmm_buffer_object *bo)
224 struct rb_node **new = &root->rb_node;
225 struct rb_node *parent = NULL;
226 struct hmm_buffer_object *this;
227 unsigned int start = bo->start;
231 this = container_of(*new, struct hmm_buffer_object, node);
233 if (start < this->start)
234 new = &((*new)->rb_left);
236 new = &((*new)->rb_right);
239 kref_init(&bo->kref);
240 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
242 rb_link_node(&bo->node, parent, new);
243 rb_insert_color(&bo->node, root);
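/*
 * Split 'pgnr' pages off the front of a larger free bo: a new bo is created
 * for the first pgnr pages and linked just before the original bo on
 * entire_bo_list, while the original bo shrinks to the remaining range.
 * Returns the new bo, or NULL on failure.
 */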
246 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
247 struct hmm_buffer_object *bo,
250 struct hmm_buffer_object *new_bo;
254 new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
256 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
259 ret = __bo_init(bdev, new_bo, pgnr);
261 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
262 kmem_cache_free(bdev->bo_cache, new_bo);
266 new_bo->start = bo->start;
267 new_bo->end = new_bo->start + pgnr_to_size(pgnr);
268 bo->start = new_bo->end;
269 bo->pgnr = bo->pgnr - pgnr;
271 spin_lock_irqsave(&bdev->list_lock, flags);
272 list_add_tail(&new_bo->list, &bo->list);
273 spin_unlock_irqrestore(&bdev->list_lock, flags);
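/*
 * Detach a free bo from the free rbtree bookkeeping. The bo may be a tree
 * node itself or sit on the linked list of equally sized bos attached to a
 * node; the four possible cases are handled below.
 */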
278 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
280 struct hmm_bo_device *bdev = bo->bdev;
281 /* There are 4 situations when we take off a known bo from free rbtree:
282 * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
283 * and does not have a linked list after it; to take off this bo,
284 * we just need to erase bo directly and rebalance the free rbtree
286 if (!bo->prev && !bo->next) {
287 rb_erase(&bo->node, &bdev->free_rbtree);
288 /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
289 * and has a linked list; to take off this bo we need to erase bo
290 * first, then insert bo->next into the free rbtree and rebalance
293 } else if (!bo->prev && bo->next) {
294 bo->next->prev = NULL;
295 rb_erase(&bo->node, &bdev->free_rbtree);
296 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
298 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
299 * node, bo is the last element of the linked list after rbtree
300 * node; to take off this bo, we just need to set the "prev/next"
301 * pointers to NULL, and the free rbtree stays unchanged
303 } else if (bo->prev && !bo->next) {
304 bo->prev->next = NULL;
306 /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
307 * node, bo is in the middle of the linked list after the rbtree node;
308 * to take off this bo, we just link its "prev" and "next" neighbours
309 * to each other, and the free rbtree stays unchanged
311 } else if (bo->prev && bo->next) {
312 bo->next->prev = bo->prev;
313 bo->prev->next = bo->next;
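/*
 * Merge two neighbouring bos: next_bo absorbs bo's address range, bo is
 * dropped from entire_bo_list and returned to the bo cache, and the
 * surviving bo (next_bo) is returned to the caller.
 */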
319 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
320 struct hmm_buffer_object *next_bo)
322 struct hmm_bo_device *bdev;
326 next_bo->start = bo->start;
327 next_bo->pgnr = next_bo->pgnr + bo->pgnr;
329 spin_lock_irqsave(&bdev->list_lock, flags);
331 spin_unlock_irqrestore(&bdev->list_lock, flags);
333 kmem_cache_free(bo->bdev->bo_cache, bo);
339 * hmm_bo_device functions.
341 int hmm_bo_device_init(struct hmm_bo_device *bdev,
342 struct isp_mmu_client *mmu_driver,
343 unsigned int vaddr_start,
346 struct hmm_buffer_object *bo;
350 check_bodev_null_return(bdev, -EINVAL);
352 ret = isp_mmu_init(&bdev->mmu, mmu_driver);
354 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
358 bdev->start = vaddr_start;
359 bdev->pgnr = size_to_pgnr_ceil(size);
360 bdev->size = pgnr_to_size(bdev->pgnr);
362 spin_lock_init(&bdev->list_lock);
363 mutex_init(&bdev->rbtree_mutex);
365 bdev->flag = HMM_BO_DEVICE_INITED;
367 INIT_LIST_HEAD(&bdev->entire_bo_list);
368 bdev->allocated_rbtree = RB_ROOT;
369 bdev->free_rbtree = RB_ROOT;
371 bdev->bo_cache = kmem_cache_create("bo_cache",
372 sizeof(struct hmm_buffer_object), 0, 0, NULL);
373 if (!bdev->bo_cache) {
374 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
375 isp_mmu_exit(&bdev->mmu);
379 bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
381 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
382 isp_mmu_exit(&bdev->mmu);
386 ret = __bo_init(bdev, bo, bdev->pgnr);
388 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
389 kmem_cache_free(bdev->bo_cache, bo);
390 isp_mmu_exit(&bdev->mmu);
394 spin_lock_irqsave(&bdev->list_lock, flags);
395 list_add_tail(&bo->list, &bdev->entire_bo_list);
396 spin_unlock_irqrestore(&bdev->list_lock, flags);
398 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
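/*
 * Allocate 'pgnr' pages of ISP virtual address space: take a suitable bo
 * off the free rbtree, split off the excess if it is larger than requested,
 * and move the result to the allocated rbtree.
 */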
403 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
406 struct hmm_buffer_object *bo, *new_bo;
407 struct rb_root *root = &bdev->free_rbtree;
409 check_bodev_null_return(bdev, NULL);
410 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
411 "hmm_bo_device not inited yet.\n");
414 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
418 mutex_lock(&bdev->rbtree_mutex);
419 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
421 mutex_unlock(&bdev->rbtree_mutex);
422 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
427 if (bo->pgnr > pgnr) {
428 new_bo = __bo_break_up(bdev, bo, pgnr);
430 mutex_unlock(&bdev->rbtree_mutex);
431 dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
436 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
437 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
439 mutex_unlock(&bdev->rbtree_mutex);
443 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
445 mutex_unlock(&bdev->rbtree_mutex);
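/*
 * Return an allocated bo to the free pool: remove it from the allocated
 * rbtree, merge it with free neighbours on entire_bo_list and insert the
 * result into the free rbtree. This is normally reached through
 * kref_hmm_bo_release() when the last reference is dropped.
 */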
449 void hmm_bo_release(struct hmm_buffer_object *bo)
451 struct hmm_bo_device *bdev = bo->bdev;
452 struct hmm_buffer_object *next_bo, *prev_bo;
454 mutex_lock(&bdev->rbtree_mutex);
459 * how to destroy the bo when it is still MMAPED?
461 * ideally, this will not happen as hmm_bo_release
462 * will only be called when kref reaches 0, and in mmap
463 * operation the hmm_bo_ref will eventually be called.
464 * so, if this happens, something has gone wrong.
466 if (bo->status & HMM_BO_MMAPED) {
467 mutex_unlock(&bdev->rbtree_mutex);
468 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
472 if (bo->status & HMM_BO_BINDED) {
473 dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
477 if (bo->status & HMM_BO_PAGE_ALLOCED) {
478 dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
479 hmm_bo_free_pages(bo);
481 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
482 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
486 rb_erase(&bo->node, &bdev->allocated_rbtree);
488 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
489 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
491 if (bo->list.prev != &bdev->entire_bo_list &&
492 prev_bo->end == bo->start &&
493 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
494 __bo_take_off_handling(prev_bo);
495 bo = __bo_merge(prev_bo, bo);
498 if (bo->list.next != &bdev->entire_bo_list &&
499 next_bo->start == bo->end &&
500 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
501 __bo_take_off_handling(next_bo);
502 bo = __bo_merge(bo, next_bo);
505 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
507 mutex_unlock(&bdev->rbtree_mutex);
511 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
513 struct hmm_buffer_object *bo;
516 dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
518 check_bodev_null_return_void(bdev);
521 * release all allocated bos even if they are in use,
522 * and all bos will be merged into one big bo
524 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
526 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
528 dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
531 /* free all bos to release all ISP virtual memory */
532 while (!list_empty(&bdev->entire_bo_list)) {
533 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
535 spin_lock_irqsave(&bdev->list_lock, flags);
537 spin_unlock_irqrestore(&bdev->list_lock, flags);
539 kmem_cache_free(bdev->bo_cache, bo);
542 dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
544 kmem_cache_destroy(bdev->bo_cache);
546 isp_mmu_exit(&bdev->mmu);
549 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
551 check_bodev_null_return(bdev, -EINVAL);
553 return bdev->flag == HMM_BO_DEVICE_INITED;
556 int hmm_bo_allocated(struct hmm_buffer_object *bo)
558 check_bo_null_return(bo, 0);
560 return bo->status & HMM_BO_ALLOCED;
563 struct hmm_buffer_object *hmm_bo_device_search_start(
564 struct hmm_bo_device *bdev, ia_css_ptr vaddr)
566 struct hmm_buffer_object *bo;
568 check_bodev_null_return(bdev, NULL);
570 mutex_lock(&bdev->rbtree_mutex);
571 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
573 mutex_unlock(&bdev->rbtree_mutex);
574 dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
578 mutex_unlock(&bdev->rbtree_mutex);
583 struct hmm_buffer_object *hmm_bo_device_search_in_range(
584 struct hmm_bo_device *bdev, unsigned int vaddr)
586 struct hmm_buffer_object *bo;
588 check_bodev_null_return(bdev, NULL);
590 mutex_lock(&bdev->rbtree_mutex);
591 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
593 mutex_unlock(&bdev->rbtree_mutex);
594 dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
598 mutex_unlock(&bdev->rbtree_mutex);
603 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
604 struct hmm_bo_device *bdev, const void *vaddr)
606 struct list_head *pos;
607 struct hmm_buffer_object *bo;
610 check_bodev_null_return(bdev, NULL);
612 spin_lock_irqsave(&bdev->list_lock, flags);
613 list_for_each(pos, &bdev->entire_bo_list) {
614 bo = list_to_hmm_bo(pos);
615 /* skip bo which has no vm_node allocated */
616 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
618 if (bo->vmap_addr == vaddr)
621 spin_unlock_irqrestore(&bdev->list_lock, flags);
624 spin_unlock_irqrestore(&bdev->list_lock, flags);
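/*
 * Give the first 'free_pgnr' pages of a private bo back to where they came
 * from: reserved pages go to the reserved pool, dynamic and general pages
 * go to the dynamic pool if it is initialized, otherwise they are set back
 * to write-back caching and freed to the system.
 */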
628 static void free_private_bo_pages(struct hmm_buffer_object *bo,
629 struct hmm_pool *dypool,
630 struct hmm_pool *repool,
635 for (i = 0; i < free_pgnr; i++) {
636 switch (bo->page_obj[i].type) {
637 case HMM_PAGE_TYPE_RESERVED:
639 && repool->pops->pool_free_pages) {
640 repool->pops->pool_free_pages(repool->pool_info,
642 hmm_mem_stat.res_cnt--;
646 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
647 * memory, so when freeing them, they should be put into the dynamic
650 case HMM_PAGE_TYPE_DYNAMIC:
651 case HMM_PAGE_TYPE_GENERAL:
653 && dypool->pops->pool_inited
654 && dypool->pops->pool_inited(dypool->pool_info)) {
655 if (dypool->pops->pool_free_pages)
656 dypool->pops->pool_free_pages(
663 * if the dynamic memory pool doesn't exist, we need to free
664 * the pages back to the system directly.
667 ret = set_pages_wb(bo->page_obj[i].page, 1);
670 "set page to WB err ...ret = %d\n",
673 W/A: set_pages_wb occasionally returns -EFAULT,
674 indicating that the address of the page is not in the valid
675 range (0xffff880000000000~0xffffc7ffffffffff);
676 __free_pages would then panic. It is not known why the page
677 address becomes invalid; it may be memory corruption by lowmemory
680 __free_pages(bo->page_obj[i].page, 0);
681 hmm_mem_stat.sys_size--;
690 /*Allocate pages which will be used only by ISP*/
691 static int alloc_private_pages(struct hmm_buffer_object *bo,
694 struct hmm_pool *dypool,
695 struct hmm_pool *repool)
698 unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
700 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
702 int failure_number = 0;
703 bool reduce_order = false;
704 bool lack_mem = true;
707 gfp |= __GFP_HIGHMEM;
711 bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
713 if (unlikely(!bo->page_obj))
720 * get physical pages from dynamic pages pool.
722 if (dypool->pops && dypool->pops->pool_alloc_pages) {
723 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
726 hmm_mem_stat.dyc_size -= alloc_pgnr;
728 if (alloc_pgnr == pgnr)
736 * get physical pages from reserved pages pool for atomisp.
738 if (repool->pops && repool->pops->pool_alloc_pages) {
739 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
740 &bo->page_obj[i], pgnr,
742 hmm_mem_stat.res_cnt += alloc_pgnr;
743 if (alloc_pgnr == pgnr)
751 order = nr_to_order_bottom(pgnr);
753 * if we are short of memory, we will set order to 0
757 order = HMM_MIN_ORDER;
758 else if (order > HMM_MAX_ORDER)
759 order = HMM_MAX_ORDER;
762 * When order > HMM_MIN_ORDER, for performance reasons we don't
763 * want alloc_pages() to sleep. In case it fails and falls back
764 * to HMM_MIN_ORDER or in case the requested order is originally
765 * the minimum value, we can allow alloc_pages() to sleep for
766 * robustness purposes.
768 * REVISIT: why __GFP_FS is necessary?
770 if (order == HMM_MIN_ORDER) {
772 gfp |= __GFP_RECLAIM | __GFP_FS;
775 pages = alloc_pages(gfp, order);
776 if (unlikely(!pages)) {
778 * in the low memory case, if page allocation fails,
779 * we turn to try if order=0 allocation could
780 * succeed. if order=0 fails too, that means there is
783 if (order == HMM_MIN_ORDER) {
785 "%s: cannot allocate pages\n",
789 order = HMM_MIN_ORDER;
793 * if the allocation fails two times in a row, we think we are short
796 if (failure_number == 2) {
802 blk_pgnr = order_to_nr(order);
806 * set memory to uncacheable -- UC_MINUS
808 ret = set_pages_uc(pages, blk_pgnr);
811 "set page uncacheablefailed.\n");
813 __free_pages(pages, order);
819 for (j = 0; j < blk_pgnr; j++) {
820 bo->page_obj[i].page = pages + j;
821 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
825 hmm_mem_stat.sys_size += blk_pgnr;
828 * if order is not reduced this time, clear
832 reduce_order = false;
841 free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
848 static void free_private_pages(struct hmm_buffer_object *bo,
849 struct hmm_pool *dypool,
850 struct hmm_pool *repool)
852 free_private_bo_pages(bo, dypool, repool, bo->pgnr);
858 * Hacked from kernel function __get_user_pages in mm/memory.c
860 * Handle buffers allocated by another kernel space driver and mmapped into user
861 * space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA structure.
863 * Get physical pages from a user space virtual address and update them into the page list
865 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
866 unsigned long start, int nr_pages,
867 unsigned int gup_flags, struct page **pages,
868 struct vm_area_struct **vmas)
871 unsigned long vm_flags;
876 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
879 * Require read or write permissions.
880 * If FOLL_FORCE is set, we only require the "MAY" flags.
882 vm_flags = (gup_flags & FOLL_WRITE) ?
883 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
884 vm_flags &= (gup_flags & FOLL_FORCE) ?
885 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
889 struct vm_area_struct *vma;
891 vma = find_vma(mm, start);
893 dev_err(atomisp_dev, "find_vma failed\n");
894 return i ? : -EFAULT;
897 if (is_vm_hugetlb_page(vma)) {
899 i = follow_hugetlb_page(mm, vma, pages, vmas,
900 &start, &nr_pages, i, gup_flags);
910 * If we have a pending SIGKILL, don't keep faulting
911 * pages and potentially allocating memory.
913 if (unlikely(fatal_signal_pending(current))) {
915 "fatal_signal_pending in %s\n",
917 return i ? i : -ERESTARTSYS;
920 ret = follow_pfn(vma, start, &pfn);
922 dev_err(atomisp_dev, "follow_pfn() failed\n");
923 return i ? : -EFAULT;
926 page = pfn_to_page(pfn);
928 return i ? i : PTR_ERR(page);
932 flush_anon_page(vma, page, start);
933 flush_dcache_page(page);
940 } while (nr_pages && start < vma->vm_end);
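/*
 * Thin wrapper around __get_pfnmap_pages() that builds the FOLL_* flags
 * from the write/force arguments before doing the walk.
 */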
946 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
947 unsigned long start, int nr_pages, int write, int force,
948 struct page **pages, struct vm_area_struct **vmas)
950 int flags = FOLL_TOUCH;
959 return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
963 * Convert a user space virtual address into a page list
965 static int alloc_user_pages(struct hmm_buffer_object *bo,
966 const void __user *userptr, bool cached)
970 struct vm_area_struct *vma;
973 pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
974 if (unlikely(!pages))
977 bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
979 if (unlikely(!bo->page_obj)) {
984 mutex_unlock(&bo->mutex);
985 down_read(¤t->mm->mmap_sem);
986 vma = find_vma(current->mm, (unsigned long)userptr);
987 up_read(¤t->mm->mmap_sem);
989 dev_err(atomisp_dev, "find_vma failed\n");
992 mutex_lock(&bo->mutex);
995 mutex_lock(&bo->mutex);
997 * Handle frame buffers allocated in another kernel space driver
998 * and mapped to user space
1000 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1001 page_nr = get_pfnmap_pages(current, current->mm,
1002 (unsigned long)userptr,
1003 (int)(bo->pgnr), 1, 0,
1005 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1007 /* Handle frame buffers allocated in user space */
1008 mutex_unlock(&bo->mutex);
1009 page_nr = get_user_pages_fast((unsigned long)userptr,
1010 (int)(bo->pgnr), 1, pages);
1011 mutex_lock(&bo->mutex);
1012 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1015 /* can be written by caller, not forced */
1016 if (page_nr != bo->pgnr) {
1017 dev_err(atomisp_dev,
1018 "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
1023 for (i = 0; i < bo->pgnr; i++) {
1024 bo->page_obj[i].page = pages[i];
1025 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1027 hmm_mem_stat.usr_size += bo->pgnr;
1033 for (i = 0; i < page_nr; i++)
1036 kfree(bo->page_obj);
1041 static void free_user_pages(struct hmm_buffer_object *bo)
1045 for (i = 0; i < bo->pgnr; i++)
1046 put_page(bo->page_obj[i].page);
1047 hmm_mem_stat.usr_size -= bo->pgnr;
1049 kfree(bo->page_obj);
1053 * allocate/free physical pages for the bo.
1055 * type indicates where the pages are from. currently we have 3 types
1056 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1058 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
1059 * try to alloc memory from highmem if from_highmem is set.
1061 * userptr is only valid when type is HMM_BO_USER; it indicates
1062 * the start address in the user space task.
1064 * from_highmem and userptr will both be ignored when type is
1067 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1068 enum hmm_bo_type type, int from_highmem,
1069 const void __user *userptr, bool cached)
1073 check_bo_null_return(bo, -EINVAL);
1075 mutex_lock(&bo->mutex);
1076 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1080 * add HMM_BO_USER type
1082 if (type == HMM_BO_PRIVATE) {
1083 ret = alloc_private_pages(bo, from_highmem,
1084 cached, &dynamic_pool, &reserved_pool);
1085 } else if (type == HMM_BO_USER) {
1086 ret = alloc_user_pages(bo, userptr, cached);
1088 dev_err(atomisp_dev, "invalid buffer type.\n");
1096 bo->status |= HMM_BO_PAGE_ALLOCED;
1098 mutex_unlock(&bo->mutex);
1103 mutex_unlock(&bo->mutex);
1104 dev_err(atomisp_dev, "alloc pages err...\n");
1107 mutex_unlock(&bo->mutex);
1108 dev_err(atomisp_dev,
1109 "buffer object has already page allocated.\n");
1114 * free physical pages of the bo.
1116 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1118 check_bo_null_return_void(bo);
1120 mutex_lock(&bo->mutex);
1122 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1124 /* clear the flag anyway. */
1125 bo->status &= (~HMM_BO_PAGE_ALLOCED);
1127 if (bo->type == HMM_BO_PRIVATE)
1128 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1129 else if (bo->type == HMM_BO_USER)
1130 free_user_pages(bo);
1132 dev_err(atomisp_dev, "invalid buffer type.\n");
1133 mutex_unlock(&bo->mutex);
1138 mutex_unlock(&bo->mutex);
1139 dev_err(atomisp_dev,
1140 "buffer object not page allocated yet.\n");
1143 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1145 check_bo_null_return(bo, 0);
1147 return bo->status & HMM_BO_PAGE_ALLOCED;
1151 * get physical page info of the bo.
1153 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1154 struct hmm_page_object **page_obj, int *pgnr)
1156 check_bo_null_return(bo, -EINVAL);
1158 mutex_lock(&bo->mutex);
1160 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1162 *page_obj = bo->page_obj;
1165 mutex_unlock(&bo->mutex);
1170 dev_err(atomisp_dev,
1171 "buffer object not page allocated yet.\n");
1172 mutex_unlock(&bo->mutex);
1177 * bind the physical pages to a virtual address space.
1179 int hmm_bo_bind(struct hmm_buffer_object *bo)
1183 struct hmm_bo_device *bdev;
1186 check_bo_null_return(bo, -EINVAL);
1188 mutex_lock(&bo->mutex);
1190 check_bo_status_yes_goto(bo,
1191 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1194 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1200 for (i = 0; i < bo->pgnr; i++) {
1202 isp_mmu_map(&bdev->mmu, virt,
1203 page_to_phys(bo->page_obj[i].page), 1);
1206 virt += (1 << PAGE_SHIFT);
1212 * theoretically, we do not need to flush the TLB as we did not change
1213 * any existing address mappings, but for Silicon Hive's MMU it is
1214 * really a bug here. Presumably, when fetching PTEs (page table entries)
1215 * into the TLB, the MMU fetches additional INVALID PTEs automatically
1216 * for performance reasons. E.g. we only set up 1 page address mapping,
1217 * meaning we update 1 PTE, but the MMU fetches 4 PTEs at a time,
1218 * so the additional 3 PTEs are invalid.
1220 if (bo->start != 0x0)
1221 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1222 (bo->pgnr << PAGE_SHIFT));
1224 bo->status |= HMM_BO_BINDED;
1226 mutex_unlock(&bo->mutex);
1231 /* unbind the physical pages from the related virtual address space */
1233 for ( ; i > 0; i--) {
1234 isp_mmu_unmap(&bdev->mmu, virt, 1);
1235 virt += pgnr_to_size(1);
1238 mutex_unlock(&bo->mutex);
1239 dev_err(atomisp_dev,
1240 "setup MMU address mapping failed.\n");
1244 mutex_unlock(&bo->mutex);
1245 dev_err(atomisp_dev, "buffer object already bound.\n");
1248 mutex_unlock(&bo->mutex);
1249 dev_err(atomisp_dev,
1250 "buffer object vm_node or page not allocated.\n");
1255 * unbind the physical pages from the related virtual address space.
1257 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1260 struct hmm_bo_device *bdev;
1263 check_bo_null_return_void(bo);
1265 mutex_lock(&bo->mutex);
1267 check_bo_status_yes_goto(bo,
1268 HMM_BO_PAGE_ALLOCED |
1270 HMM_BO_BINDED, status_err);
1276 for (i = 0; i < bo->pgnr; i++) {
1277 isp_mmu_unmap(&bdev->mmu, virt, 1);
1278 virt += pgnr_to_size(1);
1282 * flush TLB as the address mapping has been removed and
1283 * related TLBs should be invalidated.
1285 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1286 (bo->pgnr << PAGE_SHIFT));
1288 bo->status &= (~HMM_BO_BINDED);
1290 mutex_unlock(&bo->mutex);
1295 mutex_unlock(&bo->mutex);
1296 dev_err(atomisp_dev,
1297 "buffer vm or page not allocated or not binded yet.\n");
1300 int hmm_bo_binded(struct hmm_buffer_object *bo)
1304 check_bo_null_return(bo, 0);
1306 mutex_lock(&bo->mutex);
1308 ret = bo->status & HMM_BO_BINDED;
1310 mutex_unlock(&bo->mutex);
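/*
 * Map the bo's pages into a contiguous kernel virtual range with vmap();
 * 'cached' selects PAGE_KERNEL vs PAGE_KERNEL_NOCACHE. An existing mapping
 * with the wrong caching attribute is torn down and recreated.
 */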
1315 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1317 struct page **pages;
1320 check_bo_null_return(bo, NULL);
1322 mutex_lock(&bo->mutex);
1323 if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1324 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1325 mutex_unlock(&bo->mutex);
1326 return bo->vmap_addr;
1329 /* cached status needs to be changed, so vunmap first */
1330 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1331 vunmap(bo->vmap_addr);
1332 bo->vmap_addr = NULL;
1333 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1336 pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1337 if (unlikely(!pages)) {
1338 mutex_unlock(&bo->mutex);
1342 for (i = 0; i < bo->pgnr; i++)
1343 pages[i] = bo->page_obj[i].page;
1345 bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1346 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1347 if (unlikely(!bo->vmap_addr)) {
1349 mutex_unlock(&bo->mutex);
1350 dev_err(atomisp_dev, "vmap failed...\n");
1353 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1357 mutex_unlock(&bo->mutex);
1358 return bo->vmap_addr;
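/*
 * Write back the CPU cache for a cached vmap mapping of the bo so that the
 * ISP sees up-to-date data; does nothing if the bo is not vmap'ed cached.
 */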
1361 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1363 check_bo_null_return_void(bo);
1365 mutex_lock(&bo->mutex);
1366 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1367 mutex_unlock(&bo->mutex);
1371 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1372 mutex_unlock(&bo->mutex);
1375 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1377 check_bo_null_return_void(bo);
1379 mutex_lock(&bo->mutex);
1380 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1381 vunmap(bo->vmap_addr);
1382 bo->vmap_addr = NULL;
1383 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1386 mutex_unlock(&bo->mutex);
1390 void hmm_bo_ref(struct hmm_buffer_object *bo)
1392 check_bo_null_return_void(bo);
1394 kref_get(&bo->kref);
1397 static void kref_hmm_bo_release(struct kref *kref)
1402 hmm_bo_release(kref_to_hmm_bo(kref));
1405 void hmm_bo_unref(struct hmm_buffer_object *bo)
1407 check_bo_null_return_void(bo);
1409 kref_put(&bo->kref, kref_hmm_bo_release);
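/*
 * VMA open/close callbacks used by hmm_bo_mmap(): they keep the bo's mmap
 * reference count and the HMM_BO_MMAPED status flag in sync with the
 * userspace mappings of the buffer.
 */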
1412 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1414 struct hmm_buffer_object *bo =
1415 (struct hmm_buffer_object *)vma->vm_private_data;
1417 check_bo_null_return_void(bo);
1421 mutex_lock(&bo->mutex);
1423 bo->status |= HMM_BO_MMAPED;
1427 mutex_unlock(&bo->mutex);
1430 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1432 struct hmm_buffer_object *bo =
1433 (struct hmm_buffer_object *)vma->vm_private_data;
1435 check_bo_null_return_void(bo);
1439 mutex_lock(&bo->mutex);
1443 if (!bo->mmap_count) {
1444 bo->status &= (~HMM_BO_MMAPED);
1445 vma->vm_private_data = NULL;
1448 mutex_unlock(&bo->mutex);
1451 static const struct vm_operations_struct hmm_bo_vm_ops = {
1452 .open = hmm_bo_vm_open,
1453 .close = hmm_bo_vm_close,
1457 * mmap the bo to user space.
1459 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1461 unsigned int start, end;
1463 unsigned int pgnr, i;
1466 check_bo_null_return(bo, -EINVAL);
1468 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1471 start = vma->vm_start;
1475 * check vma's virtual address space size and buffer object's size.
1478 if ((start + pgnr_to_size(pgnr)) != end) {
1479 dev_warn(atomisp_dev,
1480 "vma's address space size not equal to buffer object's size");
1484 virt = vma->vm_start;
1485 for (i = 0; i < pgnr; i++) {
1486 pfn = page_to_pfn(bo->page_obj[i].page);
1487 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1488 dev_warn(atomisp_dev,
1489 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
1496 vma->vm_private_data = bo;
1498 vma->vm_ops = &hmm_bo_vm_ops;
1499 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1502 * call hmm_bo_vm_open explicitly.
1504 hmm_bo_vm_open(vma);
1509 dev_err(atomisp_dev, "buffer page not allocated yet.\n");