drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
1 /*
2  * Support for Medfield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  */
23 /*
24  * This file contains functions for buffer object structure management
25  */
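/*
 * Illustrative life cycle of a buffer object, pieced together from the
 * functions defined below (a sketch only -- the real callers live elsewhere
 * in the atomisp driver, and the variable names here are made up):
 *
 *      struct hmm_bo_device bdev;
 *      struct hmm_buffer_object *bo;
 *
 *      hmm_bo_device_init(&bdev, mmu_driver, vaddr_start, size);
 *      bo = hmm_bo_alloc(&bdev, pgnr);
 *      hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *      hmm_bo_bind(bo);                 map pages into the ISP MMU
 *      ... use bo->start as the ISP virtual address ...
 *      hmm_bo_unbind(bo);
 *      hmm_bo_free_pages(bo);
 *      hmm_bo_unref(bo);                kref drops to 0 -> hmm_bo_release()
 */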
26 #include <linux/kernel.h>
27 #include <linux/types.h>
28 #include <linux/gfp.h>          /* for GFP_KERNEL, GFP_NOWAIT */
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/hugetlb.h>
32 #include <linux/highmem.h>
33 #include <linux/slab.h>         /* for kmalloc */
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/string.h>
37 #include <linux/list.h>
38 #include <linux/errno.h>
39 #include <asm/cacheflush.h>
40 #include <linux/io.h>
41 #include <asm/current.h>
42 #include <linux/sched/signal.h>
43 #include <linux/file.h>
44
45 #include "atomisp_internal.h"
46 #include "hmm/hmm_common.h"
47 #include "hmm/hmm_pool.h"
48 #include "hmm/hmm_bo.h"
49
50 static unsigned int order_to_nr(unsigned int order)
51 {
52         return 1U << order;
53 }
54
55 static unsigned int nr_to_order_bottom(unsigned int nr)
56 {
57         return fls(nr) - 1;
58 }
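/*
 * Worked example for the two order helpers above (assuming the usual fls()
 * semantics): order_to_nr(2) == 4, i.e. 2^2 pages per block, while
 * nr_to_order_bottom(6) == 2, the largest order whose block size (4 pages)
 * still fits within the requested 6 pages.
 */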
59
60 struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
61 {
62         struct hmm_buffer_object *bo;
63
64         bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
65         if (!bo)
66                 dev_err(atomisp_dev, "%s: failed!\n", __func__);
67
68         return bo;
69 }
70
71 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
72                                         unsigned int pgnr)
73 {
74         check_bodev_null_return(bdev, -EINVAL);
75         var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
76                         "hmm_bo_device not inited yet.\n");
77         /* prevent zero size buffer object */
78         if (pgnr == 0) {
79                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
80                 return -EINVAL;
81         }
82
83         memset(bo, 0, sizeof(*bo));
84         mutex_init(&bo->mutex);
85
86         /* init the bo->list HEAD as an element of entire_bo_list */
87         INIT_LIST_HEAD(&bo->list);
88
89         bo->bdev = bdev;
90         bo->vmap_addr = NULL;
91         bo->status = HMM_BO_FREE;
92         bo->start = bdev->start;
93         bo->pgnr = pgnr;
94         bo->end = bo->start + pgnr_to_size(pgnr);
95         bo->prev = NULL;
96         bo->next = NULL;
97
98         return 0;
99 }
100
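/*
 * The free rbtree is keyed by bo->pgnr.  Buffer objects of equal size share
 * one tree node: the node itself is a bo with prev == NULL, and any further
 * bos of that size hang off it through the bo->next/bo->prev list.
 */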
101 struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
102                                 struct rb_node *node, unsigned int pgnr)
103 {
104         struct hmm_buffer_object *this, *ret_bo, *temp_bo;
105
106         this = rb_entry(node, struct hmm_buffer_object, node);
107         if (this->pgnr == pgnr ||
108                 (this->pgnr > pgnr && this->node.rb_left == NULL)) {
109                 goto remove_bo_and_return;
110         } else {
111                 if (this->pgnr < pgnr) {
112                         if (!this->node.rb_right)
113                                 return NULL;
114                         ret_bo = __bo_search_and_remove_from_free_rbtree(
115                                 this->node.rb_right, pgnr);
116                 } else {
117                         ret_bo = __bo_search_and_remove_from_free_rbtree(
118                                 this->node.rb_left, pgnr);
119                 }
120                 if (!ret_bo) {
121                         if (this->pgnr > pgnr)
122                                 goto remove_bo_and_return;
123                         else
124                                 return NULL;
125                 }
126                 return ret_bo;
127         }
128
129 remove_bo_and_return:
130         /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
131          * 1. check if 'this->next' is NULL:
132          *      yes: erase 'this' node and rebalance rbtree, return 'this'.
133          */
134         if (this->next == NULL) {
135                 rb_erase(&this->node, &this->bdev->free_rbtree);
136                 return this;
137         }
138         /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
139          * 2. check if 'this->next->next' is NULL:
140          *      yes: change the related 'next/prev' pointer,
141          *              return 'this->next' but the rbtree stays unchanged.
142          */
143         temp_bo = this->next;
144         this->next = temp_bo->next;
145         if (temp_bo->next)
146                 temp_bo->next->prev = this;
147         temp_bo->next = NULL;
148         temp_bo->prev = NULL;
149         return temp_bo;
150 }
151
152 struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
153                                                         ia_css_ptr start)
154 {
155         struct rb_node *n = root->rb_node;
156         struct hmm_buffer_object *bo;
157
158         do {
159                 bo = rb_entry(n, struct hmm_buffer_object, node);
160
161                 if (bo->start > start) {
162                         if (n->rb_left == NULL)
163                                 return NULL;
164                         n = n->rb_left;
165                 } else if (bo->start < start) {
166                         if (n->rb_right == NULL)
167                                 return NULL;
168                         n = n->rb_right;
169                 } else {
170                         return bo;
171                 }
172         } while (n);
173
174         return NULL;
175 }
176
177 struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
178                                         unsigned int start)
179 {
180         struct rb_node *n = root->rb_node;
181         struct hmm_buffer_object *bo;
182
183         do {
184                 bo = rb_entry(n, struct hmm_buffer_object, node);
185
186                 if (bo->start > start) {
187                         if (n->rb_left == NULL)
188                                 return NULL;
189                         n = n->rb_left;
190                 } else {
191                         if (bo->end > start)
192                                 return bo;
193                         if (n->rb_right == NULL)
194                                 return NULL;
195                         n = n->rb_right;
196                 }
197         } while (n);
198
199         return NULL;
200 }
201
202 static void __bo_insert_to_free_rbtree(struct rb_root *root,
203                                         struct hmm_buffer_object *bo)
204 {
205         struct rb_node **new = &(root->rb_node);
206         struct rb_node *parent = NULL;
207         struct hmm_buffer_object *this;
208         unsigned int pgnr = bo->pgnr;
209
210         while (*new) {
211                 parent = *new;
212                 this = container_of(*new, struct hmm_buffer_object, node);
213
214                 if (pgnr < this->pgnr) {
215                         new = &((*new)->rb_left);
216                 } else if (pgnr > this->pgnr) {
217                         new = &((*new)->rb_right);
218                 } else {
219                         bo->prev = this;
220                         bo->next = this->next;
221                         if (this->next)
222                                 this->next->prev = bo;
223                         this->next = bo;
224                         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
225                         return;
226                 }
227         }
228
229         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
230
231         rb_link_node(&bo->node, parent, new);
232         rb_insert_color(&bo->node, root);
233 }
234
235 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
236                                         struct hmm_buffer_object *bo)
237 {
238         struct rb_node **new = &(root->rb_node);
239         struct rb_node *parent = NULL;
240         struct hmm_buffer_object *this;
241         unsigned int start = bo->start;
242
243         while (*new) {
244                 parent = *new;
245                 this = container_of(*new, struct hmm_buffer_object, node);
246
247                 if (start < this->start)
248                         new = &((*new)->rb_left);
249                 else
250                         new = &((*new)->rb_right);
251         }
252
253         kref_init(&bo->kref);
254         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
255
256         rb_link_node(&bo->node, parent, new);
257         rb_insert_color(&bo->node, root);
258 }
259
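/*
 * Split the first 'pgnr' pages off an over-sized free bo: the returned new
 * bo takes over the front of bo's address range, 'bo' keeps the remainder,
 * and the two stay adjacent in entire_bo_list so that they can be merged
 * again on release.
 */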
260 struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
261                                         struct hmm_buffer_object *bo,
262                                         unsigned int pgnr)
263 {
264         struct hmm_buffer_object *new_bo;
265         unsigned long flags;
266         int ret;
267
268         new_bo = __bo_alloc(bdev->bo_cache);
269         if (!new_bo) {
270                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
271                 return NULL;
272         }
273         ret = __bo_init(bdev, new_bo, pgnr);
274         if (ret) {
275                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
276                 kmem_cache_free(bdev->bo_cache, new_bo);
277                 return NULL;
278         }
279
280         new_bo->start = bo->start;
281         new_bo->end = new_bo->start + pgnr_to_size(pgnr);
282         bo->start = new_bo->end;
283         bo->pgnr = bo->pgnr - pgnr;
284
285         spin_lock_irqsave(&bdev->list_lock, flags);
286         list_add_tail(&new_bo->list, &bo->list);
287         spin_unlock_irqrestore(&bdev->list_lock, flags);
288
289         return new_bo;
290 }
291
292 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
293 {
294         struct hmm_bo_device *bdev = bo->bdev;
295         /* There are four situations when we take a known bo off the free rbtree:
296          * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
297          *      and does not have a linked list hanging off it; to take off this
298          *      bo, we just erase it directly and rebalance the free rbtree
299          */
300         if (bo->prev == NULL && bo->next == NULL) {
301                 rb_erase(&bo->node, &bdev->free_rbtree);
302         /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
303          *      and has a linked list; to take off this bo we need to erase bo
304          *      first, then insert bo->next into the free rbtree and rebalance
305          *      the free rbtree
306          */
307         } else if (bo->prev == NULL && bo->next != NULL) {
308                 bo->next->prev = NULL;
309                 rb_erase(&bo->node, &bdev->free_rbtree);
310                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
311                 bo->next = NULL;
312         /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
313          *      node, bo is the last element of the linked list after rbtree
314          *      node, to take off this bo, we just need to set the "prev/next"
315          *      pointers to NULL, the free rbtree stays unchanged
316          */
317         } else if (bo->prev != NULL && bo->next == NULL) {
318                 bo->prev->next = NULL;
319                 bo->prev = NULL;
320         /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
321          *      node, bo is in the middle of the linked list after rbtree node,
322          *      to take off this bo, we just fix up its neighbours' "prev/next"
323          *      pointers, the free rbtree stays unchanged
324          */
325         } else {
326                 bo->next->prev = bo->prev;
327                 bo->prev->next = bo->next;
328                 bo->next = NULL;
329                 bo->prev = NULL;
330         }
331 }
332
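/*
 * Merge 'bo' into the bo that follows it in the address space: next_bo
 * absorbs bo's start address and page count, and bo itself is removed from
 * entire_bo_list and returned to the slab cache.
 */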
333 struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
334                                         struct hmm_buffer_object *next_bo)
335 {
336         struct hmm_bo_device *bdev;
337         unsigned long flags;
338
339         bdev = bo->bdev;
340         next_bo->start = bo->start;
341         next_bo->pgnr = next_bo->pgnr + bo->pgnr;
342
343         spin_lock_irqsave(&bdev->list_lock, flags);
344         list_del(&bo->list);
345         spin_unlock_irqrestore(&bdev->list_lock, flags);
346
347         kmem_cache_free(bo->bdev->bo_cache, bo);
348
349         return next_bo;
350 }
351
352 /*
353  * hmm_bo_device functions.
354  */
355 int hmm_bo_device_init(struct hmm_bo_device *bdev,
356                                 struct isp_mmu_client *mmu_driver,
357                                 unsigned int vaddr_start,
358                                 unsigned int size)
359 {
360         struct hmm_buffer_object *bo;
361         unsigned long flags;
362         int ret;
363
364         check_bodev_null_return(bdev, -EINVAL);
365
366         ret = isp_mmu_init(&bdev->mmu, mmu_driver);
367         if (ret) {
368                 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
369                 return ret;
370         }
371
372         bdev->start = vaddr_start;
373         bdev->pgnr = size_to_pgnr_ceil(size);
374         bdev->size = pgnr_to_size(bdev->pgnr);
375
376         spin_lock_init(&bdev->list_lock);
377         mutex_init(&bdev->rbtree_mutex);
378
379         bdev->flag = HMM_BO_DEVICE_INITED;
380
381         INIT_LIST_HEAD(&bdev->entire_bo_list);
382         bdev->allocated_rbtree = RB_ROOT;
383         bdev->free_rbtree = RB_ROOT;
384
385         bdev->bo_cache = kmem_cache_create("bo_cache",
386                                 sizeof(struct hmm_buffer_object), 0, 0, NULL);
387         if (!bdev->bo_cache) {
388                 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
389                 isp_mmu_exit(&bdev->mmu);
390                 return -ENOMEM;
391         }
392
393         bo = __bo_alloc(bdev->bo_cache);
394         if (!bo) {
395                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
396                 isp_mmu_exit(&bdev->mmu);
397                 return -ENOMEM;
398         }
399
400         ret = __bo_init(bdev, bo, bdev->pgnr);
401         if (ret) {
402                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
403                 kmem_cache_free(bdev->bo_cache, bo);
404                 isp_mmu_exit(&bdev->mmu);
405                 return -EINVAL;
406         }
407
408         spin_lock_irqsave(&bdev->list_lock, flags);
409         list_add_tail(&bo->list, &bdev->entire_bo_list);
410         spin_unlock_irqrestore(&bdev->list_lock, flags);
411
412         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
413
414         return 0;
415 }
416
417 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
418                                         unsigned int pgnr)
419 {
420         struct hmm_buffer_object *bo, *new_bo;
421         struct rb_root *root = &bdev->free_rbtree;
422
423         check_bodev_null_return(bdev, NULL);
424         var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
425                         "hmm_bo_device not inited yet.\n");
426
427         if (pgnr == 0) {
428                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
429                 return NULL;
430         }
431
432         mutex_lock(&bdev->rbtree_mutex);
433         bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
434         if (!bo) {
435                 mutex_unlock(&bdev->rbtree_mutex);
436                 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
437                         __func__);
438                 return NULL;
439         }
440
441         if (bo->pgnr > pgnr) {
442                 new_bo = __bo_break_up(bdev, bo, pgnr);
443                 if (!new_bo) {
444                         mutex_unlock(&bdev->rbtree_mutex);
445                         dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
446                                 __func__);
447                         return NULL;
448                 }
449
450                 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
451                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
452
453                 mutex_unlock(&bdev->rbtree_mutex);
454                 return new_bo;
455         }
456
457         __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
458
459         mutex_unlock(&bdev->rbtree_mutex);
460         return bo;
461 }
462
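/*
 * Return a buffer object to the free pool: undo any leftover bind, page
 * allocation or vmap, coalesce it with free neighbours in the address
 * space, then reinsert the (possibly merged) bo into the free rbtree.
 * A bo that is still mmapped is left untouched.
 */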
463 void hmm_bo_release(struct hmm_buffer_object *bo)
464 {
465         struct hmm_bo_device *bdev = bo->bdev;
466         struct hmm_buffer_object *next_bo, *prev_bo;
467
468         mutex_lock(&bdev->rbtree_mutex);
469
470         /*
471          * FIX ME:
472          *
473          * how to destroy the bo when it is still MMAPED?
474          *
475          * ideally, this will not happen as hmm_bo_release
476          * will only be called when kref reaches 0, and in the mmap
477          * operation hmm_bo_ref will eventually be called.
478          * so, if this happens, something has gone wrong.
479          */
480         if (bo->status & HMM_BO_MMAPED) {
481                 mutex_unlock(&bdev->rbtree_mutex);
482                 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
483                 return;
484         }
485
486         if (bo->status & HMM_BO_BINDED) {
487                 dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
488                 hmm_bo_unbind(bo);
489         }
490
491         if (bo->status & HMM_BO_PAGE_ALLOCED) {
492                 dev_warn(atomisp_dev, "the pages are not freed, free the pages first\n");
493                 hmm_bo_free_pages(bo);
494         }
495         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
496                 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
497                 hmm_bo_vunmap(bo);
498         }
499
500         rb_erase(&bo->node, &bdev->allocated_rbtree);
501
502         prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
503         next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
504
505         if (bo->list.prev != &bdev->entire_bo_list &&
506                 prev_bo->end == bo->start &&
507                 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
508                 __bo_take_off_handling(prev_bo);
509                 bo = __bo_merge(prev_bo, bo);
510         }
511
512         if (bo->list.next != &bdev->entire_bo_list &&
513                 next_bo->start == bo->end &&
514                 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
515                 __bo_take_off_handling(next_bo);
516                 bo = __bo_merge(bo, next_bo);
517         }
518
519         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
520
521         mutex_unlock(&bdev->rbtree_mutex);
522         return;
523 }
524
525 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
526 {
527         struct hmm_buffer_object *bo;
528         unsigned long flags;
529
530         dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
531
532         check_bodev_null_return_void(bdev);
533
534         /*
535          * release all allocated bos even if they are in use;
536          * all bos will be merged into one big bo
537          */
538         while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
539                 hmm_bo_release(
540                         rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
541
542         dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
543                 __func__);
544
545         /* free all bos to release all ISP virtual memory */
546         while (!list_empty(&bdev->entire_bo_list)) {
547                 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
548
549                 spin_lock_irqsave(&bdev->list_lock, flags);
550                 list_del(&bo->list);
551                 spin_unlock_irqrestore(&bdev->list_lock, flags);
552
553                 kmem_cache_free(bdev->bo_cache, bo);
554         }
555
556         dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
557
558         kmem_cache_destroy(bdev->bo_cache);
559
560         isp_mmu_exit(&bdev->mmu);
561 }
562
563 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
564 {
565         check_bodev_null_return(bdev, -EINVAL);
566
567         return bdev->flag == HMM_BO_DEVICE_INITED;
568 }
569
570 int hmm_bo_allocated(struct hmm_buffer_object *bo)
571 {
572         check_bo_null_return(bo, 0);
573
574         return bo->status & HMM_BO_ALLOCED;
575 }
576
577 struct hmm_buffer_object *hmm_bo_device_search_start(
578         struct hmm_bo_device *bdev, ia_css_ptr vaddr)
579 {
580         struct hmm_buffer_object *bo;
581
582         check_bodev_null_return(bdev, NULL);
583
584         mutex_lock(&bdev->rbtree_mutex);
585         bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
586         if (!bo) {
587                 mutex_unlock(&bdev->rbtree_mutex);
588                 dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
589                         __func__, vaddr);
590                 return NULL;
591         }
592         mutex_unlock(&bdev->rbtree_mutex);
593
594         return bo;
595 }
596
597 struct hmm_buffer_object *hmm_bo_device_search_in_range(
598         struct hmm_bo_device *bdev, unsigned int vaddr)
599 {
600         struct hmm_buffer_object *bo;
601
602         check_bodev_null_return(bdev, NULL);
603
604         mutex_lock(&bdev->rbtree_mutex);
605         bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
606         if (!bo) {
607                 mutex_unlock(&bdev->rbtree_mutex);
608                 dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
609                         __func__, vaddr);
610                 return NULL;
611         }
612         mutex_unlock(&bdev->rbtree_mutex);
613
614         return bo;
615 }
616
617 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
618         struct hmm_bo_device *bdev, const void *vaddr)
619 {
620         struct list_head *pos;
621         struct hmm_buffer_object *bo;
622         unsigned long flags;
623
624         check_bodev_null_return(bdev, NULL);
625
626         spin_lock_irqsave(&bdev->list_lock, flags);
627         list_for_each(pos, &bdev->entire_bo_list) {
628                 bo = list_to_hmm_bo(pos);
629                 /* skip any bo that is still free */
630                 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
631                         continue;
632                 if (bo->vmap_addr == vaddr)
633                         goto found;
634         }
635         spin_unlock_irqrestore(&bdev->list_lock, flags);
636         return NULL;
637 found:
638         spin_unlock_irqrestore(&bdev->list_lock, flags);
639         return bo;
640
641 }
642
643
644 static void free_private_bo_pages(struct hmm_buffer_object *bo,
645                                 struct hmm_pool *dypool,
646                                 struct hmm_pool *repool,
647                                 int free_pgnr)
648 {
649         int i, ret;
650
651         for (i = 0; i < free_pgnr; i++) {
652                 switch (bo->page_obj[i].type) {
653                 case HMM_PAGE_TYPE_RESERVED:
654                         if (repool->pops
655                             && repool->pops->pool_free_pages) {
656                                 repool->pops->pool_free_pages(repool->pool_info,
657                                                         &bo->page_obj[i]);
658                                 hmm_mem_stat.res_cnt--;
659                         }
660                         break;
661                 /*
662                  * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
663                  * memory, so when free them, they should be put into dynamic
664                  * pool.
665                  */
666                 case HMM_PAGE_TYPE_DYNAMIC:
667                 case HMM_PAGE_TYPE_GENERAL:
668                         if (dypool->pops
669                             && dypool->pops->pool_inited
670                             && dypool->pops->pool_inited(dypool->pool_info)) {
671                                 if (dypool->pops->pool_free_pages)
672                                         dypool->pops->pool_free_pages(
673                                                               dypool->pool_info,
674                                                               &bo->page_obj[i]);
675                                 break;
676                         }
677
678                         /*
679                          * if dynamic memory pool doesn't exist, need to free
680                          * pages to system directly.
681                          */
682                 default:
683                         ret = set_pages_wb(bo->page_obj[i].page, 1);
684                         if (ret)
685                                 dev_err(atomisp_dev,
686                                                 "set page to WB err ...ret = %d\n",
687                                                         ret);
688                         /*
689                          * W/A: set_pages_wb() occasionally returns -EFAULT,
690                          * meaning the page address is outside the valid range
691                          * (0xffff880000000000~0xffffc7ffffffffff); __free_pages()
692                          * would then panic. It is unclear why the page address
693                          * is invalid; it may be memory corruption caused by low memory.
694                          */
695                         if (!ret) {
696                                 __free_pages(bo->page_obj[i].page, 0);
697                                 hmm_mem_stat.sys_size--;
698                         }
699                         break;
700                 }
701         }
702
703         return;
704 }
705
706 /* Allocate pages which will be used only by the ISP */
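/*
 * Allocation order: try the dynamic pool first, then the reserved pool,
 * and finally alloc_pages() with a decreasing block order, dropping to
 * single pages (and allowing reclaim) once higher orders start to fail.
 */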
707 static int alloc_private_pages(struct hmm_buffer_object *bo,
708                                 int from_highmem,
709                                 bool cached,
710                                 struct hmm_pool *dypool,
711                                 struct hmm_pool *repool)
712 {
713         int ret;
714         unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
715         struct page *pages;
716         gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
717         int i, j;
718         int failure_number = 0;
719         bool reduce_order = false;
720         bool lack_mem = true;
721
722         if (from_highmem)
723                 gfp |= __GFP_HIGHMEM;
724
725         pgnr = bo->pgnr;
726
727         bo->page_obj = atomisp_kernel_malloc(
728                                 sizeof(struct hmm_page_object) * pgnr);
729         if (unlikely(!bo->page_obj)) {
730                 dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
731                 return -ENOMEM;
732         }
733
734         i = 0;
735         alloc_pgnr = 0;
736
737         /*
738          * get physical pages from dynamic pages pool.
739          */
740         if (dypool->pops && dypool->pops->pool_alloc_pages) {
741                 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
742                                                         bo->page_obj, pgnr,
743                                                         cached);
744                 hmm_mem_stat.dyc_size -= alloc_pgnr;
745
746                 if (alloc_pgnr == pgnr)
747                         return 0;
748         }
749
750         pgnr -= alloc_pgnr;
751         i += alloc_pgnr;
752
753         /*
754          * get physical pages from reserved pages pool for atomisp.
755          */
756         if (repool->pops && repool->pops->pool_alloc_pages) {
757                 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
758                                                         &bo->page_obj[i], pgnr,
759                                                         cached);
760                 hmm_mem_stat.res_cnt += alloc_pgnr;
761                 if (alloc_pgnr == pgnr)
762                         return 0;
763         }
764
765         pgnr -= alloc_pgnr;
766         i += alloc_pgnr;
767
768         while (pgnr) {
769                 order = nr_to_order_bottom(pgnr);
770                 /*
771                  * if we are short of memory, we will set order to 0
772                  * every time.
773                  */
774                 if (lack_mem)
775                         order = HMM_MIN_ORDER;
776                 else if (order > HMM_MAX_ORDER)
777                         order = HMM_MAX_ORDER;
778 retry:
779                 /*
780                  * When order > HMM_MIN_ORDER, for performance reasons we don't
781                  * want alloc_pages() to sleep. In case it fails and falls back
782                  * to HMM_MIN_ORDER or in case the requested order is originally
783                  * the minimum value, we can allow alloc_pages() to sleep for
784                  * robustness purpose.
785                  *
786                  * REVISIT: why __GFP_FS is necessary?
787                  */
788                 if (order == HMM_MIN_ORDER) {
789                         gfp &= ~GFP_NOWAIT;
790                         gfp |= __GFP_RECLAIM | __GFP_FS;
791                 }
792
793                 pages = alloc_pages(gfp, order);
794                 if (unlikely(!pages)) {
795                         /*
796                          * in the low memory case, if page allocation fails,
797                          * we retry to see if an order=0 allocation could
798                          * succeed. if order=0 fails too, that means there is
799                          * no memory left.
800                          */
801                         if (order == HMM_MIN_ORDER) {
802                                 dev_err(atomisp_dev,
803                                         "%s: cannot allocate pages\n",
804                                          __func__);
805                                 goto cleanup;
806                         }
807                         order = HMM_MIN_ORDER;
808                         failure_number++;
809                         reduce_order = true;
810                         /*
811                          * if allocation fails twice in a row, we assume we
812                          * are short of memory now.
813                          */
814                         if (failure_number == 2) {
815                                 lack_mem = true;
816                                 failure_number = 0;
817                         }
818                         goto retry;
819                 } else {
820                         blk_pgnr = order_to_nr(order);
821
822                         if (!cached) {
823                                 /*
824                                  * set memory to uncacheable -- UC_MINUS
825                                  */
826                                 ret = set_pages_uc(pages, blk_pgnr);
827                                 if (ret) {
828                                         dev_err(atomisp_dev,
829                                                      "set page uncacheable "
830                                                         "failed.\n");
831
832                                         __free_pages(pages, order);
833
834                                         goto cleanup;
835                                 }
836                         }
837
838                         for (j = 0; j < blk_pgnr; j++) {
839                                 bo->page_obj[i].page = pages + j;
840                                 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
841                         }
842
843                         pgnr -= blk_pgnr;
844                         hmm_mem_stat.sys_size += blk_pgnr;
845
846                         /*
847                          * if order is not reduced this time, clear
848                          * failure_number.
849                          */
850                         if (reduce_order)
851                                 reduce_order = false;
852                         else
853                                 failure_number = 0;
854                 }
855         }
856
857         return 0;
858 cleanup:
859         alloc_pgnr = i;
860         free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
861
862         atomisp_kernel_free(bo->page_obj);
863
864         return -ENOMEM;
865 }
866
867 static void free_private_pages(struct hmm_buffer_object *bo,
868                                 struct hmm_pool *dypool,
869                                 struct hmm_pool *repool)
870 {
871         free_private_bo_pages(bo, dypool, repool, bo->pgnr);
872
873         atomisp_kernel_free(bo->page_obj);
874 }
875
876 /*
877  * Hacked from kernel function __get_user_pages in mm/memory.c
878  *
879  * Handles buffers allocated by another kernel-space driver and mmapped into user
880  * space; this function ignores the VM_PFNMAP and VM_IO flags in the VMA structure
881  *
882  * Get physical pages from user space virtual address and update into page list
883  */
884 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
885                               unsigned long start, int nr_pages,
886                               unsigned int gup_flags, struct page **pages,
887                               struct vm_area_struct **vmas)
888 {
889         int i, ret;
890         unsigned long vm_flags;
891
892         if (nr_pages <= 0)
893                 return 0;
894
895         VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
896
897         /*
898          * Require read or write permissions.
899          * If FOLL_FORCE is set, we only require the "MAY" flags.
900          */
901         vm_flags  = (gup_flags & FOLL_WRITE) ?
902                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
903         vm_flags &= (gup_flags & FOLL_FORCE) ?
904                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
905         i = 0;
906
907         do {
908                 struct vm_area_struct *vma;
909
910                 vma = find_vma(mm, start);
911                 if (!vma) {
912                         dev_err(atomisp_dev, "find_vma failed\n");
913                         return i ? : -EFAULT;
914                 }
915
916                 if (is_vm_hugetlb_page(vma)) {
917                         /*
918                          * hugetlb mappings are not supported here; bail out
919                          * instead of looping forever on the same vma.
920                          */
921                         return i ? : -EFAULT;
922                 }
923
924                 do {
925                         struct page *page;
926                         unsigned long pfn;
927
928                         /*
929                          * If we have a pending SIGKILL, don't keep faulting
930                          * pages and potentially allocating memory.
931                          */
932                         if (unlikely(fatal_signal_pending(current))) {
933                                 dev_err(atomisp_dev,
934                                         "fatal_signal_pending in %s\n",
935                                         __func__);
936                                 return i ? i : -ERESTARTSYS;
937                         }
938
939                         ret = follow_pfn(vma, start, &pfn);
940                         if (ret) {
941                                 dev_err(atomisp_dev, "follow_pfn() failed\n");
942                                 return i ? : -EFAULT;
943                         }
944
945                         page = pfn_to_page(pfn);
946                         if (IS_ERR(page))
947                                 return i ? i : PTR_ERR(page);
948                         if (pages) {
949                                 pages[i] = page;
950                                 get_page(page);
951                                 flush_anon_page(vma, page, start);
952                                 flush_dcache_page(page);
953                         }
954                         if (vmas)
955                                 vmas[i] = vma;
956                         i++;
957                         start += PAGE_SIZE;
958                         nr_pages--;
959                 } while (nr_pages && start < vma->vm_end);
960         } while (nr_pages);
961
962         return i;
963 }
964
965 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
966                      unsigned long start, int nr_pages, int write, int force,
967                      struct page **pages, struct vm_area_struct **vmas)
968 {
969         int flags = FOLL_TOUCH;
970
971         if (pages)
972                 flags |= FOLL_GET;
973         if (write)
974                 flags |= FOLL_WRITE;
975         if (force)
976                 flags |= FOLL_FORCE;
977
978         return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
979 }
980
981 /*
982  * Convert user space virtual address into pages list
983  */
984 static int alloc_user_pages(struct hmm_buffer_object *bo,
985                               void *userptr, bool cached)
986 {
987         int page_nr;
988         int i;
989         struct vm_area_struct *vma;
990         struct page **pages;
991
992         pages = atomisp_kernel_malloc(sizeof(struct page *) * bo->pgnr);
993         if (unlikely(!pages)) {
994                 dev_err(atomisp_dev, "out of memory for pages...\n");
995                 return -ENOMEM;
996         }
997
998         bo->page_obj = atomisp_kernel_malloc(
999                                 sizeof(struct hmm_page_object) * bo->pgnr);
1000         if (unlikely(!bo->page_obj)) {
1001                 dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
1002                 atomisp_kernel_free(pages);
1003                 return -ENOMEM;
1004         }
1005
1006         mutex_unlock(&bo->mutex);
1007         down_read(&current->mm->mmap_sem);
1008         vma = find_vma(current->mm, (unsigned long)userptr);
1009         up_read(&current->mm->mmap_sem);
1010         if (vma == NULL) {
1011                 dev_err(atomisp_dev, "find_vma failed\n");
1012                 atomisp_kernel_free(bo->page_obj);
1013                 atomisp_kernel_free(pages);
1014                 mutex_lock(&bo->mutex);
1015                 return -EFAULT;
1016         }
1017         mutex_lock(&bo->mutex);
1018         /*
1019          * Handle a frame buffer allocated by another kernel-space driver
1020          * and mapped into user space
1021          */
1022         if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1023                 page_nr = get_pfnmap_pages(current, current->mm,
1024                                            (unsigned long)userptr,
1025                                            (int)(bo->pgnr), 1, 0,
1026                                            pages, NULL);
1027                 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1028         } else {
1029                 /* Handle a frame buffer allocated in user space */
1030                 mutex_unlock(&bo->mutex);
1031                 down_read(&current->mm->mmap_sem);
1032                 page_nr = get_user_pages((unsigned long)userptr,
1033                                          (int)(bo->pgnr), 1, pages, NULL);
1034                 up_read(&current->mm->mmap_sem);
1035                 mutex_lock(&bo->mutex);
1036                 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1037         }
1038
1039         /* can be written by caller, not forced */
1040         if (page_nr != bo->pgnr) {
1041                 dev_err(atomisp_dev,
1042                                 "get_user_pages err: bo->pgnr = %d, "
1043                                 "pgnr actually pinned = %d.\n",
1044                                 bo->pgnr, page_nr);
1045                 goto out_of_mem;
1046         }
1047
1048         for (i = 0; i < bo->pgnr; i++) {
1049                 bo->page_obj[i].page = pages[i];
1050                 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1051         }
1052         hmm_mem_stat.usr_size += bo->pgnr;
1053         atomisp_kernel_free(pages);
1054
1055         return 0;
1056
1057 out_of_mem:
1058         for (i = 0; i < page_nr; i++)
1059                 put_page(pages[i]);
1060         atomisp_kernel_free(pages);
1061         atomisp_kernel_free(bo->page_obj);
1062
1063         return -ENOMEM;
1064 }
1065
1066 static void free_user_pages(struct hmm_buffer_object *bo)
1067 {
1068         int i;
1069
1070         for (i = 0; i < bo->pgnr; i++)
1071                 put_page(bo->page_obj[i].page);
1072         hmm_mem_stat.usr_size -= bo->pgnr;
1073
1074         atomisp_kernel_free(bo->page_obj);
1075 }
1076
1077 /*
1078  * allocate/free physical pages for the bo.
1079  *
1080  * type indicates where the pages come from. currently we have 3 types
1081  * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1082  *
1083  * from_highmem is only valid when type is HMM_BO_PRIVATE, it will
1084  * try to alloc memory from highmem if from_highmem is set.
1085  *
1086  * userptr is only valid when type is HMM_BO_USER, it indicates
1087  * the start address from user space task.
1088  *
1089  * from_highmem and userptr will both be ignored when type is
1090  * HMM_BO_SHARE.
1091  */
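/*
 * Illustrative call (hypothetical, not taken from a real caller):
 *
 *      ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *
 * backs 'bo' with uncached ISP-private pages; with HMM_BO_USER, the
 * 'userptr' argument supplies the user-space start address instead.
 */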
1092 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1093                        enum hmm_bo_type type, int from_highmem,
1094                        void *userptr, bool cached)
1095 {
1096         int ret = -EINVAL;
1097
1098         check_bo_null_return(bo, -EINVAL);
1099
1100         mutex_lock(&bo->mutex);
1101         check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1102
1103         /*
1104          * TO DO:
1105          * add HMM_BO_USER type
1106          */
1107         if (type == HMM_BO_PRIVATE) {
1108                 ret = alloc_private_pages(bo, from_highmem,
1109                                 cached, &dynamic_pool, &reserved_pool);
1110         } else if (type == HMM_BO_USER) {
1111                 ret = alloc_user_pages(bo, userptr, cached);
1112         } else {
1113                 dev_err(atomisp_dev, "invalid buffer type.\n");
1114                 ret = -EINVAL;
1115         }
1116         if (ret)
1117                 goto alloc_err;
1118
1119         bo->type = type;
1120
1121         bo->status |= HMM_BO_PAGE_ALLOCED;
1122
1123         mutex_unlock(&bo->mutex);
1124
1125         return 0;
1126
1127 alloc_err:
1128         mutex_unlock(&bo->mutex);
1129         dev_err(atomisp_dev, "alloc pages err...\n");
1130         return ret;
1131 status_err:
1132         mutex_unlock(&bo->mutex);
1133         dev_err(atomisp_dev,
1134                         "buffer object already has pages allocated.\n");
1135         return -EINVAL;
1136 }
1137
1138 /*
1139  * free physical pages of the bo.
1140  */
1141 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1142 {
1143         check_bo_null_return_void(bo);
1144
1145         mutex_lock(&bo->mutex);
1146
1147         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1148
1149         /* clear the flag anyway. */
1150         bo->status &= (~HMM_BO_PAGE_ALLOCED);
1151
1152         if (bo->type == HMM_BO_PRIVATE)
1153                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1154         else if (bo->type == HMM_BO_USER)
1155                 free_user_pages(bo);
1156         else
1157                 dev_err(atomisp_dev, "invalid buffer type.\n");
1158         mutex_unlock(&bo->mutex);
1159
1160         return;
1161
1162 status_err2:
1163         mutex_unlock(&bo->mutex);
1164         dev_err(atomisp_dev,
1165                         "buffer object has no pages allocated yet.\n");
1166 }
1167
1168 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1169 {
1170         int ret;
1171
1172         check_bo_null_return(bo, 0);
1173
1174         ret = bo->status & HMM_BO_PAGE_ALLOCED;
1175
1176         return ret;
1177 }
1178
1179 /*
1180  * get physical page info of the bo.
1181  */
1182 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1183                          struct hmm_page_object **page_obj, int *pgnr)
1184 {
1185         check_bo_null_return(bo, -EINVAL);
1186
1187         mutex_lock(&bo->mutex);
1188
1189         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1190
1191         *page_obj = bo->page_obj;
1192         *pgnr = bo->pgnr;
1193
1194         mutex_unlock(&bo->mutex);
1195
1196         return 0;
1197
1198 status_err:
1199         dev_err(atomisp_dev,
1200                         "buffer object has no pages allocated yet.\n");
1201         mutex_unlock(&bo->mutex);
1202         return -EINVAL;
1203 }
1204
1205 /*
1206  * bind the physical pages to a virtual address space.
1207  */
1208 int hmm_bo_bind(struct hmm_buffer_object *bo)
1209 {
1210         int ret;
1211         unsigned int virt;
1212         struct hmm_bo_device *bdev;
1213         unsigned int i;
1214
1215         check_bo_null_return(bo, -EINVAL);
1216
1217         mutex_lock(&bo->mutex);
1218
1219         check_bo_status_yes_goto(bo,
1220                                    HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1221                                    status_err1);
1222
1223         check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1224
1225         bdev = bo->bdev;
1226
1227         virt = bo->start;
1228
1229         for (i = 0; i < bo->pgnr; i++) {
1230                 ret =
1231                     isp_mmu_map(&bdev->mmu, virt,
1232                                 page_to_phys(bo->page_obj[i].page), 1);
1233                 if (ret)
1234                         goto map_err;
1235                 virt += (1 << PAGE_SHIFT);
1236         }
1237
1238         /*
1239          * flush TLB here.
1240          *
1241          * theoretically, we do not need to flush the TLB as we did not change
1242          * any existing address mappings, but Silicon Hive's MMU really is
1243          * buggy here. It seems that when fetching PTEs (page table entries)
1244          * into the TLB, the MMU fetches additional INVALID PTEs automatically
1245          * for performance reasons. E.g. we only set up 1 page address mapping,
1246          * meaning 1 PTE is updated, but the MMU fetches 4 PTEs at a time,
1247          * so the additional 3 PTEs are invalid.
1248          */
1249         if (bo->start != 0x0)
1250                 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1251                                                 (bo->pgnr << PAGE_SHIFT));
1252
1253         bo->status |= HMM_BO_BINDED;
1254
1255         mutex_unlock(&bo->mutex);
1256
1257         return 0;
1258
1259 map_err:
1260         /* unbind the physical pages with related virtual address space */
1261         virt = bo->start;
1262         for ( ; i > 0; i--) {
1263                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1264                 virt += pgnr_to_size(1);
1265         }
1266
1267         mutex_unlock(&bo->mutex);
1268         dev_err(atomisp_dev,
1269                         "setup MMU address mapping failed.\n");
1270         return ret;
1271
1272 status_err2:
1273         mutex_unlock(&bo->mutex);
1274         dev_err(atomisp_dev, "buffer object already bound.\n");
1275         return -EINVAL;
1276 status_err1:
1277         mutex_unlock(&bo->mutex);
1278         dev_err(atomisp_dev,
1279                      "buffer object vm_node or page not allocated.\n");
1280         return -EINVAL;
1281 }
1282
1283 /*
1284  * unbind the physical pages with related virtual address space.
1285  */
1286 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1287 {
1288         unsigned int virt;
1289         struct hmm_bo_device *bdev;
1290         unsigned int i;
1291
1292         check_bo_null_return_void(bo);
1293
1294         mutex_lock(&bo->mutex);
1295
1296         check_bo_status_yes_goto(bo,
1297                                    HMM_BO_PAGE_ALLOCED |
1298                                    HMM_BO_ALLOCED |
1299                                    HMM_BO_BINDED, status_err);
1300
1301         bdev = bo->bdev;
1302
1303         virt = bo->start;
1304
1305         for (i = 0; i < bo->pgnr; i++) {
1306                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1307                 virt += pgnr_to_size(1);
1308         }
1309
1310         /*
1311          * flush TLB as the address mapping has been removed and
1312          * related TLBs should be invalidated.
1313          */
1314         isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1315                                 (bo->pgnr << PAGE_SHIFT));
1316
1317         bo->status &= (~HMM_BO_BINDED);
1318
1319         mutex_unlock(&bo->mutex);
1320
1321         return;
1322
1323 status_err:
1324         mutex_unlock(&bo->mutex);
1325         dev_err(atomisp_dev,
1326                      "buffer vm or pages not allocated or not bound yet.\n");
1327 }
1328
1329 int hmm_bo_binded(struct hmm_buffer_object *bo)
1330 {
1331         int ret;
1332
1333         check_bo_null_return(bo, 0);
1334
1335         mutex_lock(&bo->mutex);
1336
1337         ret = bo->status & HMM_BO_BINDED;
1338
1339         mutex_unlock(&bo->mutex);
1340
1341         return ret;
1342 }
1343
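/*
 * hmm_bo_vmap() returns a kernel virtual mapping of the bo's pages.  If a
 * mapping with the other cache attribute already exists, it is torn down
 * and rebuilt with the requested one.
 */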
1344 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1345 {
1346         struct page **pages;
1347         int i;
1348
1349         check_bo_null_return(bo, NULL);
1350
1351         mutex_lock(&bo->mutex);
1352         if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1353             ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1354                 mutex_unlock(&bo->mutex);
1355                 return bo->vmap_addr;
1356         }
1357
1358         /* cached status needs to be changed, so vunmap first */
1359         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1360                 vunmap(bo->vmap_addr);
1361                 bo->vmap_addr = NULL;
1362                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1363         }
1364
1365         pages = atomisp_kernel_malloc(sizeof(*pages) * bo->pgnr);
1366         if (unlikely(!pages)) {
1367                 mutex_unlock(&bo->mutex);
1368                 dev_err(atomisp_dev, "out of memory for pages...\n");
1369                 return NULL;
1370         }
1371
1372         for (i = 0; i < bo->pgnr; i++)
1373                 pages[i] = bo->page_obj[i].page;
1374
1375         bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1376                 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1377         if (unlikely(!bo->vmap_addr)) {
1378                 atomisp_kernel_free(pages);
1379                 mutex_unlock(&bo->mutex);
1380                 dev_err(atomisp_dev, "vmap failed...\n");
1381                 return NULL;
1382         }
1383         bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1384
1385         atomisp_kernel_free(pages);
1386
1387         mutex_unlock(&bo->mutex);
1388         return bo->vmap_addr;
1389 }
1390
1391 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1392 {
1393         check_bo_null_return_void(bo);
1394
1395         mutex_lock(&bo->mutex);
1396         if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1397                 mutex_unlock(&bo->mutex);
1398                 return;
1399         }
1400
1401         clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1402         mutex_unlock(&bo->mutex);
1403 }
1404
1405 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1406 {
1407         check_bo_null_return_void(bo);
1408
1409         mutex_lock(&bo->mutex);
1410         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1411                 vunmap(bo->vmap_addr);
1412                 bo->vmap_addr = NULL;
1413                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1414         }
1415
1416         mutex_unlock(&bo->mutex);
1417         return;
1418 }
1419
1420 void hmm_bo_ref(struct hmm_buffer_object *bo)
1421 {
1422         check_bo_null_return_void(bo);
1423
1424         kref_get(&bo->kref);
1425 }
1426
1427 static void kref_hmm_bo_release(struct kref *kref)
1428 {
1429         if (!kref)
1430                 return;
1431
1432         hmm_bo_release(kref_to_hmm_bo(kref));
1433 }
1434
1435 void hmm_bo_unref(struct hmm_buffer_object *bo)
1436 {
1437         check_bo_null_return_void(bo);
1438
1439         kref_put(&bo->kref, kref_hmm_bo_release);
1440 }
1441
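/*
 * vm_operations for the user-space mappings set up by hmm_bo_mmap():
 * open/close keep the bo reference count and bo->mmap_count in step with
 * the number of live VMAs, clearing HMM_BO_MMAPED when the last one goes.
 */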
1442 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1443 {
1444         struct hmm_buffer_object *bo =
1445             (struct hmm_buffer_object *)vma->vm_private_data;
1446
1447         check_bo_null_return_void(bo);
1448
1449         hmm_bo_ref(bo);
1450
1451         mutex_lock(&bo->mutex);
1452
1453         bo->status |= HMM_BO_MMAPED;
1454
1455         bo->mmap_count++;
1456
1457         mutex_unlock(&bo->mutex);
1458 }
1459
1460 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1461 {
1462         struct hmm_buffer_object *bo =
1463             (struct hmm_buffer_object *)vma->vm_private_data;
1464
1465         check_bo_null_return_void(bo);
1466
1467         hmm_bo_unref(bo);
1468
1469         mutex_lock(&bo->mutex);
1470
1471         bo->mmap_count--;
1472
1473         if (!bo->mmap_count) {
1474                 bo->status &= (~HMM_BO_MMAPED);
1475                 vma->vm_private_data = NULL;
1476         }
1477
1478         mutex_unlock(&bo->mutex);
1479 }
1480
1481 static const struct vm_operations_struct hmm_bo_vm_ops = {
1482         .open = hmm_bo_vm_open,
1483         .close = hmm_bo_vm_close,
1484 };
1485
1486 /*
1487  * mmap the bo to user space.
1488  */
1489 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1490 {
1491         unsigned int start, end;
1492         unsigned int virt;
1493         unsigned int pgnr, i;
1494         unsigned int pfn;
1495
1496         check_bo_null_return(bo, -EINVAL);
1497
1498         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1499
1500         pgnr = bo->pgnr;
1501         start = vma->vm_start;
1502         end = vma->vm_end;
1503
1504         /*
1505          * the vma's virtual address space size and the buffer object's
1506          * size must be the same.
1507          */
1508         if ((start + pgnr_to_size(pgnr)) != end) {
1509                 dev_warn(atomisp_dev,
1510                              "vma's address space size not equal"
1511                              " to buffer object's size");
1512                 return -EINVAL;
1513         }
1514
1515         virt = vma->vm_start;
1516         for (i = 0; i < pgnr; i++) {
1517                 pfn = page_to_pfn(bo->page_obj[i].page);
1518                 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1519                         dev_warn(atomisp_dev,
1520                                         "remap_pfn_range failed:"
1521                                         " virt = 0x%x, pfn = 0x%x,"
1522                                         " mapped_pgnr = %d\n", virt, pfn, 1);
1523                         return -EINVAL;
1524                 }
1525                 virt += PAGE_SIZE;
1526         }
1527
1528         vma->vm_private_data = bo;
1529
1530         vma->vm_ops = &hmm_bo_vm_ops;
1531         vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
1532
1533         /*
1534          * call hmm_bo_vm_open() explicitly.
1535          */
1536         hmm_bo_vm_open(vma);
1537
1538         return 0;
1539
1540 status_err:
1541         dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1542         return -EINVAL;
1543 }