/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

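/**
 * ion_heap_map_kernel - map a buffer's pages into the kernel address space
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 *
 * Walks @buffer's scatterlist, gathers the backing pages into a temporary
 * array and maps them contiguously with vmap().  Cached buffers get a
 * normal kernel mapping; uncached buffers are mapped write-combined.
 * Returns the kernel virtual address on success or NULL on failure.
 */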
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

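/**
 * ion_heap_unmap_kernel - undo ion_heap_map_kernel()
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer whose kernel mapping to remove
 *
 * Tears down the vmap() mapping stored in @buffer->vaddr.
 */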
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

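/**
 * ion_heap_map_user - map a buffer into a userspace vma
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 * @vma:	the vma to map the buffer's pages into
 *
 * Walks the buffer's scatterlist and remaps each chunk into @vma with
 * remap_pfn_range(), honouring the mapping offset in @vma->vm_pgoff and
 * stopping once the vma has been filled.  Returns 0 on success or a
 * negative errno if remap_pfn_range() fails.
 */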
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);
		int ret;

		if (offset >= sg_dma_len(sg)) {
			/* this entry lies entirely before the mapping offset */
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			/* the mapping starts part-way into this entry */
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

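/**
 * ion_heap_buffer_zero - clear a buffer's memory
 * @buffer:	the buffer to zero
 *
 * Rather than mapping the whole (potentially large) buffer at once, this
 * reserves a single-page vm area and then maps, zeroes and unmaps the
 * buffer one page at a time, using the same cacheability attributes the
 * buffer was allocated with.  Returns 0 on success or a negative errno.
 */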
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	struct vm_struct *vm_struct;
	int i, j, ret = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct)
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg_dma_len(sg);

		for (j = 0; j < len / PAGE_SIZE; j++) {
			struct page *sub_page = page + j;
			struct page **pages = &sub_page;

			ret = map_vm_area(vm_struct, pgprot, &pages);
			if (ret)
				goto end;
			memset(vm_struct->addr, 0, PAGE_SIZE);
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}
	}
end:
	free_vm_area(vm_struct);
	return ret;
}

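/**
 * ion_heap_create - instantiate a heap described by platform data
 * @heap_data:	the platform heap description
 *
 * Dispatches to the constructor matching @heap_data->type and fills in
 * the generic name and id fields.  Returns the new heap on success or
 * ERR_PTR(-EINVAL) on failure.
 */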
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

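/**
 * ion_heap_destroy - tear down a heap created by ion_heap_create()
 * @heap:	the heap to destroy, may be NULL
 */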
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}