drivers/misc/sram.c — Generic on-chip SRAM allocation driver
(source tree: linux-2.6-block.git)
1/*
2 * Generic on-chip SRAM allocation driver
3 *
4 * Copyright (C) 2012 Philipp Zabel, Pengutronix
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
18 * MA 02110-1301, USA.
19 */
20
4984c6f5 21#include <linux/clk.h>
98ce2d27 22#include <linux/genalloc.h>
4984c6f5 23#include <linux/io.h>
2da19688 24#include <linux/list_sort.h>
98ce2d27 25#include <linux/of_address.h>
4984c6f5
PZ
26#include <linux/platform_device.h>
27#include <linux/slab.h>
4984c6f5
PZ
28
29#define SRAM_GRANULARITY 32
30
/* Per-device state for one mmio-sram instance. */
struct sram_dev {
	struct device *dev;		/* backing platform device */
	void __iomem *virt_base;	/* ioremapped (write-combined) SRAM base */

	struct gen_pool *pool;		/* allocator handing out SRAM chunks */
	struct clk *clk;		/* optional functional clock, NULL if absent */
};
38
2da19688
HS
/*
 * One reserved range parsed from a DT child node, kept on a temporary
 * list while the usable chunks between reservations are computed.
 */
struct sram_reserve {
	struct list_head list;
	u32 start;	/* offset from the start of the SRAM region */
	u32 size;	/* length of the reserved range in bytes */
};
44
45static int sram_reserve_cmp(void *priv, struct list_head *a,
46 struct list_head *b)
47{
48 struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
49 struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
50
51 return ra->start - rb->start;
52}
53
a0a5be0b 54static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
4984c6f5 55{
a0a5be0b 56 struct device_node *np = sram->dev->of_node, *child;
2da19688
HS
57 unsigned long size, cur_start, cur_size;
58 struct sram_reserve *rblocks, *block;
59 struct list_head reserve_list;
60 unsigned int nblocks;
a0a5be0b 61 int ret = 0;
4984c6f5 62
2da19688
HS
63 INIT_LIST_HEAD(&reserve_list);
64
f3cbfa5d 65 size = resource_size(res);
4984c6f5 66
2da19688
HS
67 /*
68 * We need an additional block to mark the end of the memory region
69 * after the reserved blocks from the dt are processed.
70 */
71 nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
72 rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
ee895ccd
VZ
73 if (!rblocks)
74 return -ENOMEM;
4984c6f5 75
2da19688
HS
76 block = &rblocks[0];
77 for_each_available_child_of_node(np, child) {
78 struct resource child_res;
79
80 ret = of_address_to_resource(child, 0, &child_res);
81 if (ret < 0) {
665d82fb 82 dev_err(sram->dev,
2da19688
HS
83 "could not get address for node %s\n",
84 child->full_name);
b13365bb 85 of_node_put(child);
2da19688
HS
86 goto err_chunks;
87 }
88
89 if (child_res.start < res->start || child_res.end > res->end) {
665d82fb 90 dev_err(sram->dev,
2da19688
HS
91 "reserved block %s outside the sram area\n",
92 child->full_name);
93 ret = -EINVAL;
b13365bb 94 of_node_put(child);
2da19688
HS
95 goto err_chunks;
96 }
97
98 block->start = child_res.start - res->start;
99 block->size = resource_size(&child_res);
100 list_add_tail(&block->list, &reserve_list);
101
665d82fb
VZ
102 dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
103 block->start, block->start + block->size);
2da19688
HS
104
105 block++;
106 }
107
108 /* the last chunk marks the end of the region */
109 rblocks[nblocks - 1].start = size;
110 rblocks[nblocks - 1].size = 0;
111 list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);
112
113 list_sort(NULL, &reserve_list, sram_reserve_cmp);
114
115 cur_start = 0;
116
117 list_for_each_entry(block, &reserve_list, list) {
118 /* can only happen if sections overlap */
119 if (block->start < cur_start) {
665d82fb 120 dev_err(sram->dev,
2da19688
HS
121 "block at 0x%x starts after current offset 0x%lx\n",
122 block->start, cur_start);
123 ret = -EINVAL;
124 goto err_chunks;
125 }
126
127 /* current start is in a reserved block, so continue after it */
128 if (block->start == cur_start) {
129 cur_start = block->start + block->size;
130 continue;
131 }
132
133 /*
134 * allocate the space between the current starting
135 * address and the following reserved block, or the
136 * end of the region.
137 */
138 cur_size = block->start - cur_start;
139
665d82fb 140 dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
2da19688 141 cur_start, cur_start + cur_size);
665d82fb 142
2da19688 143 ret = gen_pool_add_virt(sram->pool,
665d82fb 144 (unsigned long)sram->virt_base + cur_start,
2da19688
HS
145 res->start + cur_start, cur_size, -1);
146 if (ret < 0)
147 goto err_chunks;
148
149 /* next allocation after this reserved block */
150 cur_start = block->start + block->size;
151 }
152
a0a5be0b 153 err_chunks:
2da19688
HS
154 kfree(rblocks);
155
a0a5be0b
VZ
156 return ret;
157}
158
159static int sram_probe(struct platform_device *pdev)
160{
161 struct sram_dev *sram;
162 struct resource *res;
163 size_t size;
164 int ret;
165
166 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
167 if (!sram)
168 return -ENOMEM;
169
170 sram->dev = &pdev->dev;
171
172 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
173 if (!res) {
174 dev_err(sram->dev, "found no memory resource\n");
175 return -EINVAL;
176 }
177
178 size = resource_size(res);
179
180 if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
181 dev_err(sram->dev, "could not request region for resource\n");
182 return -EBUSY;
183 }
184
185 sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
186 if (IS_ERR(sram->virt_base))
187 return PTR_ERR(sram->virt_base);
188
73858173
VZ
189 sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
190 NUMA_NO_NODE, NULL);
191 if (IS_ERR(sram->pool))
192 return PTR_ERR(sram->pool);
a0a5be0b
VZ
193
194 ret = sram_reserve_regions(sram, res);
195 if (ret)
196 return ret;
197
665d82fb 198 sram->clk = devm_clk_get(sram->dev, NULL);
ee895ccd
VZ
199 if (IS_ERR(sram->clk))
200 sram->clk = NULL;
201 else
202 clk_prepare_enable(sram->clk);
203
4984c6f5
PZ
204 platform_set_drvdata(pdev, sram);
205
665d82fb
VZ
206 dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
207 gen_pool_size(sram->pool) / 1024, sram->virt_base);
4984c6f5
PZ
208
209 return 0;
210}
211
212static int sram_remove(struct platform_device *pdev)
213{
214 struct sram_dev *sram = platform_get_drvdata(pdev);
215
216 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
665d82fb 217 dev_err(sram->dev, "removed while SRAM allocated\n");
4984c6f5 218
4984c6f5
PZ
219 if (sram->clk)
220 clk_disable_unprepare(sram->clk);
221
222 return 0;
223}
224
#ifdef CONFIG_OF
/* Device-tree match table: binds to "mmio-sram" nodes. */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{}
};
#endif
231
/* Platform driver glue; of_match_ptr() compiles the table out without OF. */
static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = of_match_ptr(sram_dt_ids),
	},
	.probe = sram_probe,
	.remove = sram_remove,
};
240
static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

/*
 * Registered at postcore level — presumably so SRAM pools are available
 * before consumers at later initcall levels probe; confirm with users.
 */
postcore_initcall(sram_init);