Commit | Line | Data |
---|---|---|
df7e762d PYM |
1 | /* |
2 | * | |
3 | * Copyright (C) STMicroelectronics SA 2017 | |
4 | * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com> | |
5 | * Pierre-Yves Mordret <pierre-yves.mordret@st.com> | |
6 | * | |
7 | * License terms: GPL V2.0. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify it | |
10 | * under the terms of the GNU General Public License version 2 as published by | |
11 | * the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more | |
16 | * details. | |
17 | * | |
18 | * DMA Router driver for STM32 DMA MUX | |
19 | * | |
20 | * Based on TI DMA Crossbar driver | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/clk.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/err.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/of_device.h> | |
30 | #include <linux/of_dma.h> | |
31 | #include <linux/reset.h> | |
32 | #include <linux/slab.h> | |
33 | #include <linux/spinlock.h> | |
34 | ||
/* One 32-bit channel configuration register (CCR) per MUX output channel. */
#define STM32_DMAMUX_CCR(x) (0x4 * (x))
/* Hardware limit on MUX output channels (bits tracked in dma_inuse). */
#define STM32_DMAMUX_MAX_DMA_REQUESTS 32
/* Default number of MUX input request lines when DT gives no "dma-requests". */
#define STM32_DMAMUX_MAX_REQUESTS 255
38 | ||
/*
 * Per-route state, returned as route_data by stm32_dmamux_route_allocate()
 * and released in stm32_dmamux_free().
 */
struct stm32_dmamux {
	u32 master;	/* index of the DMA master this route was steered to */
	u32 request;	/* MUX input request line taken from the DMA spec */
	u32 chan_id;	/* MUX output channel claimed in dma_inuse */
};
44 | ||
/* Driver instance state, allocated in probe and stored as drvdata. */
struct stm32_dmamux_data {
	struct dma_router dmarouter;	/* registered with of_dma_router_register() */
	struct clk *clk;		/* optional register-interface clock */
	struct reset_control *rst;	/* optional reset line, pulsed at probe */
	void __iomem *iomem;		/* mapped DMAMUX register bank */
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Used DMA channel */
	u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
			 * [0] holds number of DMA Masters.
			 * To be kept at the very end of this structure
			 */
};
59 | ||
60 | static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg) | |
61 | { | |
62 | return readl_relaxed(iomem + reg); | |
63 | } | |
64 | ||
65 | static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val) | |
66 | { | |
67 | writel_relaxed(val, iomem + reg); | |
68 | } | |
69 | ||
/*
 * stm32_dmamux_free - dma_router .route_free callback.
 * @dev: the DMAMUX platform device (drvdata is struct stm32_dmamux_data)
 * @route_data: the struct stm32_dmamux handed out by route_allocate
 *
 * Disconnects the MUX output channel, returns it to the free pool and
 * frees the per-route state.
 */
static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	/* Balances the clk_enable() done when this route was allocated. */
	if (!IS_ERR(dmamux->clk))
		clk_disable(dmamux->clk);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}
92 | ||
93 | static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | |
94 | struct of_dma *ofdma) | |
95 | { | |
96 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | |
97 | struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev); | |
98 | struct stm32_dmamux *mux; | |
99 | u32 i, min, max; | |
100 | int ret; | |
101 | unsigned long flags; | |
102 | ||
103 | if (dma_spec->args_count != 3) { | |
104 | dev_err(&pdev->dev, "invalid number of dma mux args\n"); | |
105 | return ERR_PTR(-EINVAL); | |
106 | } | |
107 | ||
108 | if (dma_spec->args[0] > dmamux->dmamux_requests) { | |
109 | dev_err(&pdev->dev, "invalid mux request number: %d\n", | |
110 | dma_spec->args[0]); | |
111 | return ERR_PTR(-EINVAL); | |
112 | } | |
113 | ||
114 | mux = kzalloc(sizeof(*mux), GFP_KERNEL); | |
115 | if (!mux) | |
116 | return ERR_PTR(-ENOMEM); | |
117 | ||
118 | spin_lock_irqsave(&dmamux->lock, flags); | |
119 | mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, | |
120 | dmamux->dma_requests); | |
121 | set_bit(mux->chan_id, dmamux->dma_inuse); | |
122 | spin_unlock_irqrestore(&dmamux->lock, flags); | |
123 | ||
124 | if (mux->chan_id == dmamux->dma_requests) { | |
125 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | |
126 | ret = -ENOMEM; | |
127 | goto error; | |
128 | } | |
129 | ||
130 | /* Look for DMA Master */ | |
131 | for (i = 1, min = 0, max = dmamux->dma_reqs[i]; | |
132 | i <= dmamux->dma_reqs[0]; | |
133 | min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i]) | |
134 | if (mux->chan_id < max) | |
135 | break; | |
136 | mux->master = i - 1; | |
137 | ||
138 | /* The of_node_put() will be done in of_dma_router_xlate function */ | |
139 | dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1); | |
140 | if (!dma_spec->np) { | |
141 | dev_err(&pdev->dev, "can't get dma master\n"); | |
142 | ret = -EINVAL; | |
143 | goto error; | |
144 | } | |
145 | ||
146 | /* Set dma request */ | |
147 | spin_lock_irqsave(&dmamux->lock, flags); | |
148 | if (!IS_ERR(dmamux->clk)) { | |
149 | ret = clk_enable(dmamux->clk); | |
150 | if (ret < 0) { | |
151 | spin_unlock_irqrestore(&dmamux->lock, flags); | |
152 | dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret); | |
153 | goto error; | |
154 | } | |
155 | } | |
156 | spin_unlock_irqrestore(&dmamux->lock, flags); | |
157 | ||
158 | mux->request = dma_spec->args[0]; | |
159 | ||
160 | /* craft DMA spec */ | |
161 | dma_spec->args[3] = dma_spec->args[2]; | |
162 | dma_spec->args[2] = dma_spec->args[1]; | |
163 | dma_spec->args[1] = 0; | |
164 | dma_spec->args[0] = mux->chan_id - min; | |
165 | dma_spec->args_count = 4; | |
166 | ||
167 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), | |
168 | mux->request); | |
169 | dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n", | |
170 | mux->request, mux->master, mux->chan_id); | |
171 | ||
172 | return mux; | |
173 | ||
174 | error: | |
175 | clear_bit(mux->chan_id, dmamux->dma_inuse); | |
176 | kfree(mux); | |
177 | return ERR_PTR(ret); | |
178 | } | |
179 | ||
/* Only st,stm32-dma controllers are accepted as "dma-masters" targets. */
static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};
184 | ||
/*
 * stm32_dmamux_probe - map the DMAMUX, size its routing tables from DT and
 * register it as a DMA router.
 *
 * Return: 0 on success, negative errno otherwise.  All allocations and the
 * MMIO mapping are devm-managed, so error paths need no manual cleanup.
 */
static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	/* A zero-length read yields the number of "dma-masters" phandles. */
	count = device_property_read_u32_array(&pdev->dev, "dma-masters",
					       NULL, 0);
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	/* +1: dma_reqs[0] stores the master count, [1..count] the sizes. */
	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	/* Validate each master and accumulate the total channel count. */
	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	/* Bitmap of in-use MUX output channels, one bit per channel. */
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	/* Number of MUX input request lines; DT may override the default. */
	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	/* Clock is optional; an ERR_PTR clk is tolerated everywhere below. */
	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk)) {
		ret = PTR_ERR(stm32_dmamux->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	/* Optional reset: pulse it to bring the MUX to a known state. */
	/* NOTE(review): devm_reset_control_get() is the deprecated
	 * exclusive-by-default variant — consider
	 * devm_reset_control_get_exclusive().
	 */
	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(stm32_dmamux->rst)) {
		reset_control_assert(stm32_dmamux->rst);
		udelay(2);
		reset_control_deassert(stm32_dmamux->rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);

	/* Prepare once here; route_allocate/free only enable/disable. */
	if (!IS_ERR(stm32_dmamux->clk)) {
		ret = clk_prepare_enable(stm32_dmamux->clk);
		if (ret < 0) {
			dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
			return ret;
		}
	}

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	if (!IS_ERR(stm32_dmamux->clk))
		clk_disable(stm32_dmamux->clk);

	return of_dma_router_register(node, stm32_dmamux_route_allocate,
				      &stm32_dmamux->dmarouter);
}
304 | ||
/* DT match table for the DMAMUX device itself. */
static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};
309 | ||
/* No .remove: the router must stay registered for the system's lifetime. */
static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
	},
};
317 | ||
static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
/* arch_initcall: register before DMA consumers probe and request routes. */
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");