Commit | Line | Data |
---|---|---|
88139ed0 SS |
1 | /* |
2 | * Copyright (C) 2014 Texas Instruments Incorporated | |
3 | * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com> | |
4 | * Sandeep Nair <sandeep_n@ti.com> | |
5 | * Cyril Chemparathy <cyril@ti.com> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License as | |
9 | * published by the Free Software Foundation version 2. | |
10 | * | |
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | |
12 | * kind, whether express or implied; without even the implied warranty | |
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | */ | |
16 | ||
17 | #include <linux/io.h> | |
18 | #include <linux/sched.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/dma-direction.h> | |
21 | #include <linux/interrupt.h> | |
22 | #include <linux/pm_runtime.h> | |
23 | #include <linux/of_dma.h> | |
24 | #include <linux/of_address.h> | |
25 | #include <linux/platform_device.h> | |
26 | #include <linux/soc/ti/knav_dma.h> | |
27 | #include <linux/debugfs.h> | |
28 | #include <linux/seq_file.h> | |
29 | ||
30 | #define REG_MASK 0xffffffff | |
31 | ||
32 | #define DMA_LOOPBACK BIT(31) | |
33 | #define DMA_ENABLE BIT(31) | |
34 | #define DMA_TEARDOWN BIT(30) | |
35 | ||
36 | #define DMA_TX_FILT_PSWORDS BIT(29) | |
37 | #define DMA_TX_FILT_EINFO BIT(30) | |
38 | #define DMA_TX_PRIO_SHIFT 0 | |
39 | #define DMA_RX_PRIO_SHIFT 16 | |
40 | #define DMA_PRIO_MASK GENMASK(3, 0) | |
41 | #define DMA_PRIO_DEFAULT 0 | |
42 | #define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */ | |
43 | #define DMA_RX_TIMEOUT_MASK GENMASK(16, 0) | |
44 | #define DMA_RX_TIMEOUT_SHIFT 0 | |
45 | ||
46 | #define CHAN_HAS_EPIB BIT(30) | |
47 | #define CHAN_HAS_PSINFO BIT(29) | |
48 | #define CHAN_ERR_RETRY BIT(28) | |
49 | #define CHAN_PSINFO_AT_SOP BIT(25) | |
50 | #define CHAN_SOP_OFF_SHIFT 16 | |
51 | #define CHAN_SOP_OFF_MASK GENMASK(9, 0) | |
52 | #define DESC_TYPE_SHIFT 26 | |
53 | #define DESC_TYPE_MASK GENMASK(2, 0) | |
54 | ||
55 | /* | |
56 | * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical | |
57 | * navigator cloud mapping scheme. | |
58 | * using the 14bit physical queue numbers directly maps into this scheme. | |
59 | */ | |
60 | #define CHAN_QNUM_MASK GENMASK(14, 0) | |
61 | #define DMA_MAX_QMS 4 | |
62 | #define DMA_TIMEOUT 1 /* msecs */ | |
63 | #define DMA_INVALID_ID 0xffff | |
64 | ||
/* Memory-mapped global control registers of one packet DMA instance */
struct reg_global {
	u32 revision;			/* IP revision ID (read-only) */
	u32 perf_control;		/* performance control, incl. rx timeout */
	u32 emulation_control;		/* emulation / loopback control */
	u32 priority_control;		/* tx/rx starvation priority */
	u32 qm_base_address[DMA_MAX_QMS]; /* logical queue manager base addrs */
};
72 | ||
/* Per-channel control registers (shared layout for tx and rx channels) */
struct reg_chan {
	u32 control;		/* enable / teardown bits */
	u32 mode;		/* tx filter mode bits */
	u32 __rsvd[6];		/* pad to the hardware's per-channel stride */
};
78 | ||
/* Per-tx-channel scheduler register */
struct reg_tx_sched {
	u32 prio;		/* tx scheduling priority */
};
82 | ||
/* Per-rx-flow configuration registers */
struct reg_rx_flow {
	u32 control;		/* EPIB/PSINFO/error-mode/dst queue */
	u32 tags;		/* rx tag values */
	u32 tag_sel;		/* rx tag selection */
	u32 fdq_sel[2];		/* free descriptor queues, two per register */
	u32 thresh[3];		/* fdq size thresholds */
};
90 | ||
/* Driver-level singleton: owning device plus the list of DMA instances */
struct knav_dma_pool_device {
	struct device *dev;		/* platform device backing the driver */
	struct list_head list;		/* list of struct knav_dma_device */
};
95 | ||
/* One packet DMA hardware instance (one child node of the driver) */
struct knav_dma_device {
	bool loopback, enable_all;	/* DT-driven options */
	unsigned tx_priority, rx_priority, rx_timeout;
	unsigned logical_queue_managers; /* number of valid qm_base_address[] */
	unsigned qm_base_address[DMA_MAX_QMS];
	/* register region bases, mapped in dma_init() */
	struct reg_global __iomem *reg_global;
	struct reg_chan __iomem *reg_tx_chan;
	struct reg_rx_flow __iomem *reg_rx_flow;
	struct reg_chan __iomem *reg_rx_chan;
	struct reg_tx_sched __iomem *reg_tx_sched;
	unsigned max_rx_chan, max_tx_chan; /* counts derived from region sizes */
	unsigned max_rx_flow;
	char name[32];			/* DT node name, used for lookup */
	atomic_t ref_count;		/* open channels on this instance */
	struct list_head list;		/* link in kdev->list */
	struct list_head chan_list;	/* all channels of this instance */
	spinlock_t lock;		/* protects hw init/destroy */
};
114 | ||
/* One logical channel: either a tx channel or an rx flow of an instance */
struct knav_dma_chan {
	enum dma_transfer_direction direction; /* MEM_TO_DEV (tx) or DEV_TO_MEM (rx) */
	struct knav_dma_device *dma;	/* owning DMA instance */
	atomic_t ref_count;		/* times opened by clients */

	/* registers (only the set matching 'direction' is non-NULL) */
	struct reg_chan __iomem *reg_chan;
	struct reg_tx_sched __iomem *reg_tx_sched;
	struct reg_rx_flow __iomem *reg_rx_flow;

	/* configuration stuff */
	unsigned channel, flow;		/* the unused one is DMA_INVALID_ID */
	struct knav_dma_cfg cfg;	/* cached copy of the client's config */
	struct list_head list;		/* link in dma->chan_list */
	spinlock_t lock;		/* protects start/stop and cfg */
};
131 | ||
/*
 * chan_number() - logical identifier of a channel for logging/display.
 * TX channels are identified by channel number, RX channels by flow id.
 * The argument is fully parenthesized so the macro is safe with any
 * expression passed as @ch.
 */
#define chan_number(ch) (((ch)->direction == DMA_MEM_TO_DEV) ? \
			 (ch)->channel : (ch)->flow)
134 | ||
135 | static struct knav_dma_pool_device *kdev; | |
136 | ||
/* Set once probe has successfully initialized all DMA instances */
static bool device_ready;

/**
 * knav_dma_device_ready() - report whether the DMA driver has probed
 *
 * Return: true once knav_dma_probe() completed; clients poll this to
 * defer their own initialization against probe ordering.
 */
bool knav_dma_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_dma_device_ready);
143 | ||
88139ed0 SS |
144 | static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg) |
145 | { | |
146 | if (!memcmp(&chan->cfg, cfg, sizeof(*cfg))) | |
147 | return true; | |
148 | else | |
149 | return false; | |
150 | } | |
151 | ||
/*
 * chan_start() - program the channel/flow hardware from @cfg and cache
 * the configuration on the channel.
 *
 * TX channels: set the mode filter bits, then enable the channel.
 * RX flows: program EPIB/PSINFO/error-retry options, descriptor type,
 * SOP offset and destination queue into the flow control register, then
 * the free-descriptor queue pairs; tagging and thresholds are cleared.
 *
 * Always returns 0; runs under chan->lock.
 */
static int chan_start(struct knav_dma_chan *chan,
			struct knav_dma_cfg *cfg)
{
	u32 v = 0;

	spin_lock(&chan->lock);
	if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
		if (cfg->u.tx.filt_pswords)
			v |= DMA_TX_FILT_PSWORDS;
		if (cfg->u.tx.filt_einfo)
			v |= DMA_TX_FILT_EINFO;
		writel_relaxed(v, &chan->reg_chan->mode);
		writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
	}

	if (chan->reg_tx_sched)
		writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

	if (chan->reg_rx_flow) {
		v = 0;

		if (cfg->u.rx.einfo_present)
			v |= CHAN_HAS_EPIB;
		if (cfg->u.rx.psinfo_present)
			v |= CHAN_HAS_PSINFO;
		if (cfg->u.rx.err_mode == DMA_RETRY)
			v |= CHAN_ERR_RETRY;
		v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
		if (cfg->u.rx.psinfo_at_sop)
			v |= CHAN_PSINFO_AT_SOP;
		v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
			<< CHAN_SOP_OFF_SHIFT;
		v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

		writel_relaxed(v, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);

		/* each fdq_sel register holds a pair of free descriptor queues */
		v = cfg->u.rx.fdq[0] << 16;
		v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

		v = cfg->u.rx.fdq[2] << 16;
		v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* Keep a copy of the cfg */
	memcpy(&chan->cfg, cfg, sizeof(*cfg));
	spin_unlock(&chan->lock);

	return 0;
}
209 | ||
/*
 * chan_teardown() - request teardown of a tx channel and wait for the
 * hardware to self-disable.
 *
 * RX flows have no reg_chan and return 0 immediately.  Polls the control
 * register for up to DMA_TIMEOUT ms (busy-wait with relaxed reads).
 *
 * Return: 0 on success, -ETIMEDOUT if DMA_ENABLE never cleared.
 */
static int chan_teardown(struct knav_dma_chan *chan)
{
	unsigned long end, value;

	if (!chan->reg_chan)
		return 0;

	/* indicate teardown */
	writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

	/* wait for the dma to shut itself down */
	end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
	do {
		value = readl_relaxed(&chan->reg_chan->control);
		if ((value & DMA_ENABLE) == 0)
			break;
	} while (time_after(end, jiffies));

	/* re-read in case the loop exited on timeout just as it cleared */
	if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
		dev_err(kdev->dev, "timeout waiting for teardown\n");
		return -ETIMEDOUT;
	}

	return 0;
}
235 | ||
/*
 * chan_stop() - quiesce a channel and clear its cached configuration.
 *
 * For rx flows the free-descriptor queues are detached FIRST so the flow
 * starves (stops pulling new buffers), then the channel is torn down, and
 * only afterwards is the completion side (control/tags) disconnected.
 * This ordering avoids in-flight packets landing on a disabled flow.
 * Runs under chan->lock.
 */
static void chan_stop(struct knav_dma_chan *chan)
{
	spin_lock(&chan->lock);
	if (chan->reg_rx_flow) {
		/* first detach fdqs, starve out the flow */
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* teardown the dma channel */
	chan_teardown(chan);

	/* then disconnect the completion side */
	if (chan->reg_rx_flow) {
		writel_relaxed(0, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
	}

	memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
}
263 | ||
/*
 * dma_hw_enable_all() - enable every tx channel of an instance with a
 * cleared mode register.  Used for the "ti,enable-all" case where DSP or
 * userspace software manages the channels instead of kernel clients.
 */
static void dma_hw_enable_all(struct knav_dma_device *dma)
{
	int i;

	for (i = 0; i < dma->max_tx_chan; i++) {
		writel_relaxed(0, &dma->reg_tx_chan[i].mode);
		writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
	}
}
273 | ||
274 | ||
/*
 * knav_dma_hw_init() - one-time global setup of a DMA instance.
 *
 * Programs loopback mode, the rx retry timeout (OR-ed into perf_control),
 * tx/rx starvation priorities, enables all rx channels (rx is managed
 * through flows, not channels), and writes the logical queue manager base
 * addresses.  Called when the instance's ref_count first rises.
 * Runs under dma->lock.
 */
static void knav_dma_hw_init(struct knav_dma_device *dma)
{
	unsigned v;
	int i;

	spin_lock(&dma->lock);
	v = dma->loopback ? DMA_LOOPBACK : 0;
	writel_relaxed(v, &dma->reg_global->emulation_control);

	v = readl_relaxed(&dma->reg_global->perf_control);
	v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
	writel_relaxed(v, &dma->reg_global->perf_control);

	v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
	     (dma->rx_priority << DMA_RX_PRIO_SHIFT));

	writel_relaxed(v, &dma->reg_global->priority_control);

	/* Always enable all Rx channels. Rx paths are managed using flows */
	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->logical_queue_managers; i++)
		writel_relaxed(dma->qm_base_address[i],
			       &dma->reg_global->qm_base_address[i]);
	spin_unlock(&dma->lock);
}
302 | ||
/*
 * knav_dma_hw_destroy() - disable every rx and tx channel of an instance.
 * Writes the control registers with all bits set except DMA_ENABLE.
 * Called when the instance's ref_count drops to zero.  Runs under
 * dma->lock.
 */
static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
	int i;
	unsigned v;

	spin_lock(&dma->lock);
	v = ~DMA_ENABLE & REG_MASK;

	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(v, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->max_tx_chan; i++)
		writel_relaxed(v, &dma->reg_tx_chan[i].control);
	spin_unlock(&dma->lock);
}
318 | ||
319 | static void dma_debug_show_channels(struct seq_file *s, | |
320 | struct knav_dma_chan *chan) | |
321 | { | |
322 | int i; | |
323 | ||
324 | seq_printf(s, "\t%s %d:\t", | |
325 | ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"), | |
326 | chan_number(chan)); | |
327 | ||
328 | if (chan->direction == DMA_MEM_TO_DEV) { | |
329 | seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n", | |
330 | chan->cfg.u.tx.filt_einfo, | |
331 | chan->cfg.u.tx.filt_pswords, | |
332 | chan->cfg.u.tx.priority); | |
333 | } else { | |
334 | seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n", | |
335 | chan->cfg.u.rx.einfo_present, | |
336 | chan->cfg.u.rx.psinfo_present, | |
337 | chan->cfg.u.rx.desc_type); | |
338 | seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ", | |
339 | chan->cfg.u.rx.dst_q, | |
340 | chan->cfg.u.rx.thresh); | |
341 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++) | |
342 | seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]); | |
343 | seq_printf(s, "\n"); | |
344 | } | |
345 | } | |
346 | ||
347 | static void dma_debug_show_devices(struct seq_file *s, | |
348 | struct knav_dma_device *dma) | |
349 | { | |
350 | struct knav_dma_chan *chan; | |
351 | ||
352 | list_for_each_entry(chan, &dma->chan_list, list) { | |
353 | if (atomic_read(&chan->ref_count)) | |
354 | dma_debug_show_channels(s, chan); | |
355 | } | |
356 | } | |
357 | ||
/*
 * dma_debug_show() - seq_file show callback for the "knav_dma" debugfs
 * entry.  Prints a summary line for each in-use DMA instance followed by
 * the state of its open channels.  Always returns 0.
 */
static int dma_debug_show(struct seq_file *s, void *v)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
				   dma->name, dma->max_tx_chan, dma->max_rx_flow);
			dma_debug_show_devices(s, dma);
		}
	}

	return 0;
}
372 | ||
/* debugfs open callback: bind the seq_file single-show helper */
static int knav_dma_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_debug_show, NULL);
}
377 | ||
/* File operations for the "knav_dma" debugfs entry (read-only seq file) */
static const struct file_operations knav_dma_debug_ops = {
	.open		= knav_dma_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
384 | ||
385 | static int of_channel_match_helper(struct device_node *np, const char *name, | |
386 | const char **dma_instance) | |
387 | { | |
388 | struct of_phandle_args args; | |
389 | struct device_node *dma_node; | |
390 | int index; | |
391 | ||
392 | dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0); | |
393 | if (!dma_node) | |
394 | return -ENODEV; | |
395 | ||
396 | *dma_instance = dma_node->name; | |
397 | index = of_property_match_string(np, "ti,navigator-dma-names", name); | |
398 | if (index < 0) { | |
e3d132d1 | 399 | dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n"); |
88139ed0 SS |
400 | return -ENODEV; |
401 | } | |
402 | ||
403 | if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas", | |
404 | 1, index, &args)) { | |
4ee34aae | 405 | dev_err(kdev->dev, "Missing the phandle args name %s\n", name); |
88139ed0 SS |
406 | return -ENODEV; |
407 | } | |
408 | ||
409 | if (args.args[0] < 0) { | |
410 | dev_err(kdev->dev, "Missing args for %s\n", name); | |
411 | return -ENODEV; | |
412 | } | |
413 | ||
414 | return args.args[0]; | |
415 | } | |
416 | ||
/**
 * knav_dma_open_channel() - try to setup an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 * @config: dma configuration parameters
 *
 * Returns pointer to appropriate DMA channel on success or error.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
					struct knav_dma_cfg *config)
{
	struct knav_dma_chan *chan;
	struct knav_dma_device *dma;
	bool found = false;
	int chan_num = -1;
	const char *instance;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return (void *)-EINVAL;
	}

	/* Resolve channel number and instance name from the client's DT node */
	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
		return (void *)-EINVAL;
	}

	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive" :
		"unknown", chan_num, instance);

	if (config->direction != DMA_MEM_TO_DEV &&
	    config->direction != DMA_DEV_TO_MEM) {
		dev_err(kdev->dev, "bad direction\n");
		return (void *)-EINVAL;
	}

	/* Look for correct dma instance */
	list_for_each_entry(dma, &kdev->list, list) {
		if (!strcmp(dma->name, instance)) {
			found = true;
			break;
		}
	}
	if (!found) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
	found = false;
	list_for_each_entry(chan, &dma->chan_list, list) {
		/* tx channels match on channel number, rx on flow id */
		if (config->direction == DMA_MEM_TO_DEV) {
			if (chan->channel == chan_num) {
				found = true;
				break;
			}
		} else {
			if (chan->flow == chan_num) {
				found = true;
				break;
			}
		}
	}
	if (!found) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
				chan_num, instance);
		return (void *)-EINVAL;
	}

	/* A second opener must request the exact same configuration */
	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config miss-match\n",
				chan_num);
			return (void *)-EINVAL;
		}
	}

	/* first user of the instance brings up the global hardware state */
	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	/* first user of the channel programs it */
	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);

	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
				chan_num, instance);

	return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);
509 | ||
/**
 * knav_dma_close_channel() - Destroy a dma channel
 *
 * @channel: dma channel handle returned by knav_dma_open_channel()
 *
 * Drops one reference on the channel (stopping it when it was the last)
 * and one on the owning DMA instance (disabling the hardware when it was
 * the last).  NOTE(review): @channel is dereferenced without a NULL/error
 * check — callers must pass a valid handle.
 */
void knav_dma_close_channel(void *channel)
{
	struct knav_dma_chan *chan = channel;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return;
	}

	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);

	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
			chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);
535 | ||
536 | static void __iomem *pktdma_get_regs(struct knav_dma_device *dma, | |
537 | struct device_node *node, | |
538 | unsigned index, resource_size_t *_size) | |
539 | { | |
540 | struct device *dev = kdev->dev; | |
541 | struct resource res; | |
542 | void __iomem *regs; | |
543 | int ret; | |
544 | ||
545 | ret = of_address_to_resource(node, index, &res); | |
546 | if (ret) { | |
dc37a252 RH |
547 | dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n", |
548 | node, index); | |
88139ed0 SS |
549 | return ERR_PTR(ret); |
550 | } | |
551 | ||
552 | regs = devm_ioremap_resource(kdev->dev, &res); | |
553 | if (IS_ERR(regs)) | |
dc37a252 RH |
554 | dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n", |
555 | index, node); | |
88139ed0 SS |
556 | if (_size) |
557 | *_size = resource_size(&res); | |
558 | ||
559 | return regs; | |
560 | } | |
561 | ||
562 | static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow) | |
563 | { | |
564 | struct knav_dma_device *dma = chan->dma; | |
565 | ||
566 | chan->flow = flow; | |
567 | chan->reg_rx_flow = dma->reg_rx_flow + flow; | |
568 | chan->channel = DMA_INVALID_ID; | |
569 | dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow); | |
570 | ||
571 | return 0; | |
572 | } | |
573 | ||
574 | static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel) | |
575 | { | |
576 | struct knav_dma_device *dma = chan->dma; | |
577 | ||
578 | chan->channel = channel; | |
579 | chan->reg_chan = dma->reg_tx_chan + channel; | |
580 | chan->reg_tx_sched = dma->reg_tx_sched + channel; | |
581 | chan->flow = DMA_INVALID_ID; | |
582 | dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan); | |
583 | ||
584 | return 0; | |
585 | } | |
586 | ||
587 | static int pktdma_init_chan(struct knav_dma_device *dma, | |
588 | enum dma_transfer_direction dir, | |
589 | unsigned chan_num) | |
590 | { | |
591 | struct device *dev = kdev->dev; | |
592 | struct knav_dma_chan *chan; | |
593 | int ret = -EINVAL; | |
594 | ||
595 | chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); | |
596 | if (!chan) | |
597 | return -ENOMEM; | |
598 | ||
599 | INIT_LIST_HEAD(&chan->list); | |
600 | chan->dma = dma; | |
601 | chan->direction = DMA_NONE; | |
602 | atomic_set(&chan->ref_count, 0); | |
603 | spin_lock_init(&chan->lock); | |
604 | ||
605 | if (dir == DMA_MEM_TO_DEV) { | |
606 | chan->direction = dir; | |
607 | ret = pktdma_init_tx_chan(chan, chan_num); | |
608 | } else if (dir == DMA_DEV_TO_MEM) { | |
609 | chan->direction = dir; | |
610 | ret = pktdma_init_rx_chan(chan, chan_num); | |
611 | } else { | |
612 | dev_err(dev, "channel(%d) direction unknown\n", chan_num); | |
613 | } | |
614 | ||
615 | list_add_tail(&chan->list, &dma->chan_list); | |
616 | ||
617 | return ret; | |
618 | } | |
619 | ||
620 | static int dma_init(struct device_node *cloud, struct device_node *dma_node) | |
621 | { | |
622 | unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched; | |
623 | struct device_node *node = dma_node; | |
624 | struct knav_dma_device *dma; | |
625 | int ret, len, num_chan = 0; | |
626 | resource_size_t size; | |
627 | u32 timeout; | |
628 | u32 i; | |
629 | ||
630 | dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL); | |
631 | if (!dma) { | |
632 | dev_err(kdev->dev, "could not allocate driver mem\n"); | |
633 | return -ENOMEM; | |
634 | } | |
635 | INIT_LIST_HEAD(&dma->list); | |
636 | INIT_LIST_HEAD(&dma->chan_list); | |
637 | ||
638 | if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) { | |
639 | dev_err(kdev->dev, "unspecified navigator cloud addresses\n"); | |
640 | return -ENODEV; | |
641 | } | |
642 | ||
643 | dma->logical_queue_managers = len / sizeof(u32); | |
644 | if (dma->logical_queue_managers > DMA_MAX_QMS) { | |
645 | dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n", | |
646 | dma->logical_queue_managers); | |
647 | dma->logical_queue_managers = DMA_MAX_QMS; | |
648 | } | |
649 | ||
650 | ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address", | |
651 | dma->qm_base_address, | |
652 | dma->logical_queue_managers); | |
653 | if (ret) { | |
654 | dev_err(kdev->dev, "invalid navigator cloud addresses\n"); | |
655 | return -ENODEV; | |
656 | } | |
657 | ||
658 | dma->reg_global = pktdma_get_regs(dma, node, 0, &size); | |
659 | if (!dma->reg_global) | |
660 | return -ENODEV; | |
661 | if (size < sizeof(struct reg_global)) { | |
662 | dev_err(kdev->dev, "bad size %pa for global regs\n", &size); | |
663 | return -ENODEV; | |
664 | } | |
665 | ||
666 | dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size); | |
667 | if (!dma->reg_tx_chan) | |
668 | return -ENODEV; | |
669 | ||
670 | max_tx_chan = size / sizeof(struct reg_chan); | |
671 | dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size); | |
672 | if (!dma->reg_rx_chan) | |
673 | return -ENODEV; | |
674 | ||
675 | max_rx_chan = size / sizeof(struct reg_chan); | |
676 | dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size); | |
677 | if (!dma->reg_tx_sched) | |
678 | return -ENODEV; | |
679 | ||
680 | max_tx_sched = size / sizeof(struct reg_tx_sched); | |
681 | dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size); | |
682 | if (!dma->reg_rx_flow) | |
683 | return -ENODEV; | |
684 | ||
685 | max_rx_flow = size / sizeof(struct reg_rx_flow); | |
686 | dma->rx_priority = DMA_PRIO_DEFAULT; | |
687 | dma->tx_priority = DMA_PRIO_DEFAULT; | |
688 | ||
689 | dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL); | |
690 | dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL); | |
691 | ||
692 | ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout); | |
693 | if (ret < 0) { | |
694 | dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n", | |
695 | DMA_RX_TIMEOUT_DEFAULT); | |
696 | timeout = DMA_RX_TIMEOUT_DEFAULT; | |
697 | } | |
698 | ||
699 | dma->rx_timeout = timeout; | |
700 | dma->max_rx_chan = max_rx_chan; | |
701 | dma->max_rx_flow = max_rx_flow; | |
702 | dma->max_tx_chan = min(max_tx_chan, max_tx_sched); | |
703 | atomic_set(&dma->ref_count, 0); | |
704 | strcpy(dma->name, node->name); | |
705 | spin_lock_init(&dma->lock); | |
706 | ||
707 | for (i = 0; i < dma->max_tx_chan; i++) { | |
708 | if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0) | |
709 | num_chan++; | |
710 | } | |
711 | ||
712 | for (i = 0; i < dma->max_rx_flow; i++) { | |
713 | if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0) | |
714 | num_chan++; | |
715 | } | |
716 | ||
717 | list_add_tail(&dma->list, &kdev->list); | |
718 | ||
719 | /* | |
720 | * For DSP software usecases or userpace transport software, setup all | |
721 | * the DMA hardware resources. | |
722 | */ | |
723 | if (dma->enable_all) { | |
724 | atomic_inc(&dma->ref_count); | |
725 | knav_dma_hw_init(dma); | |
726 | dma_hw_enable_all(dma); | |
727 | } | |
728 | ||
729 | dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n", | |
730 | dma->name, num_chan, dma->max_rx_flow, | |
731 | dma->max_tx_chan, dma->max_rx_chan, | |
732 | dma->loopback ? ", loopback" : ""); | |
733 | ||
734 | return 0; | |
735 | } | |
736 | ||
737 | static int knav_dma_probe(struct platform_device *pdev) | |
738 | { | |
739 | struct device *dev = &pdev->dev; | |
740 | struct device_node *node = pdev->dev.of_node; | |
741 | struct device_node *child; | |
742 | int ret = 0; | |
743 | ||
744 | if (!node) { | |
745 | dev_err(&pdev->dev, "could not find device info\n"); | |
746 | return -EINVAL; | |
747 | } | |
748 | ||
749 | kdev = devm_kzalloc(dev, | |
750 | sizeof(struct knav_dma_pool_device), GFP_KERNEL); | |
751 | if (!kdev) { | |
752 | dev_err(dev, "could not allocate driver mem\n"); | |
753 | return -ENOMEM; | |
754 | } | |
755 | ||
756 | kdev->dev = dev; | |
757 | INIT_LIST_HEAD(&kdev->list); | |
758 | ||
759 | pm_runtime_enable(kdev->dev); | |
760 | ret = pm_runtime_get_sync(kdev->dev); | |
761 | if (ret < 0) { | |
762 | dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); | |
763 | return ret; | |
764 | } | |
765 | ||
766 | /* Initialise all packet dmas */ | |
767 | for_each_child_of_node(node, child) { | |
768 | ret = dma_init(node, child); | |
769 | if (ret) { | |
770 | dev_err(&pdev->dev, "init failed with %d\n", ret); | |
771 | break; | |
772 | } | |
773 | } | |
774 | ||
775 | if (list_empty(&kdev->list)) { | |
776 | dev_err(dev, "no valid dma instance\n"); | |
777 | return -ENODEV; | |
778 | } | |
779 | ||
780 | debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, | |
781 | &knav_dma_debug_ops); | |
782 | ||
a2dd6877 | 783 | device_ready = true; |
88139ed0 SS |
784 | return ret; |
785 | } | |
786 | ||
/*
 * knav_dma_remove() - platform remove: drop each instance's base
 * reference (disabling its hardware when it reaches zero) and balance
 * runtime PM from probe.
 *
 * NOTE(review): device_ready is not cleared here and kdev (devm memory)
 * becomes stale after remove — confirm clients cannot race with unbind.
 */
static int knav_dma_remove(struct platform_device *pdev)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
801 | ||
802 | static struct of_device_id of_match[] = { | |
803 | { .compatible = "ti,keystone-navigator-dma", }, | |
804 | {}, | |
805 | }; | |
806 | ||
807 | MODULE_DEVICE_TABLE(of, of_match); | |
808 | ||
/* Platform driver glue: probe/remove bound to the DT compatible above */
static struct platform_driver knav_dma_driver = {
	.probe	= knav_dma_probe,
	.remove	= knav_dma_remove,
	.driver = {
		.name		= "keystone-navigator-dma",
		.of_match_table	= of_match,
	},
};
module_platform_driver(knav_dma_driver);
818 | ||
819 | MODULE_LICENSE("GPL v2"); | |
820 | MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver"); | |
821 | MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); | |
822 | MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); |