media: c8sectpfe-core: Convert to platform remove callback returning void
[linux-block.git] / drivers / media / platform / st / sti / c8sectpfe / c8sectpfe-core.c
CommitLineData
9ed785a9 1// SPDX-License-Identifier: GPL-2.0
c5f5d0f9
PG
2/*
3 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
4 *
5 * Copyright (c) STMicroelectronics 2015
6 *
7 * Author:Peter Bennett <peter.bennett@st.com>
8 * Peter Griffin <peter.griffin@linaro.org>
9 *
c5f5d0f9
PG
10 */
11#include <linux/atomic.h>
12#include <linux/clk.h>
13#include <linux/completion.h>
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/dma-mapping.h>
17#include <linux/dvb/dmx.h>
18#include <linux/dvb/frontend.h>
19#include <linux/errno.h>
20#include <linux/firmware.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/of_gpio.h>
26#include <linux/of_platform.h>
8f32a6fb
AS
27#include <linux/pinctrl/consumer.h>
28#include <linux/pinctrl/pinctrl.h>
c5f5d0f9 29#include <linux/platform_device.h>
c5f5d0f9
PG
30#include <linux/slab.h>
31#include <linux/time.h>
8f32a6fb 32#include <linux/usb.h>
c5f5d0f9
PG
33#include <linux/wait.h>
34
c5f5d0f9 35#include "c8sectpfe-common.h"
8f32a6fb 36#include "c8sectpfe-core.h"
c5f5d0f9 37#include "c8sectpfe-debugfs.h"
8f32a6fb 38
fada1935
MCC
39#include <media/dmxdev.h>
40#include <media/dvb_demux.h>
41#include <media/dvb_frontend.h>
42#include <media/dvb_net.h>
c5f5d0f9
PG
43
44#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
45MODULE_FIRMWARE(FIRMWARE_MEMDMA);
46
47#define PID_TABLE_SIZE 1024
48#define POLL_MSECS 50
49
c23ac90f 50static int load_c8sectpfe_fw(struct c8sectpfei *fei);
c5f5d0f9
PG
51
52#define TS_PKT_SIZE 188
53#define HEADER_SIZE (4)
54#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
55
56#define FEI_ALIGNMENT (32)
57/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
58#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
59
60#define FIFO_LEN 1024
61
e99e88a9 62static void c8sectpfe_timer_interrupt(struct timer_list *t)
c5f5d0f9 63{
e99e88a9 64 struct c8sectpfei *fei = from_timer(fei, t, timer);
c5f5d0f9
PG
65 struct channel_info *channel;
66 int chan_num;
67
68 /* iterate through input block channels */
69 for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
70 channel = fei->channel_data[chan_num];
71
72 /* is this descriptor initialised and TP enabled */
73 if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
74 tasklet_schedule(&channel->tsklet);
75 }
76
77 fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
78 add_timer(&fei->timer);
79}
80
/*
 * Software demux tasklet: drain newly DMA'd TS packets from the channel's
 * back buffer and push them into the dvb_demux software filter.
 *
 * The memdma HW maintains bus-address write (BUSWP) / read (BUSRP)
 * pointers into the back buffer; this tasklet consumes data between RP
 * and WP, then advances RP to tell the HW the space is free again.
 */
static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	/* bail out if the channel was never (or is no longer) configured */
	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* byte offset of the read pointer from the start of the buffer */
	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	/*
	 * On wrap only the tail (rp..end of buffer) is consumed this pass;
	 * RP is reset to the buffer base below, so the wrapped-around data
	 * is picked up on the next timer poll.
	 */
	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* feed whole PACKET_SIZE units to the demux mapped to this tsin */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
134
135static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
136{
137 struct dvb_demux *demux = dvbdmxfeed->demux;
138 struct stdemux *stdemux = (struct stdemux *)demux->priv;
139 struct c8sectpfei *fei = stdemux->c8sectpfei;
140 struct channel_info *channel;
141 u32 tmp;
142 unsigned long *bitmap;
c23ac90f 143 int ret;
c5f5d0f9
PG
144
145 switch (dvbdmxfeed->type) {
146 case DMX_TYPE_TS:
147 break;
148 case DMX_TYPE_SEC:
149 break;
150 default:
151 dev_err(fei->dev, "%s:%d Error bailing\n"
152 , __func__, __LINE__);
153 return -EINVAL;
154 }
155
156 if (dvbdmxfeed->type == DMX_TYPE_TS) {
157 switch (dvbdmxfeed->pes_type) {
158 case DMX_PES_VIDEO:
159 case DMX_PES_AUDIO:
160 case DMX_PES_TELETEXT:
161 case DMX_PES_PCR:
162 case DMX_PES_OTHER:
163 break;
164 default:
165 dev_err(fei->dev, "%s:%d Error bailing\n"
166 , __func__, __LINE__);
167 return -EINVAL;
168 }
169 }
170
171 if (!atomic_read(&fei->fw_loaded)) {
c23ac90f
PG
172 ret = load_c8sectpfe_fw(fei);
173 if (ret)
174 return ret;
c5f5d0f9
PG
175 }
176
177 mutex_lock(&fei->lock);
178
179 channel = fei->channel_data[stdemux->tsin_index];
180
64e46b63 181 bitmap = channel->pid_buffer_aligned;
c5f5d0f9
PG
182
183 /* 8192 is a special PID */
184 if (dvbdmxfeed->pid == 8192) {
185 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
186 tmp &= ~C8SECTPFE_PID_ENABLE;
187 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
188
189 } else {
190 bitmap_set(bitmap, dvbdmxfeed->pid, 1);
191 }
192
193 /* manage cache so PID bitmap is visible to HW */
194 dma_sync_single_for_device(fei->dev,
195 channel->pid_buffer_busaddr,
196 PID_TABLE_SIZE,
197 DMA_TO_DEVICE);
198
199 channel->active = 1;
200
201 if (fei->global_feed_count == 0) {
202 fei->timer.expires = jiffies +
203 msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
204
205 add_timer(&fei->timer);
206 }
207
208 if (stdemux->running_feed_count == 0) {
209
210 dev_dbg(fei->dev, "Starting channel=%p\n", channel);
211
9db2f6a4 212 tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
c5f5d0f9
PG
213
214 /* Reset the internal inputblock sram pointers */
215 writel(channel->fifo,
216 fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
217 writel(channel->fifo + FIFO_LEN - 1,
218 fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
219
220 writel(channel->fifo,
221 fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
222 writel(channel->fifo,
223 fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
224
225
226 /* reset read / write memdma ptrs for this channel */
227 writel(channel->back_buffer_busaddr, channel->irec +
228 DMA_PRDS_BUSBASE_TP(0));
229
230 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
231 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
232
233 writel(channel->back_buffer_busaddr, channel->irec +
234 DMA_PRDS_BUSWP_TP(0));
235
236 /* Issue a reset and enable InputBlock */
237 writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
238 , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
239
240 /* and enable the tp */
241 writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
242
243 dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
244 , __func__, __LINE__, stdemux);
245 }
246
247 stdemux->running_feed_count++;
248 fei->global_feed_count++;
249
250 mutex_unlock(&fei->lock);
251
252 return 0;
253}
254
/*
 * c8sectpfe_stop_feed() - dvb_demux callback to stop a TS/section feed.
 *
 * Clears the PID from the HW filter bitmap (or re-enables filtering if
 * the special all-PIDs PID 8192 is being stopped). When the last feed on
 * a channel stops, the input block and transport processor are quiesced
 * following the TP re-configuration sequence from the functional spec;
 * when the last feed globally stops, the polling timer is deleted.
 *
 * Returns 0 on success or a negative errno from firmware loading.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* firmware must be loaded before touching the HW registers below */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = channel->pid_buffer_aligned;

	/* stopping PID 8192 means HW PID filtering goes back on */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
				channel->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* best-effort: continue shutdown even if the HW never idled */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
360
361static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
362{
363 int i;
364
7612cf97 365 for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
c5f5d0f9
PG
366 if (!fei->channel_data[i])
367 continue;
368
369 if (fei->channel_data[i]->tsin_id == tsin_num)
370 return fei->channel_data[i];
371 }
372
373 return NULL;
374}
375
376static void c8sectpfe_getconfig(struct c8sectpfei *fei)
377{
378 struct c8sectpfe_hw *hw = &fei->hw_stats;
379
380 hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
381 hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
382 hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
383 hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
384 hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
385 hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
386 hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
387
388 dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
389 dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
390 dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
391 dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
392 , hw->num_swts);
393 dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
394 dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
395 dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
396 dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
397 , hw->num_tp);
398}
399
400static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
401{
402 struct c8sectpfei *fei = priv;
403 struct channel_info *chan;
404 int bit;
405 unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
406
407 /* page 168 of functional spec: Clear the idle request
408 by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
409
410 /* signal idle completion */
411 for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
412
413 chan = find_channel(fei, bit);
414
415 if (chan)
416 complete(&chan->idle_completion);
417 }
418
419 writel(0, fei->io + DMA_IDLE_REQ);
420
421 return IRQ_HANDLED;
422}
423
424
425static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
426{
427 if (!fei || !tsin)
428 return;
429
430 if (tsin->back_buffer_busaddr)
431 if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
432 dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
433 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
434
435 kfree(tsin->back_buffer_start);
436
437 if (tsin->pid_buffer_busaddr)
438 if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
439 dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
440 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
441
442 kfree(tsin->pid_buffer_start);
443}
444
445#define MAX_NAME 20
446
/*
 * configure_memdma_and_inputblock() - bring up one tsin channel.
 *
 * Allocates and DMA-maps the TS back buffer and the HW PID filter
 * bitmap, selects the channel's pinctrl state, programs the input block
 * registers and the memdma pointer records, and initialises the
 * software-demux tasklet.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * already set up for this channel is released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned below */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name e.g. "tsin3-serial" or "tsin3-parallel" */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/*
	 * NOTE(review): tmp still holds the SYS_INPUT_CLKEN bits here and
	 * they get OR'd into the IP_FMT_CFG write below — confirm this is
	 * intended rather than starting from a fresh value.
	 */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* TS packet size padded up to an 8-byte multiple */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
641
642static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
643{
644 struct c8sectpfei *fei = priv;
645
646 dev_err(fei->dev, "%s: error handling not yet implemented\n"
647 , __func__);
648
649 /*
650 * TODO FIXME we should detect some error conditions here
aa966274 651 * and ideally do something about them!
c5f5d0f9
PG
652 */
653
654 return IRQ_HANDLED;
655}
656
/*
 * c8sectpfe_probe() - platform driver probe.
 *
 * Maps the register and SRAM resources, acquires IRQs and the IP clock,
 * reads the HW configuration, then walks the device-tree children to
 * build a channel_info for each tsin (i2c adapter, reset GPIO, stream
 * format flags). Finally registers the DVB frontends and debugfs.
 *
 * Note: firmware loading and per-channel HW configuration are deferred
 * to the first feed (see c8sectpfe_start_feed / load_c8sectpfe_fw).
 */
static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* to save power disable all IP's (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	fei->tsin_count = of_get_child_count(np);

	/* a tsin node is only valid if the SoC has a matching input block */
	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
		fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	/* build a channel_info for every tsin child node */
	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);

		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/* sanity check value */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);

		ret = gpio_is_valid(tsin->rst_gpio);
		if (!ret) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_node_put;
		}

		/* -EBUSY is tolerated: a shared reset line already claimed */
		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
					GPIOF_OUT_INIT_LOW, "NIM reset");
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n"
				, fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* toggle reset lines */
			gpio_direction_output(tsin->rst_gpio, 0);
			usleep_range(3500, 5000);
			gpio_direction_output(tsin->rst_gpio, 1);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	/* drop the reference for_each_child_of_node() held on early exit */
	of_node_put(child);
err_clk_disable:
	clk_disable_unprepare(fei->c8sectpfeclk);
	return ret;
}
880
/*
 * c8sectpfe_remove() - platform driver remove (void-returning variant).
 *
 * Waits for the firmware-load acknowledgement, unregisters the DVB
 * frontends, releases every input block's buffers, then stops the SLIM
 * core and gates the internal IP clocks before disabling the main clock.
 */
static void c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/* don't tear down while a firmware load is still in flight */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	clk_disable_unprepare(fei->c8sectpfeclk);
}
914
915
916static int configure_channels(struct c8sectpfei *fei)
917{
918 int index = 0, ret;
c5f5d0f9
PG
919 struct device_node *child, *np = fei->dev->of_node;
920
921 /* iterate round each tsin and configure memdma descriptor and IB hw */
922 for_each_child_of_node(np, child) {
c5f5d0f9
PG
923 ret = configure_memdma_and_inputblock(fei,
924 fei->channel_data[index]);
c5f5d0f9
PG
925 if (ret) {
926 dev_err(fei->dev,
927 "configure_memdma_and_inputblock failed\n");
63ff05a1 928 of_node_put(child);
c5f5d0f9
PG
929 goto err_unmap;
930 }
931 index++;
932 }
933
934 return 0;
935
936err_unmap:
232c297a
DC
937 while (--index >= 0)
938 free_input_block(fei, fei->channel_data[index]);
939
c5f5d0f9
PG
940 return ret;
941}
942
943static int
944c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
945{
946 struct elf32_hdr *ehdr;
947 char class;
948
949 if (!fw) {
950 dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
951 return -EINVAL;
952 }
953
954 if (fw->size < sizeof(struct elf32_hdr)) {
955 dev_err(fei->dev, "Image is too small\n");
956 return -EINVAL;
957 }
958
959 ehdr = (struct elf32_hdr *)fw->data;
960
961 /* We only support ELF32 at this point */
962 class = ehdr->e_ident[EI_CLASS];
963 if (class != ELFCLASS32) {
964 dev_err(fei->dev, "Unsupported class: %d\n", class);
965 return -EINVAL;
966 }
967
968 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
969 dev_err(fei->dev, "Unsupported firmware endianness\n");
970 return -EINVAL;
971 }
972
973 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
974 dev_err(fei->dev, "Image is too small\n");
975 return -EINVAL;
976 }
977
978 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
979 dev_err(fei->dev, "Image is corrupted (bad magic)\n");
980 return -EINVAL;
981 }
982
983 /* Check ELF magic */
984 ehdr = (Elf32_Ehdr *)fw->data;
985 if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
986 ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
987 ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
988 ehdr->e_ident[EI_MAG3] != ELFMAG3) {
989 dev_err(fei->dev, "Invalid ELF magic\n");
990 return -EINVAL;
991 }
992
993 if (ehdr->e_type != ET_EXEC) {
994 dev_err(fei->dev, "Unsupported ELF header type\n");
995 return -EINVAL;
996 }
997
998 if (ehdr->e_phoff > fw->size) {
999 dev_err(fei->dev, "Firmware size is too small\n");
1000 return -EINVAL;
1001 }
1002
1003 return 0;
1004}
1005
1006
/*
 * load_imem_segment() - copy one executable ELF segment into MEMDMA IMEM.
 *
 * IMEM instructions are 24 bits wide in the ELF image but must be
 * written as 32-bit words: after every 3 source bytes a zero pad byte
 * is inserted in the destination, so the written size is
 * p_filesz + p_filesz/3.
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
		phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/*
		 * NOTE(review): imem_src points at fw->data (normal kernel
		 * memory), yet it is cast to __iomem and read with readb();
		 * a plain dereference looks more appropriate — confirm.
		 */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
1041
1042static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1043 const struct firmware *fw, u8 __iomem *dst, int seg_num)
1044{
1045 /*
1046 * For DMEM segments copy the segment data from the ELF
1047 * file and pad segment with zeroes
1048 */
1049
1050 dev_dbg(fei->dev,
7152c88e 1051 "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
c5f5d0f9
PG
1052 seg_num, phdr->p_paddr, phdr->p_filesz,
1053 dst, phdr->p_memsz);
1054
5347f97c 1055 memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
c5f5d0f9
PG
1056 phdr->p_filesz);
1057
5347f97c 1058 memset((void __force *)dst + phdr->p_filesz, 0,
c5f5d0f9
PG
1059 phdr->p_memsz - phdr->p_filesz);
1060}
1061
/*
 * load_slim_core_fw() - copy the firmware's LOAD segments into the HW.
 *
 * Executable (PF_X) segments go to MEMDMA IMEM via load_imem_segment();
 * all other LOAD segments go to DMEM via load_dmem_segment().
 *
 * Ownership: this function consumes @fw — release_firmware() is called
 * on every path, so callers must not release it again.
 *
 * Returns 0 on success or -EINVAL on a malformed segment.
 */
static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 *
		 */

		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	release_firmware(fw);
	return err;
}
1120
/*
 * load_c8sectpfe_fw() - fetch, validate and boot the SLIM core firmware.
 *
 * Requests FIRMWARE_MEMDMA, sanity-checks the ELF, copies its segments
 * into IMEM/DMEM, configures every input block, then starts the memdma
 * CPU and marks fw_loaded so feeds can proceed.
 *
 * Firmware ownership: load_slim_core_fw() releases @fw on all of its
 * paths, so only the sanity-check failure path releases it here.
 *
 * Returns 0 on success or a negative errno.
 */
static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
	const struct firmware *fw;
	int err;

	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);

	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
	if (err)
		return err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
			, err);
		release_firmware(fw);
		return err;
	}

	/* load_slim_core_fw() consumes fw (released on all its paths) */
	err = load_slim_core_fw(fw, fei);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		return err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		return err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1, fei->io + DMA_CPU_RUN);

	/* allow feeds to start without re-loading the firmware */
	atomic_set(&fei->fw_loaded, 1);

	return 0;
}
1166
/* DT match table: the C8SECTPFE IP is found on STiH407-family SoCs */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	/* void-returning remove callback (platform remove conversion) */
	.remove_new = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");