/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		/* Rx/Tx frame sync delays for PPPoHT on SLIC Maxim parts */
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#endif
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
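
/*
 * One-time hardware setup for a UCC instance: bring up the UCC fast
 * controller, program the HDLC parameter RAM in MURAM, and carve out the
 * Rx/Tx buffer descriptor rings plus their DMA buffers.  Everything
 * allocated here is torn down by the error path below or by
 * uhdlc_memclean().
 */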
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	u8 *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
	}

	/* This sets the HPM register in the CMXUCR register, which configures
	 * an open-drain-connected HDLC bus.
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR to normal mode (FIXME) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr; qe_muram_alloc() encodes a negative errno in
	 * its return value on failure, hence the signed locals.
	 */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_zalloc_coherent(priv->dev,
					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
					MAX_RX_BUF_LENGTH,
					&bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
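
/*
 * Transmit path.  The skb payload is copied into the DMA buffer attached
 * to the next free Tx BD; handing the BD to the QE is the final
 * iowrite16be() of the status word with T_R_S set.  The queue is stopped
 * when the ring wraps around onto the first unconfirmed BD (dirty_tx).
 */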
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			/* the skb was consumed above, so report it as sent */
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
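
/*
 * Reclaim Tx BDs that the QE has finished with (T_R_S cleared), free the
 * corresponding skbs and restart the queue.  Called from NAPI context
 * with priv->lock held.
 */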
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx = (priv->skb_dirtytx +
				     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
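
/*
 * Receive up to rx_work_limit frames from the Rx BD ring.  Each frame is
 * copied out of the fixed DMA buffer into a fresh skb before the BD is
 * handed back to the QE with R_E_S set, so buffers are never detached
 * from the ring.
 */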
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}
	priv->currx_bd = bd;

	return howmany;
}
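
/*
 * NAPI poll: confirm finished Tx BDs, then harvest Rx frames up to the
 * budget.  Rx/Tx event interrupts are re-enabled only once the ring has
 * been drained below budget.
 */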
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
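
/*
 * Interrupt handler: acknowledge the UCC event register, defer Rx/Tx
 * work to NAPI by masking those events, and count error events directly.
 */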
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
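
/*
 * SIOCWANDEV handling: report an E1 interface with the stored clocking
 * mode; all other requests fall through to the generic hdlc_ioctl().
 */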
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
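
/*
 * Bring the interface up: claim the UCC interrupt, issue QE_INIT_TX_RX,
 * enable the fast controller, and (for a TSA configuration) open the TDM
 * port in the SI.  hdlc_busy guards against double initialization.
 */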
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}
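
/*
 * Release everything uhdlc_init() allocated: MURAM regions, BD rings,
 * DMA buffers and the skb pointer arrays.  Safe to call on a partially
 * initialized device since each resource is checked before freeing.
 */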
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}
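
/*
 * Take the interface down: gracefully stop Tx, close the Rx BDs, disable
 * the TDM port and the fast controller, then release the interrupt.
 */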
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}
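
/*
 * generic_hdlc attach callback: the hardware supports NRZ/NRZI encodings
 * and CRC16/CRC32 framing only, so everything else is refused.
 */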
static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
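
/*
 * Power management: the QE loses register and MURAM state across a deep
 * sleep, so suspend snapshots GUMR/GUEMR, the HDLC parameter RAM and the
 * clock routing, and resume replays them before rebuilding the BD rings.
 */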
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr*/
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR to normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};
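
/*
 * Probe matches "fsl,ucc-hdlc" nodes.  As a sketch (this binding example
 * is illustrative, not copied from a dts), the node is expected to look
 * roughly like:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,tdm-interface;
 *	};
 *
 * cell-index selects the UCC (1-based), the clock names must map to a QE
 * clock source, and the fsl,* flags switch on TSA, internal loopback and
 * HDLC bus mode respectively.
 */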
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);