/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/io.h>		/* ioremap() */
#include <linux/slab.h>		/* kmalloc()/kfree() */

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>
#define TX_BUF_SZ		0x2000
#define RX_BUF_SZ		0x2000

#define CAIF_NEEDED_HEADROOM	32

#define CAIF_FLOW_ON		1
#define CAIF_FLOW_OFF		0

#define LOW_WATERMARK		3
#define HIGH_WATERMARK		4
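
/*
 * Note on the watermarks above (derived from the code below): transmission
 * is flowed off when fewer than LOW_WATERMARK empty TX buffers remain (see
 * shm_tx_work_func()) and flowed on again once more than HIGH_WATERMARK
 * empty TX buffers are available (see caif_shmdrv_rx_cb()), which gives a
 * simple hysteresis.
 */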

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF	10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
					sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
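
/*
 * Worked example, assuming struct shm_pck_desc is two u32 fields (8 bytes):
 * the descriptor area is (10 + 1) * 8 = 88 bytes and SHM_CAIF_FRM_OFS
 * becomes 88 + (88 % 32) = 112 bytes. Note that adding the remainder does
 * not round the offset up to a multiple of 32, so with these values the
 * first frame starts 16-byte, not 32-byte, aligned.
 */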

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN		1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN		4

#define CAIF_MAX_MTU		4096

#define SHM_SET_FULL(x)		(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)
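
/*
 * Mailbox word layout, as implied by the macros above: bits 0-3 carry the
 * index of a buffer the remote end has filled, bits 4-7 the index of a
 * buffer it has emptied, each stored as (index + 1) so that zero means
 * "no buffer". For example, for buffer 2 filled and buffer 0 emptied:
 *
 *	u32 msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(0);	=> 0x03 | 0x10 = 0x13
 *	SHM_GET_FULL(msg)				=> 2
 *	SHM_GET_EMPTY(msg)				=> 0
 */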

struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};
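
/*
 * Sketch of a single shared memory buffer as used by the TX and RX paths
 * below (not to scale):
 *
 *	+---------------------------+  <- buf_list.phy_addr / desc_vptr
 *	| shm_pck_desc[] descriptor |     a zero frm_ofs terminates the
 *	| area (SHM_CAIF_DESC_SIZE) |     descriptor list
 *	+---------------------------+  <- SHM_CAIF_FRM_OFS
 *	| frame 0:                  |
 *	|   1 byte hdr_ofs          |     number of pad bytes that follow
 *	|   hdr_ofs pad bytes       |
 *	|   CAIF frame              |
 *	|   pad to SHM_FRM_PAD_LEN  |
 *	+---------------------------+
 *	| frame 1: ...              |
 *	+---------------------------+
 */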

static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			goto err_sync;
		}

		pbuf = list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
				" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
				idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			goto err_sync;
		}

		pbuf = list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: TX empty "
				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);

			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Check the available no. of buffers in the empty list */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_ON);

			/* Schedule the work queue, if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}
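
/*
 * RX deferred work: drain the buffers that caif_shmdrv_rx_cb() placed on
 * rx_full_list, walk each buffer's descriptor area, copy every CAIF frame
 * into an skb and hand it to the network stack, then park the buffer on
 * rx_pend_list so that the TX work function can report it back to the
 * remote end as empty.
 */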
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf = list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;
			/*
			 * Check whether offset is within buffer limits
			 * (lower).
			 */
			if (pck_desc->frm_ofs <
				(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher).
			 */
			if (pck_desc->frm_ofs >
				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
				(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
				(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			if (skb == NULL)
				break;

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_dropped;
			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	/* Schedule the work queue, if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}
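
/*
 * TX deferred work: report any buffers waiting on rx_pend_list back to the
 * remote end as empty, then pack queued skbs (up to SHM_MAX_FRMS_PER_BUF
 * frames) into the first free TX buffer and announce it as full, both via
 * a single mailbox message.
 */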
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);
		if (skb == NULL)
			goto send_msg;

		/* Check the available no. of buffers in the empty list */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
					pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}
		/*
		 * We simply return back to the caller if we do not have space
		 * either in Tx pending list or Tx empty list. In this case,
		 * we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
									frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
								pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of MBX driver by calling its setup function.
	 * MBX driver must be available by this time for proper
	 * functioning of SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
			" INSTANCE AT pshm_drv =0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, Amount of available"
				" Phys. SHM cannot accommodate current SHM "
				"driver configuration, Bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);
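
	/*
	 * Resulting shared memory map (non-loopback case): NR_TX_BUF buffers
	 * of TX_BUF_SZ bytes starting at shm_base_addr, immediately followed
	 * by NR_RX_BUF buffers of RX_BUF_SZ bytes. In loopback mode the RX
	 * window simply aliases the TX window.
	 */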

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
				create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
				create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			/*
			 * FIXME: the result of ioremap is not a pointer - arnd
			 */
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not register with network "
				"framework, bailing out ...\n", result);

	return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf = list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf = list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf = list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf = list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf = list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}