1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file is based on code from OCTEON SDK by Cavium Networks.
4  *
5  * Copyright (c) 2003-2010 Cavium Networks
6  */
7
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ip.h>
13 #include <linux/ratelimit.h>
14 #include <linux/string.h>
15 #include <linux/interrupt.h>
16 #include <net/dst.h>
17 #ifdef CONFIG_XFRM
18 #include <linux/xfrm.h>
19 #include <net/xfrm.h>
20 #endif /* CONFIG_XFRM */
21
22 #include <linux/atomic.h>
23 #include <net/sch_generic.h>
24
25 #include "octeon-ethernet.h"
26 #include "ethernet-defines.h"
27 #include "ethernet-tx.h"
28 #include "ethernet-util.h"
29
30 #define CVM_OCT_SKB_CB(skb)     ((u64 *)((skb)->cb))
31
32 /*
33  * You can define GET_SKBUFF_QOS() to override how the skbuff output
34  * function determines which output queue is used. The default
35  * implementation always uses the base queue for the port. If, for
36  * example, you wanted to use the skb->priority field, define
37  * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
38  */
39 #ifndef GET_SKBUFF_QOS
40 #define GET_SKBUFF_QOS(skb) 0
41 #endif
42
43 static void cvm_oct_tx_do_cleanup(unsigned long arg);
44 static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
45
46 /* Maximum number of SKBs to try to free per xmit packet. */
47 #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
48
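/*
 * The per-queue FAU counter is decremented by the PKO as it finishes with
 * each packet (see the reg0/size0/subone0 fields of the command word built
 * in cvm_oct_xmit()), and callers speculatively add MAX_SKB_TO_FREE when
 * they sample it.  This helper backs out whatever part of that speculative
 * add was not needed and clamps the number of skbuffs that may safely be
 * freed to at most MAX_SKB_TO_FREE.
 */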
static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
        int undo;

        undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
                                                   MAX_SKB_TO_FREE;
        if (undo > 0)
                cvmx_fau_atomic_add32(fau, -undo);
        skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
                                                       -skb_to_free;
        return skb_to_free;
}

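/*
 * Arm CIU timer 1 as a one-shot so that cvm_oct_tx_cleanup_watchdog()
 * fires later and schedules the TX cleanup tasklet.
 */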
static void cvm_oct_kick_tx_poll_watchdog(void)
{
        union cvmx_ciu_timx ciu_timx;

        ciu_timx.u64 = 0;
        ciu_timx.s.one_shot = 1;
        ciu_timx.s.len = cvm_oct_tx_poll_interval;
        cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

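/*
 * Reclaim skbuffs the PKO has finished with: for each per-QoS free list,
 * sample the FAU completion counter, dequeue that many skbuffs under the
 * list lock, free them outside the lock, and wake the netif queue again
 * once the backlog drops below MAX_OUT_QUEUE_DEPTH.
 */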
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
        int skb_to_free;
        int qos, queues_per_port;
        int total_freed = 0;
        int total_remaining = 0;
        unsigned long flags;
        struct octeon_ethernet *priv = netdev_priv(dev);

        queues_per_port = cvmx_pko_get_num_queues(priv->port);
        /* Drain any pending packets in the free list */
        for (qos = 0; qos < queues_per_port; qos++) {
                if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
                        continue;
                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                       MAX_SKB_TO_FREE);
                skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                         priv->fau + qos * 4);
                total_freed += skb_to_free;
                if (skb_to_free > 0) {
                        struct sk_buff *to_free_list = NULL;

                        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                        while (skb_to_free > 0) {
                                struct sk_buff *t;

                                t = __skb_dequeue(&priv->tx_free_list[qos]);
                                t->next = to_free_list;
                                to_free_list = t;
                                skb_to_free--;
                        }
                        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
                                               flags);
                        /* Do the actual freeing outside of the lock. */
                        while (to_free_list) {
                                struct sk_buff *t = to_free_list;

                                to_free_list = to_free_list->next;
                                dev_kfree_skb_any(t);
                        }
                }
                total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
        }
        if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
                netif_wake_queue(dev);
        if (total_remaining)
                cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: NETDEV_TX_OK always.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
        cvmx_pko_command_word0_t pko_command;
        union cvmx_buf_ptr hw_buffer;
        u64 old_scratch;
        u64 old_scratch2;
        int qos;
        int i;
        enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
        struct octeon_ethernet *priv = netdev_priv(dev);
        struct sk_buff *to_free_list;
        int skb_to_free;
        int buffers_to_free;
        u32 total_to_clean;
        unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
        unsigned char *fpa_head;
#endif

        /*
         * Prefetch the private data structure.  It is larger than
         * one cache line.
         */
        prefetch(priv);

        /*
         * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to let the
         * compiler completely remove "qos" when neither interface
         * supports multiple queues per port.
         */
        if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
            (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
                qos = GET_SKBUFF_QOS(skb);
                if (qos <= 0)
                        qos = 0;
                else if (qos >= cvmx_pko_get_num_queues(priv->port))
                        qos = 0;
        } else {
                qos = 0;
        }

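        /*
         * With IOBDMA, issue the two FAU fetch-and-adds below
         * asynchronously into the scratchpad now so the results are
         * already available when they are consumed further down.
         */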
        if (USE_ASYNC_IOBDMA) {
                /* Save scratch in case userspace is using it */
                CVMX_SYNCIOBDMA;
                old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

                /*
                 * Fetch and increment the number of packets to be
                 * freed.
                 */
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
                                               FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                               0);
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
                                               priv->fau + qos * 4,
                                               MAX_SKB_TO_FREE);
        }

        /*
         * We have space for 6 segment pointers. If there are more
         * than that, we must linearize.
         */
        if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
                if (unlikely(__skb_linearize(skb))) {
                        queue_type = QUEUE_DROP;
                        if (USE_ASYNC_IOBDMA) {
                                /*
                                 * Get the number of skbuffs in use
                                 * by the hardware
                                 */
                                CVMX_SYNCIOBDMA;
                                skb_to_free =
                                        cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                        } else {
                                /*
                                 * Get the number of skbuffs in use
                                 * by the hardware
                                 */
                                skb_to_free =
                                     cvmx_fau_fetch_and_add32(priv->fau +
                                                              qos * 4,
                                                              MAX_SKB_TO_FREE);
                        }
                        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                                 priv->fau +
                                                                 qos * 4);
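                        /*
                         * skip_xmit expects the free-list lock to be
                         * held, so take it before jumping there.
                         */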
                        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                        goto skip_xmit;
                }
        }

        /*
         * The CN3XXX series of parts has an erratum (GMX-401) which
         * causes the GMX block to hang if a collision occurs towards
         * the end of a <68 byte packet. As a workaround for this, we
         * pad packets to be 68 bytes whenever we are in half duplex
         * mode. We don't handle the case of having a small packet but
         * no room to add the padding.  The kernel should always give
         * us at least a cache line.
         */
        if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
                union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
                int interface = INTERFACE(priv->port);
                int index = INDEX(priv->port);

                if (interface < 2) {
                        /* We only need to pad the packet in half duplex mode */
                        gmx_prt_cfg.u64 =
                            cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                        if (gmx_prt_cfg.s.duplex == 0) {
                                int add_bytes = 64 - skb->len;

                                if ((skb_tail_pointer(skb) + add_bytes) <=
                                    skb_end_pointer(skb))
                                        __skb_put_zero(skb, add_bytes);
                        }
                }
        }

        /* Build the PKO command */
        pko_command.u64 = 0;
#ifdef __LITTLE_ENDIAN
        pko_command.s.le = 1;
#endif
        pko_command.s.n2 = 1;   /* Don't pollute L2 with the outgoing packet */
        pko_command.s.segs = 1;
        pko_command.s.total_bytes = skb->len;
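        /*
         * size0/subone0 (with reg0 set below when the core keeps ownership
         * of the skbuff) make the PKO subtract one from the per-queue FAU
         * counter when it has finished with the packet; that is how
         * cvm_oct_free_tx_skbs() knows how many skbuffs can be freed.
         */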
        pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
        pko_command.s.subone0 = 1;

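        /*
         * Assume the core will free the skbuff; this is cleared below if
         * the buffer can instead be recycled straight into the FPA.
         */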
        pko_command.s.dontfree = 1;

        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        if (skb_shinfo(skb)->nr_frags == 0) {
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb->len;
        } else {
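                /*
                 * Multi-fragment skb: build a gather list of buffer
                 * pointers in skb->cb and point the single PKO buffer
                 * pointer at that list.
                 */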
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *fs = skb_shinfo(skb)->frags + i;

                        hw_buffer.s.addr =
                                XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
                        hw_buffer.s.size = skb_frag_size(fs);
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
                hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.gather = 1;
                goto dont_put_skbuff_in_hw;
        }

        /*
         * See if we can put this skb in the FPA pool. Any strange
         * behavior from the Linux networking stack will most likely
         * be caused by a bug in the following code. If some field is
         * in use by the network stack and gets carried over when a
         * buffer is reused, bad things may happen.  If in doubt and
         * you don't need the absolute best performance, disable the
         * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
         * shown a 25% increase in performance under some loads.
         */
#if REUSE_SKBUFFS_WITHOUT_FREE
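        /*
         * fpa_head is skb->head rounded up to a 128-byte boundary within
         * (skb->head + 128, skb->head + 256]; the FPA needs 128-byte
         * aligned buffers, and the skb pointer is stashed just below
         * fpa_head so it can be recovered when the buffer comes back
         * from the pool.
         */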
        fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
        if (unlikely(skb->data < fpa_head)) {
                /* TX buffer beginning can't meet FPA alignment constraints */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
                /* TX buffer isn't large enough for the FPA */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shared(skb))) {
                /* TX buffer sharing data with someone else */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_cloned(skb))) {
                /* TX buffer has been cloned */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_header_cloned(skb))) {
                /* TX buffer header has been cloned */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb->destructor)) {
                /* TX buffer has a destructor */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shinfo(skb)->nr_frags)) {
                /* TX buffer has fragments */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            (skb->truesize !=
             sizeof(*skb) + skb_end_offset(skb))) {
                /* TX buffer truesize has been changed */
                goto dont_put_skbuff_in_hw;
        }

        /*
         * We can use this buffer in the FPA.  We don't need the FAU
         * update anymore.
         */
        pko_command.s.dontfree = 0;

        hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
                           ((unsigned long)fpa_head >> 7);

        *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

        /*
         * The skbuff will be reused without ever being freed. We must
         * clean up a bunch of core state.
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
        secpath_reset(skb);
#endif
        nf_reset(skb);

#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
        skb_reset_tc(skb);
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

        /* Check if we can use the hardware checksumming */
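        /*
         * Only plain IPv4 qualifies: ihl == 5 means no IP options, and a
         * frag_off of 0 or htons(1 << 14) (DF bit only) means the packet
         * is not fragmented.
         */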
        if ((skb->protocol == htons(ETH_P_IP)) &&
            (ip_hdr(skb)->version == 4) &&
            (ip_hdr(skb)->ihl == 5) &&
            ((ip_hdr(skb)->frag_off == 0) ||
             (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
            ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
             (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
                /* Use hardware checksum calc */
                pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
        }

        if (USE_ASYNC_IOBDMA) {
                /* Get the number of skbuffs in use by the hardware */
                CVMX_SYNCIOBDMA;
                skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
        } else {
                /* Get the number of skbuffs in use by the hardware */
                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                       MAX_SKB_TO_FREE);
                buffers_to_free =
                    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
        }

        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                 priv->fau + qos * 4);

        /*
         * If we're sending faster than the receive side can free them,
         * then don't do the HW free.
         */
        if ((buffers_to_free < -100) && !pko_command.s.dontfree)
                pko_command.s.dontfree = 1;
        if (pko_command.s.dontfree) {
                queue_type = QUEUE_CORE;
                pko_command.s.reg0 = priv->fau + qos * 4;
        } else {
                queue_type = QUEUE_HW;
        }
        if (USE_ASYNC_IOBDMA)
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
                                               FAU_TOTAL_TX_TO_CLEAN, 1);

        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

        /* Drop this packet if we have too many already queued to the HW */
        if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
                     MAX_OUT_QUEUE_DEPTH)) {
                if (dev->tx_queue_len != 0) {
                        /* Drop the lock when notifying the core.  */
                        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
                                               flags);
                        netif_stop_queue(dev);
                        spin_lock_irqsave(&priv->tx_free_list[qos].lock,
                                          flags);
                } else {
                        /* If not using normal queueing.  */
                        queue_type = QUEUE_DROP;
                        goto skip_xmit;
                }
        }

        cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
                                     CVMX_PKO_LOCK_NONE);

        /* Send the packet to the output queue */
        if (unlikely(cvmx_pko_send_packet_finish(priv->port,
                                                 priv->queue + qos,
                                                 pko_command, hw_buffer,
                                                 CVMX_PKO_LOCK_NONE))) {
                printk_ratelimited("%s: Failed to send the packet\n",
                                   dev->name);
                queue_type = QUEUE_DROP;
        }
skip_xmit:
        to_free_list = NULL;

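        /*
         * QUEUE_CORE:  the core keeps the skbuff until the PKO signals
         *              completion through the FAU counter.
         * QUEUE_HW:    the PKO owns the buffer and will free it to the FPA.
         * QUEUE_DROP:  the packet was not sent; free the skbuff now.
         */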
        switch (queue_type) {
        case QUEUE_DROP:
                skb->next = to_free_list;
                to_free_list = skb;
                dev->stats.tx_dropped++;
                break;
        case QUEUE_HW:
                cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
                break;
        case QUEUE_CORE:
                __skb_queue_tail(&priv->tx_free_list[qos], skb);
                break;
        default:
                BUG();
        }

        while (skb_to_free > 0) {
                struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

                t->next = to_free_list;
                to_free_list = t;
                skb_to_free--;
        }

        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

        /* Do the actual freeing outside of the lock. */
        while (to_free_list) {
                struct sk_buff *t = to_free_list;

                to_free_list = to_free_list->next;
                dev_kfree_skb_any(t);
        }

        if (USE_ASYNC_IOBDMA) {
                CVMX_SYNCIOBDMA;
                total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                /* Restore the scratch area */
                cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
                cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
        } else {
                total_to_clean =
                        cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
        }

        if (total_to_clean & 0x3ff) {
                /*
                 * Schedule the cleanup tasklet every 1024 packets for
                 * the pathological case of high traffic on one port
                 * delaying clean up of packets on a different port
                 * that is blocked waiting for the cleanup.
                 */
                tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
        }

        cvm_oct_kick_tx_poll_watchdog();

        return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: zero always.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        void *packet_buffer;
        void *copy_location;

        /* Get a work queue entry */
        cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

        if (unlikely(!work)) {
                printk_ratelimited("%s: Failed to allocate a work queue entry\n",
                                   dev->name);
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return 0;
        }

        /* Get a packet buffer */
        packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
        if (unlikely(!packet_buffer)) {
                printk_ratelimited("%s: Failed to allocate a packet buffer\n",
                                   dev->name);
                cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return 0;
        }

        /*
         * Calculate where we need to copy the data to. We need to
         * leave 8 bytes for a next pointer (unused). We also need to
         * include any configured skip. Then we need to align the IP
         * packet src and dest into the same 64bit word. The below
         * calculation may add a little extra, but that doesn't
         * hurt.
         */
        copy_location = packet_buffer + sizeof(u64);
        copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

        /*
         * We have to copy the packet since whoever processes this
         * packet will free it to a hardware pool. We can't use the
         * trick of counting outstanding packets like in
         * cvm_oct_xmit.
         */
        memcpy(copy_location, skb->data, skb->len);

        /*
         * Fill in some of the work queue fields. We may need to add
         * more if the software at the other end needs them.
         */
        if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
                work->word0.pip.cn38xx.hw_chksum = skb->csum;
        work->word1.len = skb->len;
        cvmx_wqe_set_port(work, priv->port);
        cvmx_wqe_set_qos(work, priv->port & 0x7);
        cvmx_wqe_set_grp(work, pow_send_group);
        work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
        work->word1.tag = pow_send_group;       /* FIXME */
        /* Default to zero; the explicit zeroing below is commented out */
        work->word2.u64 = 0;
        work->word2.s.bufs = 1;
        work->packet_ptr.u64 = 0;
        work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
        work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
        work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
        work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

        if (skb->protocol == htons(ETH_P_IP)) {
                work->word2.s.ip_offset = 14;
#if 0
                work->word2.s.vlan_valid = 0;   /* FIXME */
                work->word2.s.vlan_cfi = 0;     /* FIXME */
                work->word2.s.vlan_id = 0;      /* FIXME */
                work->word2.s.dec_ipcomp = 0;   /* FIXME */
#endif
                work->word2.s.tcp_or_udp =
                    (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
                    (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
                /* FIXME */
                work->word2.s.dec_ipsec = 0;
                /* We only support IPv4 right now */
                work->word2.s.is_v6 = 0;
                /* Hardware would set to zero */
                work->word2.s.software = 0;
                /* No error, packet is internal */
                work->word2.s.L4_error = 0;
#endif
                work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
                                          (ip_hdr(skb)->frag_off ==
                                              1 << 14));
#if 0
                /* Assume Linux is sending a good packet */
                work->word2.s.IP_exc = 0;
#endif
                work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
                work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
                /* This is an IP packet */
                work->word2.s.not_IP = 0;
                /* No error, packet is internal */
                work->word2.s.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.s.err_code = 0;
#endif

                /*
                 * When copying the data, include 4 bytes of the
                 * ethernet header to align the same way hardware
                 * does.
                 */
                memcpy(work->packet_data, skb->data + 10,
                       sizeof(work->packet_data));
        } else {
#if 0
                work->word2.snoip.vlan_valid = 0;       /* FIXME */
                work->word2.snoip.vlan_cfi = 0; /* FIXME */
                work->word2.snoip.vlan_id = 0;  /* FIXME */
                work->word2.snoip.software = 0; /* Hardware would set to zero */
#endif
                work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
                work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
                work->word2.snoip.is_bcast =
                    (skb->pkt_type == PACKET_BROADCAST);
                work->word2.snoip.is_mcast =
                    (skb->pkt_type == PACKET_MULTICAST);
                work->word2.snoip.not_IP = 1;   /* IP was done up above */
#if 0
                /* No error, packet is internal */
                work->word2.snoip.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.snoip.err_code = 0;
#endif
                memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
        }

        /* Submit the packet to the POW */
        cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
                             cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        dev_consume_skb_any(skb);
        return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
 * @dev:    Device being shut down
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        unsigned long flags;
        int qos;

        for (qos = 0; qos < 16; qos++) {
                spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                while (skb_queue_len(&priv->tx_free_list[qos]))
                        dev_kfree_skb_any(__skb_dequeue
                                          (&priv->tx_free_list[qos]));
                spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
        }
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
        int port;

        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];

                        cvm_oct_free_tx_skbs(dev);
                }
        }
}

static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
        /* Disable the interrupt.  */
        cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
        /* Do the work in the tasklet.  */
        tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
        return IRQ_HANDLED;
}

void cvm_oct_tx_initialize(void)
{
        int i;

        /* Disable the interrupt.  */
        cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
        /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
        i = request_irq(OCTEON_IRQ_TIMER1,
                        cvm_oct_tx_cleanup_watchdog, 0,
                        "Ethernet", cvm_oct_device);

        if (i)
                panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}