net: hd64570: fix the code style issue about "foo* bar"
[linux-2.6-block.git] / drivers / net / wan / hd64570.c
CommitLineData
25763b3c 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
88597364 3 * Hitachi SCA HD64570 driver for Linux
1da177e4
LT
4 *
5 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
88597364 7 * Source of information: Hitachi HD64570 SCA User's Manual
1da177e4
LT
8 *
9 * We use the following SCA memory map:
10 *
11 * Packet buffer descriptor rings - starting from winbase or win0base:
12 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
13 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
14 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
15 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
16 *
17 * Packet data buffers - starting from winbase + buff_offset:
18 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
19 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
22 */
23
88597364
KH
24#include <linux/bitops.h>
25#include <linux/errno.h>
1da177e4 26#include <linux/fcntl.h>
88597364 27#include <linux/hdlc.h>
1da177e4 28#include <linux/in.h>
88597364 29#include <linux/interrupt.h>
1da177e4 30#include <linux/ioport.h>
88597364
KH
31#include <linux/jiffies.h>
32#include <linux/kernel.h>
33#include <linux/module.h>
1da177e4
LT
34#include <linux/netdevice.h>
35#include <linux/skbuff.h>
88597364
KH
36#include <linux/string.h>
37#include <linux/types.h>
38#include <asm/io.h>
7c0f6ba6 39#include <linux/uaccess.h>
88597364 40#include "hd64570.h"
1da177e4
LT
41
42#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
43#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
44#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
45
46#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01)
47#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
48#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
49
1da177e4
LT
50static inline struct net_device *port_to_dev(port_t *port)
51{
52 return port->dev;
53}
54
55static inline int sca_intr_status(card_t *card)
56{
57 u8 result = 0;
1da177e4
LT
58 u8 isr0 = sca_in(ISR0, card);
59 u8 isr1 = sca_in(ISR1, card);
60
61 if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
62 if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
63 if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
64 if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
65 if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
66 if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
67
1da177e4
LT
68 if (!(result & SCA_INTR_DMAC_TX(0)))
69 if (sca_in(DSR_TX(0), card) & DSR_EOM)
70 result |= SCA_INTR_DMAC_TX(0);
71 if (!(result & SCA_INTR_DMAC_TX(1)))
72 if (sca_in(DSR_TX(1), card) & DSR_EOM)
73 result |= SCA_INTR_DMAC_TX(1);
74
75 return result;
76}
77
/* Return the SCA port stored as the HDLC device's private data. */
static inline port_t *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
82
83static inline u16 next_desc(port_t *port, u16 desc, int transmit)
84{
85 return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
86 : port_to_card(port)->rx_ring_buffers);
87}
88
1da177e4
LT
89static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
90{
91 u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
92 u16 tx_buffs = port_to_card(port)->tx_ring_buffers;
93
94 desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
95 return log_node(port) * (rx_buffs + tx_buffs) +
96 transmit * rx_buffs + desc;
97}
98
1da177e4
LT
99static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
100{
fcfe9ff3 101 /* Descriptor offset always fits in 16 bits */
1da177e4
LT
102 return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
103}
104
88597364
KH
105static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
106 int transmit)
1da177e4
LT
107{
108#ifdef PAGE0_ALWAYS_MAPPED
109 return (pkt_desc __iomem *)(win0base(port_to_card(port))
88597364 110 + desc_offset(port, desc, transmit));
1da177e4
LT
111#else
112 return (pkt_desc __iomem *)(winbase(port_to_card(port))
88597364 113 + desc_offset(port, desc, transmit));
1da177e4
LT
114#endif
115}
116
1da177e4
LT
117static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
118{
119 return port_to_card(port)->buff_offset +
120 desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
121}
122
c2ce9204
KH
123static inline void sca_set_carrier(port_t *port)
124{
125 if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
126#ifdef DEBUG_LINK
127 printk(KERN_DEBUG "%s: sca_set_carrier on\n",
128 port_to_dev(port)->name);
129#endif
130 netif_carrier_on(port_to_dev(port));
131 } else {
132#ifdef DEBUG_LINK
133 printk(KERN_DEBUG "%s: sca_set_carrier off\n",
134 port_to_dev(port)->name);
135#endif
136 netif_carrier_off(port_to_dev(port));
137 }
138}
139
/* Initialize one port: build the chained RX and TX descriptor rings,
 * bring both DMACs to a known halted state, program their descriptor
 * pointers, and enable RX DMA.  TX DMA stays disabled until there is
 * something to send.
 */
static void sca_init_port(port_t *port)
{
	card_t *card = port_to_card(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

#ifndef PAGE0_ALWAYS_MAPPED
	/* descriptor table lives on page 0 */
	openwin(card, 0);
#endif

	for (transmit = 0; transmit < 2; transmit++) {
		u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		/* chain each descriptor to the next one (i + 1 wraps in
		 * desc_offset/desc_abs_number) and attach its data buffer */
		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writew(chain_off, &desc->cp);
			writel(buff_off, &desc->bp);
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}

		/* DMA disable - to halt state */
		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
			DSR_RX(phy_node(port)), card);
		/* software ABORT - to initial state */
		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		/* current desc addr */
		sca_out(0, dmac + CPB, card); /* pointer base */
		sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
		/* RX: error descriptor = last ring entry so the whole ring
		 * is available; TX: ring starts empty (EDA == CDA) */
		if (!transmit)
			sca_outw(desc_offset(port, buffs - 1, transmit),
				 dmac + EDAL, card);
		else
			sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
				 card);

		/* clear frame end interrupt counter */
		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		if (!transmit) { /* Receive */
			/* set buffer length */
			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_RX(phy_node(port)), card);
			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
				card);
			/* DMA enable */
			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
		} else { /* Transmit */
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_TX(phy_node(port)), card);
			/* enable underflow interrupts */
			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
		}
	}
	/* report initial carrier state */
	sca_set_carrier(port);
}
208
1da177e4
LT
#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service: handle TX underrun and DCD (carrier) change. */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	/* DCD level changed - propagate the new carrier state */
	if (stat & ST1_CDCD)
		sca_set_carrier(port);
}
#endif
230
88597364
KH
/* Copy one received frame out of card memory into a fresh skb and hand
 * it to the network stack.  The frame's data buffer may straddle a
 * memory-window boundary, in which case it is copied in two pieces.
 */
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
			  u16 rxin)
{
	struct net_device *dev = port_to_dev(port);
	struct sk_buff *skb;
	u16 len;
	u32 buff;
	u32 maxlen;
	u8 page;

	len = readw(&desc->len);
	skb = dev_alloc_skb(len);
	if (!skb) {
		/* out of memory - count the frame as dropped */
		dev->stats.rx_dropped++;
		return;
	}

	/* locate the data buffer: window page + offset within the window */
	buff = buffer_offset(port, rxin, 0);
	page = buff / winsize(card);
	buff = buff % winsize(card);
	maxlen = winsize(card) - buff; /* bytes left in the current window */

	openwin(card, page);

	if (len > maxlen) {
		/* frame crosses the window boundary - copy in two parts */
		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
		openwin(card, page + 1);
		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
	} else
		memcpy_fromio(skb->data, winbase(card) + buff, len);

#ifndef PAGE0_ALWAYS_MAPPED
	openwin(card, 0);	/* select pkt_desc table page back */
#endif
	skb_put(skb, len);
#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->protocol = hdlc_type_trans(skb, dev);
	netif_rx(skb);
}
275
1da177e4
LT
/* Receive DMA interrupt service: acknowledge DSR status, then walk the
 * RX ring from rxin up to (but not including) the descriptor the DMAC
 * is currently working on, delivering good frames and counting errors.
 */
static inline void sca_rx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_rx(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(phy_node(port)), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (1) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inw(dmac + CDAL, card);

		/* stop when we catch up with the DMAC's current descriptor */
		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else
			sca_rx(card, port, desc, port->rxin);

		/* Set new error descriptor address */
		sca_outw(desc_off, dmac + EDAL, card);
		port->rxin = next_desc(port, port->rxin, 0);
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
326
1da177e4
LT
/* Transmit DMA interrupt service: acknowledge DSR status, reclaim all
 * descriptors the DMAC has finished with (txlast up to the current
 * descriptor), account the transmitted bytes and wake the TX queue.
 */
static inline void sca_tx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_tx(port);
	card_t *card = port_to_card(port);
	u8 stat;

	/* serialize against sca_xmit() */
	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(phy_node(port)), card);

	while (1) {
		pkt_desc __iomem *desc;

		u32 desc_off = desc_offset(port, port->txlast, 1);
		u32 cda = sca_inw(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* Transmitter is/will_be sending this frame */

		desc = desc_address(port, port->txlast, 1);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += readw(&desc->len);
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = next_desc(port, port->txlast, 1);
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
362
1d1fa598 363static irqreturn_t sca_intr(int irq, void *dev_id)
1da177e4
LT
364{
365 card_t *card = dev_id;
366 int i;
367 u8 stat;
368 int handled = 0;
1da177e4 369 u8 page = sca_get_page(card);
1da177e4
LT
370
371 while((stat = sca_intr_status(card)) != 0) {
372 handled = 1;
373 for (i = 0; i < 2; i++) {
374 port_t *port = get_port(card, i);
d364c0a9 375
1da177e4
LT
376 if (port) {
377 if (stat & SCA_INTR_MSCI(i))
378 sca_msci_intr(port);
379
380 if (stat & SCA_INTR_DMAC_RX(i))
381 sca_rx_intr(port);
382
383 if (stat & SCA_INTR_DMAC_TX(i))
384 sca_tx_intr(port);
385 }
386 }
387 }
388
1da177e4 389 openwin(card, page); /* Restore original page */
1da177e4
LT
390 return IRQ_RETVAL(handled);
391}
392
1da177e4
LT
/* Program the baud-rate generator and loopback mode for a port.
 * Baud Rate = CLOCK_BASE / TMC / 2^BR; the loop searches for the BR/TMC
 * pair that best approximates the requested clock_rate, then writes the
 * achieved rate back into port->settings.clock_rate.
 */
static void sca_set_port(port_t *port)
{
	card_t *card = port_to_card(port);
	u16 msci = get_msci(port);
	u8 md2 = sca_in(msci + MD2, card);
	unsigned int tmc, br = 10, brv = 1024;

	if (port->settings.clock_rate > 0) {
		/* Try lower br for better accuracy*/
		do {
			br--;
			brv >>= 1; /* brv = 2^9 = 512 max in specs */

			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
		}while (br > 1 && tmc <= 128);

		/* clamp TMC into the 1..256 range the hardware accepts */
		if (tmc < 1) {
			tmc = 1;
			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
			brv = 1;
		} else if (tmc > 255)
			tmc = 256; /* tmc=0 means 256 - low baud rates */

		/* store the rate actually achieved */
		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
	} else {
		br = 9; /* Minimum clock rate */
		tmc = 256;	/* 8bit = 0 */
		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
	}

	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
	port->tmc = tmc;

	/* baud divisor - time constant*/
	sca_out(port->tmc, msci + TMC, card);

	/* Set BRG bits */
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;
	else
		md2 &= ~MD2_LOOPBACK;

	sca_out(md2, msci + MD2, card);
}
442
1da177e4
LT
/* Bring the channel up: select encoding/CRC mode, program the MSCI,
 * enable the interrupt sources we use, restore the baud-rate registers
 * set by sca_set_port(), enable TX/RX and start the queue.
 */
static void sca_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	u16 msci = get_msci(port);
	u8 md0, md2;

	/* line encoding -> MD2 */
	switch(port->encoding) {
	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
	default:		md2 = MD2_MANCHESTER;
	}

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;

	/* CRC variant -> MD0 (always HDLC mode) */
	switch(port->parity) {
	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
	case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
	}

	sca_out(CMD_RESET, msci + CMD, card);
	sca_out(md0, msci + MD0, card);
	sca_out(0x00, msci + MD1, card); /* no address field check */
	sca_out(md2, msci + MD2, card);
	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
	sca_out(CTL_IDLE, msci + CTL, card);

	/* Allow at least 8 bytes before requesting RX DMA operation */
	/* TX with higher priority and possibly with shorter transfers */
	sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
	sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
	sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */

/* We're using the following interrupts:
   - TXINT (DMAC completed all transmisions, underrun or DCD change)
   - all DMA interrupts
*/
	sca_set_carrier(port);

	/* MSCI TX INT and RX INT A IRQ enable */
	sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
	sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
	sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
		IER0, card); /* TXINT and RXINT */
	/* enable DMA IRQ */
	sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
		IER1, card);

	sca_out(port->tmc, msci + TMC, card); /* Restore registers */
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);
	sca_out(CMD_TX_ENABLE, msci + CMD, card);
	sca_out(CMD_RX_ENABLE, msci + CMD, card);

	netif_start_queue(dev);
}
505
1da177e4
LT
506static void sca_close(struct net_device *dev)
507{
508 port_t *port = dev_to_port(dev);
1d1fa598 509 card_t *card = port_to_card(port);
1da177e4
LT
510
511 /* reset channel */
512 sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
1da177e4
LT
513 /* disable MSCI interrupts */
514 sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
515 IER0, card);
516 /* disable DMA interrupts */
517 sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
518 IER1, card);
88597364 519
1da177e4
LT
520 netif_stop_queue(dev);
521}
522
1da177e4
LT
523static int sca_attach(struct net_device *dev, unsigned short encoding,
524 unsigned short parity)
525{
526 if (encoding != ENCODING_NRZ &&
527 encoding != ENCODING_NRZI &&
528 encoding != ENCODING_FM_MARK &&
529 encoding != ENCODING_FM_SPACE &&
530 encoding != ENCODING_MANCHESTER)
531 return -EINVAL;
532
533 if (parity != PARITY_NONE &&
534 parity != PARITY_CRC16_PR0 &&
535 parity != PARITY_CRC16_PR1 &&
1da177e4 536 parity != PARITY_CRC16_PR0_CCITT &&
1da177e4
LT
537 parity != PARITY_CRC16_PR1_CCITT)
538 return -EINVAL;
539
540 dev_to_port(dev)->encoding = encoding;
541 dev_to_port(dev)->parity = parity;
542 return 0;
543}
544
1da177e4
LT
#ifdef DEBUG_RINGS
/* Debug helper: dump RX/TX ring pointers and per-descriptor status,
 * plus the MSCI mode/status and global interrupt status registers.
 */
static void sca_dump_rings(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	u16 cnt;
#ifndef PAGE0_ALWAYS_MAPPED
	u8 page = sca_get_page(card);

	/* descriptor table lives on page 0 */
	openwin(card, 0);
#endif

	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
	       sca_inw(get_dmac_rx(port) + CDAL, card),
	       sca_inw(get_dmac_rx(port) + EDAL, card),
	       sca_in(DSR_RX(phy_node(port)), card), port->rxin,
	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
	for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
		pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
	pr_cont("\n");

	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
	       "last=%u %sactive",
	       sca_inw(get_dmac_tx(port) + CDAL, card),
	       sca_inw(get_dmac_tx(port) + EDAL, card),
	       sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
	       sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");

	for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
		pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
	pr_cont("\n");

	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
	       " FST: %02x CST: %02x %02x\n",
	       sca_in(get_msci(port) + MD0, card),
	       sca_in(get_msci(port) + MD1, card),
	       sca_in(get_msci(port) + MD2, card),
	       sca_in(get_msci(port) + ST0, card),
	       sca_in(get_msci(port) + ST1, card),
	       sca_in(get_msci(port) + ST2, card),
	       sca_in(get_msci(port) + ST3, card),
	       sca_in(get_msci(port) + FST, card),
	       sca_in(get_msci(port) + CST0, card),
	       sca_in(get_msci(port) + CST1, card));

	printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
	       sca_in(ISR1, card), sca_in(ISR2, card));

#ifndef PAGE0_ALWAYS_MAPPED
	openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */
598
/* Hard-start transmit: copy the skb into the next free TX data buffer
 * (possibly split across a memory-window boundary), mark its descriptor
 * ready, advance EDA and kick TX DMA.  The queue is stopped when only
 * one free descriptor remains (the "1 packet gap").
 */
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	pkt_desc __iomem *desc;
	u32 buff, len;
	u8 page;
	u32 maxlen;

	spin_lock_irq(&port->lock);

	desc = desc_address(port, port->txin + 1, 1);
	BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */

#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif

	desc = desc_address(port, port->txin, 1);
	buff = buffer_offset(port, port->txin, 1);
	len = skb->len;
	/* locate the data buffer: window page + offset within the window */
	page = buff / winsize(card);
	buff = buff % winsize(card);
	maxlen = winsize(card) - buff;

	openwin(card, page);
	if (len > maxlen) {
		/* frame crosses the window boundary - copy in two parts */
		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
		openwin(card, page + 1);
		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
	} else
		memcpy_toio(winbase(card) + buff, skb->data, len);

#ifndef PAGE0_ALWAYS_MAPPED
	openwin(card, 0);	/* select pkt_desc table page back */
#endif
	writew(len, &desc->len);
	writeb(ST_TX_EOM, &desc->stat);

	port->txin = next_desc(port, port->txin, 1);
	sca_outw(desc_offset(port, port->txin, 1),
		 get_dmac_tx(port) + EDAL, card);

	sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */

	desc = desc_address(port, port->txin + 1, 1);
	if (readb(&desc->stat)) /* allow 1 packet gap */
		netif_stop_queue(dev);

	spin_unlock_irq(&port->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
654
#ifdef NEED_DETECT_RAM
/* Probe how much on-card RAM is actually present: write a distinctive
 * pattern through every 32-bit word (from the end down, paging the
 * memory window as needed), then read back from the start until the
 * pattern no longer matches.  Returns the usable RAM size in bytes.
 */
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
	/* Round RAM size to 32 bits, fill from end to start */
	u32 i = ramsize &= ~3;
	u32 size = winsize(card);

	openwin(card, (i - 4) / size); /* select last window */

	do {
		i -= 4;
		/* switch window when crossing a window boundary downwards */
		if ((i + 4) % size == 0)
			openwin(card, i / size);
		writel(i ^ 0x12345678, rambase + i % size);
	} while (i > 0);

	/* verify from the start; first mismatch marks the end of RAM */
	for (i = 0; i < ramsize ; i += 4) {
		if (i % size == 0)
			openwin(card, i / size);

		if (readl(rambase + i % size) != (i ^ 0x12345678))
			break;
	}

	return i;
}
#endif /* NEED_DETECT_RAM */
682
/* One-time global SCA initialization: program bus wait states, set DMA
 * priority, halt all four DMAC channels, then enable the DMA master.
 */
static void sca_init(card_t *card, int wait_states)
{
	sca_out(wait_states, WCRL, card); /* Wait Control */
	sca_out(wait_states, WCRM, card);
	sca_out(wait_states, WCRH, card);

	sca_out(0, DMER, card); /* DMA Master disable */
	sca_out(0x03, PCR, card); /* DMA priority */
	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
	sca_out(0, DSR_TX(0), card);
	sca_out(0, DSR_RX(1), card);
	sca_out(0, DSR_TX(1), card);
	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}