/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	512
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

#define TSO_HEADER_SIZE		128

/* Max number of allowed TCP segments for software TSO */
#define MV643XX_MAX_TSO_SEGS	100
#define MV643XX_MAX_SKB_DESCS	(MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
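/*
 * Note (illustrative): TSO packet headers are built in a per-queue
 * coherent buffer (txq->tso_hdrs), one TSO_HEADER_SIZE slot per ring
 * entry, so a descriptor's buffer pointer can be tested against that
 * area to tell rebuilt headers apart from DMA-mapped payload buffers.
 */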
#define DESC_DMA_MAP_SINGLE	0
#define DESC_DMA_MAP_PAGE	1

/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define GEN_TCP_UDP_CHK_FULL		0x00000400
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
	struct clk *clk;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
	/* Non MIB hardware counters */
	u32 rx_discard;
	u32 rx_overrun;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	int tx_stop_threshold;
	int tx_wake_threshold;

	char *tso_hdrs;
	dma_addr_t tso_hdrs_dma;

	struct tx_desc *tx_desc_area;
	char *tx_desc_mapping; /* array to track the type of the dma mapping */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];

	/*
	 * Hardware-specific parameters.
	 */
	struct clk *clk;
	unsigned int t_clk;
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}
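/*
 * Illustrative note: rdl()/wrl() address the register block shared by
 * all ports, while rdlp()/wrlp() address the current port's block via
 * mp->base (0x0400 for port #0, 0x0800 for port #1, see above), so
 * e.g. enabling receive queue 0 is just wrlp(mp, RXQ_COMMAND, 1 << 0).
 */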

/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		napi_gro_receive(&mp->napi, skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = netdev_alloc_skb(mp->dev, mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}
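		/* Presumably here to make skb->data cache-line aligned
		 * for the DMA mapping below; netdev_alloc_skb() only
		 * guarantees NET_SKB_PAD bytes of headroom.
		 */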
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb_end_pointer(skb) - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
		       u16 *l4i_chk, u32 *command, int length)
{
	int ret;
	u32 cmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;

		if (length - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			ret = skb_checksum_help(skb);
			if (!ret)
				goto no_csum;
			return ret;
		}

		if (tag_bytes & 4)
			cmd |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd |= MAC_HDR_EXTRA_8_BYTES;

		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
		       GEN_IP_V4_CHECKSUM |
		       ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
		 * it seems we don't need to pass the initial checksum. */
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd |= UDP_FRAME;
			*l4i_chk = 0;
			break;
		case IPPROTO_TCP:
			*l4i_chk = 0;
			break;
		default:
			WARN(1, "protocol not supported");
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd |= 5 << TX_IHL_SHIFT;
	}
	*command = cmd;
	return 0;
}
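/*
 * Worked example for the header-length handling above: for an
 * 802.1Q-tagged IPv4 frame, hdr_len is ETH_HLEN + 4, so tag_bytes
 * is 4 and MAC_HDR_EXTRA_4_BYTES gets set; anything other than 0,
 * 4, 8 or 12 tag bytes fails the "tag_bytes & ~12" test and falls
 * back to software checksumming via skb_checksum_help().
 */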

static inline int
txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
		 struct sk_buff *skb, char *data, int length,
		 bool last_tcp, bool is_last)
{
	int tx_index;
	u32 cmd_sts;
	struct tx_desc *desc;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	desc->l4i_chk = 0;
	desc->byte_cnt = length;

	if (length <= 8 && (uintptr_t)data & 0x7) {
		/* Copy unaligned small data fragment to TSO header data area */
		memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
		       data, length);
		desc->buf_ptr = txq->tso_hdrs_dma
			+ tx_index * TSO_HEADER_SIZE;
	} else {
		/* Alignment is okay, map buffer and hand off to hardware */
		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
					       length, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent,
					       desc->buf_ptr))) {
			WARN(1, "dma_map_single failed!\n");
			return -ENOMEM;
		}
	}

	cmd_sts = BUFFER_OWNED_BY_DMA;
	if (last_tcp) {
		/* last descriptor in the TCP packet */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
		/* last descriptor in SKB */
		if (is_last)
			cmd_sts |= TX_ENABLE_INTERRUPT;
	}
	desc->cmd_sts = cmd_sts;
	return 0;
}

static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
		u32 *first_cmd_sts, bool first_desc)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tx_index;
	struct tx_desc *desc;
	int ret;
	u32 cmd_csum = 0;
	u16 l4i_chk = 0;
	u32 cmd_sts;

	tx_index = txq->tx_curr_desc;
	desc = &txq->tx_desc_area[tx_index];

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
	if (ret)
		WARN(1, "failed to prepare checksum!");

	/* Should we set this? Can't use the value from skb_tx_csum()
	 * as it's not the correct initial L4 checksum to use. */
	desc->l4i_chk = 0;

	desc->byte_cnt = hdr_len;
	desc->buf_ptr = txq->tso_hdrs_dma +
			txq->tx_curr_desc * TSO_HEADER_SIZE;
	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
		  GEN_CRC;

	/* Defer updating the first command descriptor until all
	 * following descriptors have been written.
	 */
	if (first_desc)
		*first_cmd_sts = cmd_sts;
	else
		desc->cmd_sts = cmd_sts;

	txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
}

static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int total_len, data_left, ret;
	int desc_count = 0;
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tx_desc *first_tx_desc;
	u32 first_cmd_sts = 0;

	/* Count needed descriptors */
	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
		netdev_dbg(dev, "not enough descriptors for TSO!\n");
		return -EBUSY;
	}

	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		bool first_desc = (desc_count == 0);
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
				first_desc);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);
			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
					       size == data_left,
					       total_len == 0);
			if (ret)
				goto err_release;
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	__skb_queue_tail(&txq->tx_skb, skb);
	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	first_tx_desc->cmd_sts = first_cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);
	txq->tx_desc_count += desc_count;
	return 0;
err_release:
	/* TODO: Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	return ret;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];
		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0, desc->byte_cnt,
						 DMA_TO_DEVICE);
	}
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length, ret;

	cmd_sts = 0;
	l4i_chk = 0;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		return -EBUSY;
	}

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
	if (ret)
		return ret;
	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue, ret;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	length = skb->len;

	if (skb_is_gso(skb))
		ret = txq_submit_tso(txq, skb, dev);
	else
		ret = txq_submit_skb(txq, skb, dev);
	if (!ret) {
		txq->tx_bytes += length;
		txq->tx_packets++;

		if (txq->tx_desc_count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);
	} else {
		txq->tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
			txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock_bh(nq);

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		char desc_dma_map;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		desc_dma_map = txq->tx_desc_mapping[tx_index];

		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;
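		/* TSO headers live in the coherent tso_hdrs area and were
		 * never mapped individually, so only unmap buffers that
		 * lie outside that area.
		 */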
		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {

			if (desc_dma_map == DESC_DMA_MAP_PAGE)
				dma_unmap_page(mp->dev->dev.parent,
					       desc->buf_ptr,
					       desc->byte_cnt,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(mp->dev->dev.parent,
						 desc->buf_ptr,
						 desc->byte_cnt,
						 DMA_TO_DEVICE);
		}

		if (cmd_sts & TX_ENABLE_INTERRUPT) {
			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);

			if (!WARN_ON(!skb))
				dev_kfree_skb(skb);
		}

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

	}

	__netif_tx_unlock_bh(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
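/*
 * Illustrative numbers: the token rate programmed below is expressed
 * in bits per 64 t_clk cycles, so for rate = 100 Mbit/s on a 133 MHz
 * t_clk, token_rate = ((100000000 / 1000) * 64) / (133000000 / 1000),
 * i.e. about 48, well under the 1023 hardware maximum.
 */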
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static void mv643xx_eth_adjust_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (dev->phydev->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (dev->phydev->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (dev->phydev->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (dev->phydev->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);

	/* Clear non MIB hw counters also */
	rdlp(mp, RX_DISCARD_FRAME_CNT);
	rdlp(mp, RX_OVERRUN_FRAME_CNT);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;
	mib_counters_update(mp);
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
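/*
 * Worked example (illustrative numbers): with t_clk running at
 * 133 MHz, a 100 usec coalescing delay corresponds to a register
 * value of 100 * 133000000 / 64000000, which rounds to 208.
 */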
1371static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1372{
1373 u32 val = rdlp(mp, SDMA_CONFIG);
1374 u64 temp;
1375
1376 if (mp->shared->extended_rx_coal_limit)
1377 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
1378 else
1379 temp = (val & 0x003fff00) >> 8;
1380
1381 temp *= 64000000;
b9b84fc0 1382 temp += mp->t_clk / 2;
452503eb 1383 do_div(temp, mp->t_clk);
3e508034
LB
1384
1385 return (unsigned int)temp;
1386}
1387
1388static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1389{
1390 u64 temp;
1391 u32 val;
1392
452503eb 1393 temp = (u64)usec * mp->t_clk;
3e508034
LB
1394 temp += 31999999;
1395 do_div(temp, 64000000);
1396
1397 val = rdlp(mp, SDMA_CONFIG);
1398 if (mp->shared->extended_rx_coal_limit) {
1399 if (temp > 0xffff)
1400 temp = 0xffff;
1401 val &= ~0x023fff80;
1402 val |= (temp & 0x8000) << 10;
1403 val |= (temp & 0x7fff) << 7;
1404 } else {
1405 if (temp > 0x3fff)
1406 temp = 0x3fff;
1407 val &= ~0x003fff00;
1408 val |= (temp & 0x3fff) << 8;
1409 }
1410 wrlp(mp, SDMA_CONFIG, val);
1411}
1412
1413static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1414{
1415 u64 temp;
1416
1417 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1418 temp *= 64000000;
b9b84fc0 1419 temp += mp->t_clk / 2;
452503eb 1420 do_div(temp, mp->t_clk);
3e508034
LB
1421
1422 return (unsigned int)temp;
1423}
1424
1425static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1426{
1427 u64 temp;
1428
452503eb 1429 temp = (u64)usec * mp->t_clk;
3e508034
LB
1430 temp += 31999999;
1431 do_div(temp, 64000000);
1432
1433 if (temp > 0x3fff)
1434 temp = 0x3fff;
1435
1436 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1437}
1438
1439
c9df406f 1440/* ethtool ******************************************************************/
e5371493 1441struct mv643xx_eth_stats {
c9df406f
LB
1442 char stat_string[ETH_GSTRING_LEN];
1443 int sizeof_stat;
16820054
LB
1444 int netdev_off;
1445 int mp_off;
c9df406f
LB
1446};
1447
16820054
LB
1448#define SSTAT(m) \
1449 { #m, FIELD_SIZEOF(struct net_device_stats, m), \
1450 offsetof(struct net_device, stats.m), -1 }
1451
1452#define MIBSTAT(m) \
1453 { #m, FIELD_SIZEOF(struct mib_counters, m), \
1454 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1455
1456static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1457 SSTAT(rx_packets),
1458 SSTAT(tx_packets),
1459 SSTAT(rx_bytes),
1460 SSTAT(tx_bytes),
1461 SSTAT(rx_errors),
1462 SSTAT(tx_errors),
1463 SSTAT(rx_dropped),
1464 SSTAT(tx_dropped),
1465 MIBSTAT(good_octets_received),
1466 MIBSTAT(bad_octets_received),
1467 MIBSTAT(internal_mac_transmit_err),
1468 MIBSTAT(good_frames_received),
1469 MIBSTAT(bad_frames_received),
1470 MIBSTAT(broadcast_frames_received),
1471 MIBSTAT(multicast_frames_received),
1472 MIBSTAT(frames_64_octets),
1473 MIBSTAT(frames_65_to_127_octets),
1474 MIBSTAT(frames_128_to_255_octets),
1475 MIBSTAT(frames_256_to_511_octets),
1476 MIBSTAT(frames_512_to_1023_octets),
1477 MIBSTAT(frames_1024_to_max_octets),
1478 MIBSTAT(good_octets_sent),
1479 MIBSTAT(good_frames_sent),
1480 MIBSTAT(excessive_collision),
1481 MIBSTAT(multicast_frames_sent),
1482 MIBSTAT(broadcast_frames_sent),
1483 MIBSTAT(unrec_mac_control_received),
1484 MIBSTAT(fc_sent),
1485 MIBSTAT(good_fc_received),
1486 MIBSTAT(bad_fc_received),
1487 MIBSTAT(undersize_received),
1488 MIBSTAT(fragments_received),
1489 MIBSTAT(oversize_received),
1490 MIBSTAT(jabber_received),
1491 MIBSTAT(mac_receive_error),
1492 MIBSTAT(bad_crc_event),
1493 MIBSTAT(collision),
1494 MIBSTAT(late_collision),
302476c9
PZ
1495 MIBSTAT(rx_discard),
1496 MIBSTAT(rx_overrun),
c9df406f
LB
1497};
1498
10a9948d 1499static int
a54e1612
PR
1500mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
1501 struct ethtool_link_ksettings *cmd)
d0412d96 1502{
1e8a655d 1503 struct net_device *dev = mp->dev;
d0412d96 1504 int err;
a54e1612 1505 u32 supported, advertising;
d0412d96 1506
1e8a655d 1507 err = phy_read_status(dev->phydev);
ed94493f 1508 if (err == 0)
a54e1612 1509 err = phy_ethtool_ksettings_get(dev->phydev, cmd);
d0412d96 1510
fc32b0e2
LB
1511 /*
1512 * The MAC does not support 1000baseT_Half.
1513 */
a54e1612
PR
1514 ethtool_convert_link_mode_to_legacy_u32(&supported,
1515 cmd->link_modes.supported);
1516 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1517 cmd->link_modes.advertising);
1518 supported &= ~SUPPORTED_1000baseT_Half;
1519 advertising &= ~ADVERTISED_1000baseT_Half;
1520 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1521 supported);
1522 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1523 advertising);
d0412d96
JC
1524
1525 return err;
1526}
1527
10a9948d 1528static int
a54e1612
PR
1529mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
1530 struct ethtool_link_ksettings *cmd)
bedfe324 1531{
81600eea 1532 u32 port_status;
a54e1612 1533 u32 supported, advertising;
81600eea 1534
37a6084f 1535 port_status = rdlp(mp, PORT_STATUS);
81600eea 1536
a54e1612
PR
1537 supported = SUPPORTED_MII;
1538 advertising = ADVERTISED_MII;
81600eea
LB
1539 switch (port_status & PORT_SPEED_MASK) {
1540 case PORT_SPEED_10:
a54e1612 1541 cmd->base.speed = SPEED_10;
81600eea
LB
1542 break;
1543 case PORT_SPEED_100:
a54e1612 1544 cmd->base.speed = SPEED_100;
81600eea
LB
1545 break;
1546 case PORT_SPEED_1000:
a54e1612 1547 cmd->base.speed = SPEED_1000;
81600eea
LB
1548 break;
1549 default:
a54e1612 1550 cmd->base.speed = -1;
81600eea
LB
1551 break;
1552 }
a54e1612
PR
1553 cmd->base.duplex = (port_status & FULL_DUPLEX) ?
1554 DUPLEX_FULL : DUPLEX_HALF;
1555 cmd->base.port = PORT_MII;
1556 cmd->base.phy_address = 0;
1557 cmd->base.autoneg = AUTONEG_DISABLE;
1558
1559 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1560 supported);
1561 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1562 advertising);
bedfe324
LB
1563
1564 return 0;
1565}
1566
3871c387
MS
1567static void
1568mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1569{
3871c387
MS
1570 wol->supported = 0;
1571 wol->wolopts = 0;
1e8a655d
PR
1572 if (dev->phydev)
1573 phy_ethtool_get_wol(dev->phydev, wol);
3871c387
MS
1574}
1575
1576static int
1577mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1578{
3871c387
MS
1579 int err;
1580
1e8a655d 1581 if (!dev->phydev)
3871c387
MS
1582 return -EOPNOTSUPP;
1583
1e8a655d 1584 err = phy_ethtool_set_wol(dev->phydev, wol);
3871c387
MS
1585 /* Given that mv643xx_eth works without the marvell-specific PHY driver,
1586 * this debugging hint is useful to have.
1587 */
1588 if (err == -EOPNOTSUPP)
1589 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1590 return err;
1591}
1592
6bdf576e 1593static int
a54e1612
PR
1594mv643xx_eth_get_link_ksettings(struct net_device *dev,
1595 struct ethtool_link_ksettings *cmd)
6bdf576e
LB
1596{
1597 struct mv643xx_eth_private *mp = netdev_priv(dev);
1598
1e8a655d 1599 if (dev->phydev)
a54e1612 1600 return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
6bdf576e 1601 else
a54e1612 1602 return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
6bdf576e
LB
1603}
1604
10a9948d 1605static int
a54e1612
PR
1606mv643xx_eth_set_link_ksettings(struct net_device *dev,
1607 const struct ethtool_link_ksettings *cmd)
1da177e4 1608{
a54e1612
PR
1609 struct ethtool_link_ksettings c = *cmd;
1610 u32 advertising;
260055bb 1611 int ret;
ab4384a6 1612
1e8a655d 1613 if (!dev->phydev)
6bdf576e
LB
1614 return -EINVAL;
1615
fc32b0e2
LB
1616 /*
1617 * The MAC does not support 1000baseT_Half.
1618 */
a54e1612
PR
1619 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1620 c.link_modes.advertising);
1621 advertising &= ~ADVERTISED_1000baseT_Half;
1622 ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
1623 advertising);
fc32b0e2 1624
a54e1612 1625 ret = phy_ethtool_ksettings_set(dev->phydev, &c);
260055bb 1626 if (!ret)
0a9e413b 1627 mv643xx_eth_adjust_link(dev);
260055bb 1628 return ret;
c9df406f 1629}
1da177e4 1630
fc32b0e2
LB
1631static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1632 struct ethtool_drvinfo *drvinfo)
c9df406f 1633{
6f39da2c
AL
1634 strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
1635 sizeof(drvinfo->driver));
68aad78c 1636 strlcpy(drvinfo->version, mv643xx_eth_driver_version,
6f39da2c
AL
1637 sizeof(drvinfo->version));
1638 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1639 strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
c9df406f 1640}
1da177e4 1641
3e508034
LB
1642static int
1643mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1644{
1645 struct mv643xx_eth_private *mp = netdev_priv(dev);
1646
1647 ec->rx_coalesce_usecs = get_rx_coal(mp);
1648 ec->tx_coalesce_usecs = get_tx_coal(mp);
1649
1650 return 0;
1651}
1652
1653static int
1654mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1655{
1656 struct mv643xx_eth_private *mp = netdev_priv(dev);
1657
1658 set_rx_coal(mp, ec->rx_coalesce_usecs);
1659 set_tx_coal(mp, ec->tx_coalesce_usecs);
1660
1661 return 0;
1662}
1663
e7d2f4db
LB
1664static void
1665mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1666{
1667 struct mv643xx_eth_private *mp = netdev_priv(dev);
1668
1669 er->rx_max_pending = 4096;
1670 er->tx_max_pending = 4096;
e7d2f4db
LB
1671
1672 er->rx_pending = mp->rx_ring_size;
1673 er->tx_pending = mp->tx_ring_size;
e7d2f4db
LB
1674}
1675
1676static int
1677mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1678{
1679 struct mv643xx_eth_private *mp = netdev_priv(dev);
1680
1681 if (er->rx_mini_pending || er->rx_jumbo_pending)
1682 return -EINVAL;
1683
1684 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
ee9e4956
EG
1685 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1686 MV643XX_MAX_SKB_DESCS * 2, 4096);
1687 if (mp->tx_ring_size != er->tx_pending)
1688 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1689 mp->tx_ring_size, er->tx_pending);
e7d2f4db
LB
1690
1691 if (netif_running(dev)) {
1692 mv643xx_eth_stop(dev);
1693 if (mv643xx_eth_open(dev)) {
7542db8b
JP
1694 netdev_err(dev,
1695 "fatal error on re-opening device after ring param change\n");
e7d2f4db
LB
1696 return -ENOMEM;
1697 }
1698 }
1699
1700 return 0;
1701}
1702
d888b373
LB
1703
1704static int
c8f44aff 1705mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
d888b373
LB
1706{
1707 struct mv643xx_eth_private *mp = netdev_priv(dev);
3ad9b358 1708 bool rx_csum = features & NETIF_F_RXCSUM;
d888b373
LB
1709
1710 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1711
1712 return 0;
1713}
1714
fc32b0e2
LB
1715static void mv643xx_eth_get_strings(struct net_device *dev,
1716 uint32_t stringset, uint8_t *data)
c9df406f
LB
1717{
1718 int i;
1da177e4 1719
fc32b0e2
LB
1720 if (stringset == ETH_SS_STATS) {
1721 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
c9df406f 1722 memcpy(data + i * ETH_GSTRING_LEN,
16820054 1723 mv643xx_eth_stats[i].stat_string,
e5371493 1724 ETH_GSTRING_LEN);
c9df406f 1725 }
c9df406f
LB
1726 }
1727}
1da177e4 1728
fc32b0e2
LB
1729static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1730 struct ethtool_stats *stats,
1731 uint64_t *data)
c9df406f 1732{
b9873841 1733 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 1734 int i;
1da177e4 1735
8fd89211 1736 mv643xx_eth_get_stats(dev);
fc32b0e2 1737 mib_counters_update(mp);
1da177e4 1738
16820054
LB
1739 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1740 const struct mv643xx_eth_stats *stat;
1741 void *p;
1742
1743 stat = mv643xx_eth_stats + i;
1744
1745 if (stat->netdev_off >= 0)
1746 p = ((void *)mp->dev) + stat->netdev_off;
1747 else
1748 p = ((void *)mp) + stat->mp_off;
1749
1750 data[i] = (stat->sizeof_stat == 8) ?
1751 *(uint64_t *)p : *(uint32_t *)p;
1da177e4 1752 }
c9df406f 1753}
1da177e4 1754
fc32b0e2 1755static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
c9df406f 1756{
fc32b0e2 1757 if (sset == ETH_SS_STATS)
16820054 1758 return ARRAY_SIZE(mv643xx_eth_stats);
fc32b0e2
LB
1759
1760 return -EOPNOTSUPP;
c9df406f 1761}
1da177e4 1762
e5371493 1763static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
fc32b0e2 1764 .get_drvinfo = mv643xx_eth_get_drvinfo,
fc5e353c 1765 .nway_reset = phy_ethtool_nway_reset,
ed4ba4b5 1766 .get_link = ethtool_op_get_link,
1767 .get_coalesce = mv643xx_eth_get_coalesce,
1768 .set_coalesce = mv643xx_eth_set_coalesce,
1769 .get_ringparam = mv643xx_eth_get_ringparam,
1770 .set_ringparam = mv643xx_eth_set_ringparam,
1771 .get_strings = mv643xx_eth_get_strings,
1772 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
e5371493 1773 .get_sset_count = mv643xx_eth_get_sset_count,
ebad0a8d 1774 .get_ts_info = ethtool_op_get_ts_info,
1775 .get_wol = mv643xx_eth_get_wol,
1776 .set_wol = mv643xx_eth_set_wol,
1777 .get_link_ksettings = mv643xx_eth_get_link_ksettings,
1778 .set_link_ksettings = mv643xx_eth_set_link_ksettings,
c9df406f 1779};
1da177e4 1780
bea3348e 1781
c9df406f 1782/* address handling *********************************************************/
5daffe94 1783static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
c9df406f 1784{
1785 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1786 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1da177e4 1787
1788 addr[0] = (mac_h >> 24) & 0xff;
1789 addr[1] = (mac_h >> 16) & 0xff;
1790 addr[2] = (mac_h >> 8) & 0xff;
1791 addr[3] = mac_h & 0xff;
1792 addr[4] = (mac_l >> 8) & 0xff;
1793 addr[5] = mac_l & 0xff;
c9df406f 1794}
1da177e4 1795
66e63ffb 1796static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
c9df406f 1797{
1798 wrlp(mp, MAC_ADDR_HIGH,
1799 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1800 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
c9df406f 1801}
d0412d96 1802
66e63ffb 1803static u32 uc_addr_filter_mask(struct net_device *dev)
c9df406f 1804{
ccffad25 1805 struct netdev_hw_addr *ha;
66e63ffb 1806 u32 nibbles;
1da177e4 1807
1808 if (dev->flags & IFF_PROMISC)
1809 return 0;
1da177e4 1810
66e63ffb 1811 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
32e7bfc4 1812 netdev_for_each_uc_addr(ha, dev) {
ccffad25 1813 if (memcmp(dev->dev_addr, ha->addr, 5))
66e63ffb 1814 return 0;
ccffad25 1815 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
66e63ffb 1816 return 0;
ff561eef 1817
ccffad25 1818 nibbles |= 1 << (ha->addr[5] & 0x0f);
66e63ffb 1819 }
1da177e4 1820
66e63ffb 1821 return nibbles;
1822}
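/*
 * The hardware unicast filter can only distinguish addresses that
 * differ in the low four bits of the final octet; every other bit
 * must match dev_addr exactly.  Example: with dev_addr
 * 00:50:43:ab:cd:e2, a secondary address 00:50:43:ab:cd:e7 is
 * representable and the returned mask is (1 << 2) | (1 << 7); any
 * other address forces a return of 0, i.e. unicast promiscuous mode.
 */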
1823
66e63ffb 1824static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1da177e4 1825{
e5371493 1826 struct mv643xx_eth_private *mp = netdev_priv(dev);
1827 u32 port_config;
1828 u32 nibbles;
1829 int i;
1da177e4 1830
cc9754b3 1831 uc_addr_set(mp, dev->dev_addr);
1da177e4 1832
1833 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1834
1835 nibbles = uc_addr_filter_mask(dev);
1836 if (!nibbles) {
1837 port_config |= UNICAST_PROMISCUOUS_MODE;
6877f54e 1838 nibbles = 0xffff;
1839 }
1840
1841 for (i = 0; i < 16; i += 4) {
1842 int off = UNICAST_TABLE(mp->port_num) + i;
1843 u32 v;
1844
1845 v = 0;
1846 if (nibbles & 1)
1847 v |= 0x00000001;
1848 if (nibbles & 2)
1849 v |= 0x00000100;
1850 if (nibbles & 4)
1851 v |= 0x00010000;
1852 if (nibbles & 8)
1853 v |= 0x01000000;
1854 nibbles >>= 4;
1855
1856 wrl(mp, off, v);
1857 }
1858
66e63ffb 1859 wrlp(mp, PORT_CONFIG, port_config);
1860}
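/*
 * Each 32-bit word of the unicast table holds four one-byte entries,
 * one per nibble value, where 0x01 in a byte means "accept".  E.g.
 * nibbles == 0x0005 (nibbles 0 and 2) programs the first word to
 * 0x00010001 and leaves the remaining three words zero.
 */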
1861
1862static int addr_crc(unsigned char *addr)
1863{
1864 int crc = 0;
1865 int i;
1866
1867 for (i = 0; i < 6; i++) {
1868 int j;
1869
1870 crc = (crc ^ addr[i]) << 8;
1871 for (j = 7; j >= 0; j--) {
1872 if (crc & (0x100 << j))
1873 crc ^= 0x107 << j;
1874 }
1875 }
1876
1877 return crc;
1878}
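/*
 * This is a bitwise CRC-8 over the six address octets with polynomial
 * x^8 + x^2 + x + 1 (0x107), which presumably mirrors the hash the
 * controller applies in hardware when it looks up the "other"
 * multicast filter table below.
 */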
1879
66e63ffb 1880static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1da177e4 1881{
fc32b0e2 1882 struct mv643xx_eth_private *mp = netdev_priv(dev);
1883 u32 *mc_spec;
1884 u32 *mc_other;
22bedad3 1885 struct netdev_hw_addr *ha;
fc32b0e2 1886 int i;
c8aaea25 1887
1888 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
1889 goto promiscuous;
c8aaea25 1890
1891 /* Allocate both mc_spec and mc_other tables */
1892 mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
1893 if (!mc_spec)
1894 goto promiscuous;
1895 mc_other = &mc_spec[64];
66e63ffb 1896
1897 netdev_for_each_mc_addr(ha, dev) {
1898 u8 *a = ha->addr;
66e63ffb 1899 u32 *table;
8b711d6d 1900 u8 entry;
1da177e4 1901
fc32b0e2 1902 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1903 table = mc_spec;
1904 entry = a[5];
fc32b0e2 1905 } else {
1906 table = mc_other;
1907 entry = addr_crc(a);
fc32b0e2 1908 }
66e63ffb 1909
2b448334 1910 table[entry >> 2] |= 1 << (8 * (entry & 3));
fc32b0e2 1911 }
66e63ffb 1912
1913 for (i = 0; i < 64; i++) {
1914 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1915 mc_spec[i]);
1916 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1917 mc_other[i]);
1918 }
1919
1920 kfree(mc_spec);
1921 return;
1922
1923promiscuous:
1924 for (i = 0; i < 64; i++) {
1925 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1926 0x01010101u);
1927 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1928 0x01010101u);
1929 }
1930}
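/*
 * Layout sketch: both filter tables hold 256 one-byte entries packed
 * four to a 32-bit register (64 writes each).  Addresses of the form
 * 01:00:5e:00:00:xx index the "special" table directly by xx; all
 * other multicast addresses are hashed with addr_crc() into the
 * "other" table.  Writing 0x01010101 to every word, as in the
 * promiscuous path, accepts every multicast frame.
 */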
1931
1932static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1933{
1934 mv643xx_eth_program_unicast_filter(dev);
1935 mv643xx_eth_program_multicast_filter(dev);
1936}
1937
1938static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1939{
1940 struct sockaddr *sa = addr;
1941
a29ec08a 1942 if (!is_valid_ether_addr(sa->sa_data))
504f9b5a 1943 return -EADDRNOTAVAIL;
a29ec08a 1944
1945 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1946
1947 netif_addr_lock_bh(dev);
1948 mv643xx_eth_program_unicast_filter(dev);
1949 netif_addr_unlock_bh(dev);
1950
1951 return 0;
c9df406f 1952}
c8aaea25 1953
c8aaea25 1954
c9df406f 1955/* rx/tx queue initialisation ***********************************************/
64da80a2 1956static int rxq_init(struct mv643xx_eth_private *mp, int index)
c9df406f 1957{
64da80a2 1958 struct rx_queue *rxq = mp->rxq + index;
1959 struct rx_desc *rx_desc;
1960 int size;
1961 int i;
1962
1963 rxq->index = index;
1964
e7d2f4db 1965 rxq->rx_ring_size = mp->rx_ring_size;
1966
1967 rxq->rx_desc_count = 0;
1968 rxq->rx_curr_desc = 0;
1969 rxq->rx_used_desc = 0;
1970
1971 size = rxq->rx_ring_size * sizeof(struct rx_desc);
1972
f7981c1c 1973 if (index == 0 && size <= mp->rx_desc_sram_size) {
1974 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1975 mp->rx_desc_sram_size);
1976 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1977 } else {
1978 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1979 size, &rxq->rx_desc_dma,
1980 GFP_KERNEL);
1981 }
1982
8a578111 1983 if (rxq->rx_desc_area == NULL) {
7542db8b 1984 netdev_err(mp->dev,
1985 "can't allocate rx ring (%d bytes)\n", size);
1986 goto out;
1987 }
1988 memset(rxq->rx_desc_area, 0, size);
1da177e4 1989
8a578111 1990 rxq->rx_desc_area_size = size;
9fa8e980 1991 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1992 GFP_KERNEL);
1993 if (rxq->rx_skb == NULL)
8a578111 1994 goto out_free;
8a578111 1995
64699336 1996 rx_desc = rxq->rx_desc_area;
8a578111 1997 for (i = 0; i < rxq->rx_ring_size; i++) {
1998 int nexti;
1999
2000 nexti = i + 1;
2001 if (nexti == rxq->rx_ring_size)
2002 nexti = 0;
2003
2004 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
2005 nexti * sizeof(struct rx_desc);
2006 }
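	/*
	 * The descriptors now form a circular list: each next_desc_ptr
	 * holds the bus address of its successor, and the final
	 * descriptor points back at rxq->rx_desc_dma (descriptor 0),
	 * so the DMA engine can walk the ring without CPU help.
	 */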
2007
2008 return 0;
2009
2010
2011out_free:
f7981c1c 2012 if (index == 0 && size <= mp->rx_desc_sram_size)
2013 iounmap(rxq->rx_desc_area);
2014 else
eb0519b5 2015 dma_free_coherent(mp->dev->dev.parent, size,
2016 rxq->rx_desc_area,
2017 rxq->rx_desc_dma);
2018
2019out:
2020 return -ENOMEM;
c9df406f 2021}
c8aaea25 2022
8a578111 2023static void rxq_deinit(struct rx_queue *rxq)
c9df406f 2024{
2025 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
2026 int i;
2027
2028 rxq_disable(rxq);
c8aaea25 2029
2030 for (i = 0; i < rxq->rx_ring_size; i++) {
2031 if (rxq->rx_skb[i]) {
2032 dev_kfree_skb(rxq->rx_skb[i]);
2033 rxq->rx_desc_count--;
1da177e4 2034 }
c8aaea25 2035 }
1da177e4 2036
8a578111 2037 if (rxq->rx_desc_count) {
7542db8b 2038 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
2039 rxq->rx_desc_count);
2040 }
2041
f7981c1c 2042 if (rxq->index == 0 &&
64da80a2 2043 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
8a578111 2044 iounmap(rxq->rx_desc_area);
c9df406f 2045 else
eb0519b5 2046 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
2047 rxq->rx_desc_area, rxq->rx_desc_dma);
2048
2049 kfree(rxq->rx_skb);
c9df406f 2050}
1da177e4 2051
3d6b35bc 2052static int txq_init(struct mv643xx_eth_private *mp, int index)
c9df406f 2053{
3d6b35bc 2054 struct tx_queue *txq = mp->txq + index;
2055 struct tx_desc *tx_desc;
2056 int size;
9e911414 2057 int ret;
c9df406f 2058 int i;
1da177e4 2059
2060 txq->index = index;
2061
e7d2f4db 2062 txq->tx_ring_size = mp->tx_ring_size;
13d64285 2063
2064	/* A queue must always have room for at least one skb.
2065	 * Therefore, stop the queue when the number of free entries
2066	 * drops to the maximum number of descriptors per skb.
2067	 */
2068 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2069 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
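	/*
	 * Hypothetical example: if MV643XX_MAX_SKB_DESCS were 217 and
	 * the ring held 512 entries, tx_stop_threshold would be 295;
	 * the lower tx_wake_threshold adds hysteresis so the queue is
	 * not stopped and restarted on every reclaimed descriptor.
	 */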
2070
2071 txq->tx_desc_count = 0;
2072 txq->tx_curr_desc = 0;
2073 txq->tx_used_desc = 0;
2074
2075 size = txq->tx_ring_size * sizeof(struct tx_desc);
2076
f7981c1c 2077 if (index == 0 && size <= mp->tx_desc_sram_size) {
2078 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2079 mp->tx_desc_sram_size);
2080 txq->tx_desc_dma = mp->tx_desc_sram_addr;
2081 } else {
2082 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2083 size, &txq->tx_desc_dma,
2084 GFP_KERNEL);
2085 }
2086
2087 if (txq->tx_desc_area == NULL) {
7542db8b 2088 netdev_err(mp->dev,
13d64285 2089 "can't allocate tx ring (%d bytes)\n", size);
99ab08e0 2090 return -ENOMEM;
c9df406f 2091 }
2092 memset(txq->tx_desc_area, 0, size);
2093
2094 txq->tx_desc_area_size = size;
13d64285 2095
64699336 2096 tx_desc = txq->tx_desc_area;
13d64285 2097 for (i = 0; i < txq->tx_ring_size; i++) {
6b368f68 2098 struct tx_desc *txd = tx_desc + i;
2099 int nexti;
2100
2101 nexti = i + 1;
2102 if (nexti == txq->tx_ring_size)
2103 nexti = 0;
2104
2105 txd->cmd_sts = 0;
2106 txd->next_desc_ptr = txq->tx_desc_dma +
2107 nexti * sizeof(struct tx_desc);
2108 }
2109
2110 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2111 GFP_KERNEL);
2112 if (!txq->tx_desc_mapping) {
2113 ret = -ENOMEM;
2114 goto err_free_desc_area;
2115 }
2116
2117 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2118 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2119 txq->tx_ring_size * TSO_HEADER_SIZE,
2120 &txq->tso_hdrs_dma, GFP_KERNEL);
2121 if (txq->tso_hdrs == NULL) {
2122 ret = -ENOMEM;
2123 goto err_free_desc_mapping;
3ae8f4e0 2124 }
99ab08e0 2125 skb_queue_head_init(&txq->tx_skb);
c9df406f 2126
99ab08e0 2127 return 0;
2128
2129err_free_desc_mapping:
2130 kfree(txq->tx_desc_mapping);
2131err_free_desc_area:
2132 if (index == 0 && size <= mp->tx_desc_sram_size)
2133 iounmap(txq->tx_desc_area);
2134 else
2135 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2136 txq->tx_desc_area, txq->tx_desc_dma);
2137 return ret;
c8aaea25 2138}
1da177e4 2139
13d64285 2140static void txq_deinit(struct tx_queue *txq)
c9df406f 2141{
13d64285 2142 struct mv643xx_eth_private *mp = txq_to_mp(txq);
fa3959f4 2143
13d64285 2144 txq_disable(txq);
1fa38c58 2145 txq_reclaim(txq, txq->tx_ring_size, 1);
1da177e4 2146
13d64285 2147 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
1da177e4 2148
f7981c1c 2149 if (txq->index == 0 &&
3d6b35bc 2150 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
13d64285 2151 iounmap(txq->tx_desc_area);
c9df406f 2152 else
eb0519b5 2153 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
13d64285 2154 txq->tx_desc_area, txq->tx_desc_dma);
2155 kfree(txq->tx_desc_mapping);
2156
2157 if (txq->tso_hdrs)
2158 dma_free_coherent(mp->dev->dev.parent,
2159 txq->tx_ring_size * TSO_HEADER_SIZE,
2160 txq->tso_hdrs, txq->tso_hdrs_dma);
c9df406f 2161}
1da177e4 2162
1da177e4 2163
c9df406f 2164/* netdev ops and related ***************************************************/
2165static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2166{
2167 u32 int_cause;
2168 u32 int_cause_ext;
2169
e0ca8410 2170 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2171 if (int_cause == 0)
2172 return 0;
2173
2174 int_cause_ext = 0;
2175 if (int_cause & INT_EXT) {
2176 int_cause &= ~INT_EXT;
37a6084f 2177 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
e0ca8410 2178 }
1fa38c58 2179
1fa38c58 2180 if (int_cause) {
37a6084f 2181 wrlp(mp, INT_CAUSE, ~int_cause);
1fa38c58 2182 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
37a6084f 2183 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
2184 mp->work_rx |= (int_cause & INT_RX) >> 2;
2185 }
2186
2187 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
2188 if (int_cause_ext) {
37a6084f 2189 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2190 if (int_cause_ext & INT_EXT_LINK_PHY)
2191 mp->work_link = 1;
2192 mp->work_tx |= int_cause_ext & INT_EXT_TX;
2193 }
2194
2195 return 1;
2196}
2197
2198static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
2199{
2200 struct net_device *dev = (struct net_device *)dev_id;
2201 struct mv643xx_eth_private *mp = netdev_priv(dev);
2202
2203 if (unlikely(!mv643xx_eth_collect_events(mp)))
2204 return IRQ_NONE;
2205
37a6084f 2206 wrlp(mp, INT_MASK, 0);
2207 napi_schedule(&mp->napi);
2208
2209 return IRQ_HANDLED;
2210}
2211
2212static void handle_link_event(struct mv643xx_eth_private *mp)
2213{
2214 struct net_device *dev = mp->dev;
2215 u32 port_status;
2216 int speed;
2217 int duplex;
2218 int fc;
2219
37a6084f 2220 port_status = rdlp(mp, PORT_STATUS);
2221 if (!(port_status & LINK_UP)) {
2222 if (netif_carrier_ok(dev)) {
2223 int i;
2224
7542db8b 2225 netdev_info(dev, "link down\n");
2226
2227 netif_carrier_off(dev);
2f7eb47a 2228
f7981c1c 2229 for (i = 0; i < mp->txq_count; i++) {
2230 struct tx_queue *txq = mp->txq + i;
2231
1fa38c58 2232 txq_reclaim(txq, txq->tx_ring_size, 1);
f7981c1c 2233 txq_reset_hw_ptr(txq);
2234 }
2235 }
2236 return;
2237 }
2238
2239 switch (port_status & PORT_SPEED_MASK) {
2240 case PORT_SPEED_10:
2241 speed = 10;
2242 break;
2243 case PORT_SPEED_100:
2244 speed = 100;
2245 break;
2246 case PORT_SPEED_1000:
2247 speed = 1000;
2248 break;
2249 default:
2250 speed = -1;
2251 break;
2252 }
2253 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2254 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2255
2256 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2257 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2f7eb47a 2258
4fdeca3f 2259 if (!netif_carrier_ok(dev))
2f7eb47a 2260 netif_carrier_on(dev);
2261}
2262
1fa38c58 2263static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
c9df406f 2264{
2265 struct mv643xx_eth_private *mp;
2266 int work_done;
ce4e2e45 2267
1fa38c58 2268 mp = container_of(napi, struct mv643xx_eth_private, napi);
fc32b0e2 2269
2270 if (unlikely(mp->oom)) {
2271 mp->oom = 0;
2272 del_timer(&mp->rx_oom);
2273 }
1da177e4 2274
2275 work_done = 0;
2276 while (work_done < budget) {
2277 u8 queue_mask;
2278 int queue;
2279 int work_tbd;
2280
2281 if (mp->work_link) {
2282 mp->work_link = 0;
2283 handle_link_event(mp);
26ef1f17 2284 work_done++;
2285 continue;
2286 }
1da177e4 2287
2288 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2289 if (likely(!mp->oom))
2290 queue_mask |= mp->work_rx_refill;
2291
2292 if (!queue_mask) {
2293 if (mv643xx_eth_collect_events(mp))
2294 continue;
2295 break;
2296 }
1da177e4 2297
2298 queue = fls(queue_mask) - 1;
2299 queue_mask = 1 << queue;
2300
2301 work_tbd = budget - work_done;
2302 if (work_tbd > 16)
2303 work_tbd = 16;
2304
2305 if (mp->work_tx_end & queue_mask) {
2306 txq_kick(mp->txq + queue);
2307 } else if (mp->work_tx & queue_mask) {
2308 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2309 txq_maybe_wake(mp->txq + queue);
2310 } else if (mp->work_rx & queue_mask) {
2311 work_done += rxq_process(mp->rxq + queue, work_tbd);
1319ebad 2312 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2313 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2314 } else {
2315 BUG();
2316 }
84dd619e 2317 }
fc32b0e2 2318
1fa38c58 2319 if (work_done < budget) {
1319ebad 2320 if (mp->oom)
2321 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2322 napi_complete(napi);
e0ca8410 2323 wrlp(mp, INT_MASK, mp->int_mask);
226bb6b7 2324 }
3d6b35bc 2325
2326 return work_done;
2327}
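/*
 * Scheduling note: fls(queue_mask) - 1 picks the highest-numbered
 * pending event source on each iteration, and work_tbd caps each
 * source at a 16-unit slice of the remaining budget, so a single busy
 * queue cannot monopolise the whole NAPI budget.
 */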
8fa89bf5 2328
2329static inline void oom_timer_wrapper(unsigned long data)
2330{
2331 struct mv643xx_eth_private *mp = (void *)data;
1da177e4 2332
1fa38c58 2333 napi_schedule(&mp->napi);
1da177e4
LT
2334}
2335
fc32b0e2 2336static void port_start(struct mv643xx_eth_private *mp)
1da177e4 2337{
1e8a655d 2338 struct net_device *dev = mp->dev;
d0412d96 2339 u32 pscr;
8a578111 2340 int i;
1da177e4 2341
2342 /*
2343	 * Perform a PHY reset if a PHY is attached.
2344 */
1e8a655d 2345 if (dev->phydev) {
a54e1612 2346 struct ethtool_link_ksettings cmd;
bedfe324 2347
a54e1612 2348 mv643xx_eth_get_link_ksettings(dev, &cmd);
1e8a655d 2349 phy_init_hw(dev->phydev);
2350 mv643xx_eth_set_link_ksettings(
2351 dev, (const struct ethtool_link_ksettings *)&cmd);
1e8a655d 2352 phy_start(dev->phydev);
bedfe324 2353 }
1da177e4 2354
2355 /*
2356 * Configure basic link parameters.
2357 */
37a6084f 2358 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2359
2360 pscr |= SERIAL_PORT_ENABLE;
37a6084f 2361 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2362
2363 pscr |= DO_NOT_FORCE_LINK_FAIL;
1e8a655d 2364 if (!dev->phydev)
81600eea 2365 pscr |= FORCE_LINK_PASS;
37a6084f 2366 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
81600eea 2367
2368 /*
2369 * Configure TX path and queues.
2370 */
89df5fdc 2371 tx_set_rate(mp, 1000000000, 16777216);
f7981c1c 2372 for (i = 0; i < mp->txq_count; i++) {
3d6b35bc 2373 struct tx_queue *txq = mp->txq + i;
13d64285 2374
6b368f68 2375 txq_reset_hw_ptr(txq);
2376 txq_set_rate(txq, 1000000000, 16777216);
2377 txq_set_fixed_prio_mode(txq);
2378 }
2379
2380 /*
2381 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
2382 * frames to RX queue #0, and include the pseudo-header when
2383 * calculating receive checksums.
d9a073ea 2384 */
e138f96b 2385 mv643xx_eth_set_features(mp->dev, mp->dev->features);
01999873 2386
2387 /*
2388 * Treat BPDUs as normal multicasts, and disable partition mode.
2389 */
37a6084f 2390 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
01999873 2391
2392 /*
2393 * Add configured unicast addresses to address filter table.
2394 */
2395 mv643xx_eth_program_unicast_filter(mp->dev);
2396
8a578111 2397 /*
64da80a2 2398 * Enable the receive queues.
8a578111 2399 */
f7981c1c 2400 for (i = 0; i < mp->rxq_count; i++) {
64da80a2 2401 struct rx_queue *rxq = mp->rxq + i;
8a578111 2402 u32 addr;
1da177e4 2403
2404 addr = (u32)rxq->rx_desc_dma;
2405 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
37a6084f 2406 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
1da177e4 2407
2408 rxq_enable(rxq);
2409 }
2410}
2411
2412static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2413{
2414 int skb_size;
2415
2416 /*
2417 * Reserve 2+14 bytes for an ethernet header (the hardware
2418 * automatically prepends 2 bytes of dummy data to each
2419 * received packet), 16 bytes for up to four VLAN tags, and
2420 * 4 bytes for the trailing FCS -- 36 bytes total.
2421 */
2422 skb_size = mp->dev->mtu + 36;
2423
2424 /*
2425 * Make sure that the skb size is a multiple of 8 bytes, as
2426 * the lower three bits of the receive descriptor's buffer
2427 * size field are ignored by the hardware.
2428 */
2429 mp->skb_size = (skb_size + 7) & ~7;
2430
2431 /*
2432 * If NET_SKB_PAD is smaller than a cache line,
2433 * netdev_alloc_skb() will cause skb->data to be misaligned
2434 * to a cache line boundary. If this is the case, include
2435 * some extra space to allow re-aligning the data area.
2436 */
2437 mp->skb_size += SKB_DMA_REALIGN;
2438}
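/*
 * Example: for the default MTU of 1500 this yields 1500 + 36 = 1536,
 * which is already a multiple of 8; a 9000-byte jumbo MTU yields
 * 9036, rounded up to 9040.  The SKB_DMA_REALIGN slack is then added
 * on top in both cases.
 */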
2439
c9df406f 2440static int mv643xx_eth_open(struct net_device *dev)
16e03018 2441{
e5371493 2442 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 2443 int err;
64da80a2 2444 int i;
16e03018 2445
2446 wrlp(mp, INT_CAUSE, 0);
2447 wrlp(mp, INT_CAUSE_EXT, 0);
2448 rdlp(mp, INT_CAUSE_EXT);
c9df406f 2449
fc32b0e2 2450 err = request_irq(dev->irq, mv643xx_eth_irq,
2a1867a7 2451 IRQF_SHARED, dev->name, dev);
c9df406f 2452 if (err) {
7542db8b 2453 netdev_err(dev, "can't assign irq\n");
c9df406f 2454 return -EAGAIN;
2455 }
2456
2457 mv643xx_eth_recalc_skb_size(mp);
2458
2459 napi_enable(&mp->napi);
2460
2461 mp->int_mask = INT_EXT;
2462
f7981c1c 2463 for (i = 0; i < mp->rxq_count; i++) {
2464 err = rxq_init(mp, i);
2465 if (err) {
2466 while (--i >= 0)
f7981c1c 2467 rxq_deinit(mp->rxq + i);
2468 goto out;
2469 }
2470
1fa38c58 2471 rxq_refill(mp->rxq + i, INT_MAX);
e0ca8410 2472 mp->int_mask |= INT_RX_0 << i;
2473 }
2474
1319ebad 2475 if (mp->oom) {
2476 mp->rx_oom.expires = jiffies + (HZ / 10);
2477 add_timer(&mp->rx_oom);
64da80a2 2478 }
8a578111 2479
f7981c1c 2480 for (i = 0; i < mp->txq_count; i++) {
2481 err = txq_init(mp, i);
2482 if (err) {
2483 while (--i >= 0)
f7981c1c 2484 txq_deinit(mp->txq + i);
2485 goto out_free;
2486 }
e0ca8410 2487 mp->int_mask |= INT_TX_END_0 << i;
3d6b35bc 2488 }
16e03018 2489
f564412c 2490 add_timer(&mp->mib_counters_timer);
fc32b0e2 2491 port_start(mp);
16e03018 2492
37a6084f 2493 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
e0ca8410 2494 wrlp(mp, INT_MASK, mp->int_mask);
16e03018 2495
2496 return 0;
2497
13d64285 2498
fc32b0e2 2499out_free:
2500 for (i = 0; i < mp->rxq_count; i++)
2501 rxq_deinit(mp->rxq + i);
fc32b0e2 2502out:
2503 free_irq(dev->irq, dev);
2504
2505 return err;
2506}
2507
e5371493 2508static void port_reset(struct mv643xx_eth_private *mp)
1da177e4 2509{
fc32b0e2 2510 unsigned int data;
64da80a2 2511 int i;
1da177e4 2512
2513 for (i = 0; i < mp->rxq_count; i++)
2514 rxq_disable(mp->rxq + i);
2515 for (i = 0; i < mp->txq_count; i++)
2516 txq_disable(mp->txq + i);
2517
2518 while (1) {
37a6084f 2519 u32 ps = rdlp(mp, PORT_STATUS);
2520
2521 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2522 break;
13d64285 2523 udelay(10);
ae9ae064 2524 }
1da177e4 2525
c9df406f 2526 /* Reset the Enable bit in the Configuration Register */
37a6084f 2527 data = rdlp(mp, PORT_SERIAL_CONTROL);
2528 data &= ~(SERIAL_PORT_ENABLE |
2529 DO_NOT_FORCE_LINK_FAIL |
2530 FORCE_LINK_PASS);
37a6084f 2531 wrlp(mp, PORT_SERIAL_CONTROL, data);
2532}
2533
c9df406f 2534static int mv643xx_eth_stop(struct net_device *dev)
1da177e4 2535{
e5371493 2536 struct mv643xx_eth_private *mp = netdev_priv(dev);
64da80a2 2537 int i;
1da177e4 2538
fe65e704 2539 wrlp(mp, INT_MASK_EXT, 0x00000000);
2540 wrlp(mp, INT_MASK, 0x00000000);
2541 rdlp(mp, INT_MASK);
1da177e4 2542
c9df406f 2543 napi_disable(&mp->napi);
78fff83b 2544
2545 del_timer_sync(&mp->rx_oom);
2546
c9df406f 2547 netif_carrier_off(dev);
2548 if (dev->phydev)
2549 phy_stop(dev->phydev);
2550 free_irq(dev->irq, dev);
2551
cc9754b3 2552 port_reset(mp);
8fd89211 2553 mv643xx_eth_get_stats(dev);
fc32b0e2 2554 mib_counters_update(mp);
57e8f26a 2555 del_timer_sync(&mp->mib_counters_timer);
1da177e4 2556
2557 for (i = 0; i < mp->rxq_count; i++)
2558 rxq_deinit(mp->rxq + i);
2559 for (i = 0; i < mp->txq_count; i++)
2560 txq_deinit(mp->txq + i);
1da177e4 2561
c9df406f 2562 return 0;
2563}
2564
fc32b0e2 2565static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1da177e4 2566{
260055bb 2567 int ret;
1da177e4 2568
1e8a655d 2569 if (!dev->phydev)
260055bb 2570 return -ENOTSUPP;
bedfe324 2571
1e8a655d 2572 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
260055bb 2573 if (!ret)
0a9e413b 2574 mv643xx_eth_adjust_link(dev);
260055bb 2575 return ret;
2576}
2577
c9df406f 2578static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 2579{
2580 struct mv643xx_eth_private *mp = netdev_priv(dev);
2581
c9df406f 2582 dev->mtu = new_mtu;
2bcb4b0f 2583 mv643xx_eth_recalc_skb_size(mp);
2584 tx_set_rate(mp, 1000000000, 16777216);
2585
2586 if (!netif_running(dev))
2587 return 0;
1da177e4 2588
2589	/*
2590	 * Stop and then re-open the interface.  This will allocate RX
2591	 * skbs of the new MTU.
2592	 * Note that re-opening can fail under memory pressure, in
2593	 * which case the device is left stopped.
2594	 */
2595 mv643xx_eth_stop(dev);
2596 if (mv643xx_eth_open(dev)) {
2597 netdev_err(dev,
2598 "fatal error on re-opening device after MTU change\n");
2599 }
2600
2601 return 0;
2602}
2603
fc32b0e2 2604static void tx_timeout_task(struct work_struct *ugly)
1da177e4 2605{
fc32b0e2 2606 struct mv643xx_eth_private *mp;
1da177e4 2607
2608 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2609 if (netif_running(mp->dev)) {
e5ef1de1 2610 netif_tx_stop_all_queues(mp->dev);
2611 port_reset(mp);
2612 port_start(mp);
e5ef1de1 2613 netif_tx_wake_all_queues(mp->dev);
fc32b0e2 2614 }
2615}
2616
c9df406f 2617static void mv643xx_eth_tx_timeout(struct net_device *dev)
1da177e4 2618{
e5371493 2619 struct mv643xx_eth_private *mp = netdev_priv(dev);
1da177e4 2620
7542db8b 2621 netdev_info(dev, "tx timeout\n");
d0412d96 2622
c9df406f 2623 schedule_work(&mp->tx_timeout_task);
2624}
2625
c9df406f 2626#ifdef CONFIG_NET_POLL_CONTROLLER
fc32b0e2 2627static void mv643xx_eth_netpoll(struct net_device *dev)
9f8dd319 2628{
fc32b0e2 2629 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 2630
2631 wrlp(mp, INT_MASK, 0x00000000);
2632 rdlp(mp, INT_MASK);
c9df406f 2633
fc32b0e2 2634 mv643xx_eth_irq(dev->irq, dev);
c9df406f 2635
e0ca8410 2636 wrlp(mp, INT_MASK, mp->int_mask);
9f8dd319 2637}
c9df406f 2638#endif
9f8dd319 2639
9f8dd319 2640
c9df406f 2641/* platform glue ************************************************************/
2642static void
2643mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
63a9332b 2644 const struct mbus_dram_target_info *dram)
c9df406f 2645{
cc9754b3 2646 void __iomem *base = msp->base;
2647 u32 win_enable;
2648 u32 win_protect;
2649 int i;
9f8dd319 2650
2651 for (i = 0; i < 6; i++) {
2652 writel(0, base + WINDOW_BASE(i));
2653 writel(0, base + WINDOW_SIZE(i));
2654 if (i < 4)
2655 writel(0, base + WINDOW_REMAP_HIGH(i));
2656 }
2657
2658 win_enable = 0x3f;
2659 win_protect = 0;
2660
2661 for (i = 0; i < dram->num_cs; i++) {
63a9332b 2662 const struct mbus_dram_window *cs = dram->cs + i;
2663
2664 writel((cs->base & 0xffff0000) |
2665 (cs->mbus_attr << 8) |
2666 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2667 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2668
2669 win_enable &= ~(1 << i);
2670 win_protect |= 3 << (2 * i);
2671 }
2672
2673 writel(win_enable, base + WINDOW_BAR_ENABLE);
2674 msp->win_protect = win_protect;
2675}
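/*
 * A cleared bit in WINDOW_BAR_ENABLE enables the corresponding
 * window, so win_enable starts at 0x3f (all six windows disabled)
 * and has one bit cleared per DRAM chip select.  win_protect grants
 * full read/write access (0x3) in each used window's two-bit field.
 */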
2676
2677static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2678{
2679 /*
2680 * Check whether we have a 14-bit coal limit field in bits
2681 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2682 * SDMA config register.
2683 */
2684 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2685 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2686 msp->extended_rx_coal_limit = 1;
2687 else
2688 msp->extended_rx_coal_limit = 0;
2689
2690 /*
2691 * Check whether the MAC supports TX rate control, and if
2692 * yes, whether its associated registers are in the old or
2693 * the new place.
1e881592 2694 */
2695 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2696 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2697 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2698 } else {
2699 writel(7, msp->base + 0x0400 + TX_BW_RATE);
2700 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2701 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2702 else
2703 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2704 }
2705}
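/*
 * Both probes above use the same write-then-read trick: set a bit
 * that only exists on newer silicon and check whether it sticks.  A
 * read-back of zero indicates the old register layout (or, for the
 * TX_BW_RATE probe, no TX rate control at all).
 */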
2706
2707#if defined(CONFIG_OF)
2708static const struct of_device_id mv643xx_eth_shared_ids[] = {
2709 { .compatible = "marvell,orion-eth", },
2710 { .compatible = "marvell,kirkwood-eth", },
2711 { }
2712};
2713MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2714#endif
2715
3e3397e7 2716#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
2717#define mv643xx_eth_property(_np, _name, _v) \
2718 do { \
2719 u32 tmp; \
2720 if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
2721 _v = tmp; \
2722 } while (0)
2723
2724static struct platform_device *port_platdev[3];
2725
2726static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2727 struct device_node *pnp)
2728{
2729 struct platform_device *ppdev;
2730 struct mv643xx_eth_platform_data ppd;
2731 struct resource res;
2732 const char *mac_addr;
2733 int ret;
785bf6f7 2734 int dev_num = 0;
2735
2736 memset(&ppd, 0, sizeof(ppd));
2737 ppd.shared = pdev;
2738
2739 memset(&res, 0, sizeof(res));
2740 if (!of_irq_to_resource(pnp, 0, &res)) {
2741 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
2742 return -EINVAL;
2743 }
2744
2745 if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
2746 dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
2747 return -EINVAL;
2748 }
2749
2750 if (ppd.port_number >= 3) {
2751 dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
2752 return -EINVAL;
2753 }
2754
2755 while (dev_num < 3 && port_platdev[dev_num])
2756 dev_num++;
2757
2758 if (dev_num == 3) {
2759 dev_err(&pdev->dev, "too many ports registered\n");
2760 return -EINVAL;
2761 }
2762
2763 mac_addr = of_get_mac_address(pnp);
2764 if (mac_addr)
d458cdf7 2765 memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
2766
2767 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2768 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2769 mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
2770 mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
2771 mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
2772 mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
2773
2774 ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
2775 if (!ppd.phy_node) {
2776 ppd.phy_addr = MV643XX_ETH_PHY_NONE;
2777 of_property_read_u32(pnp, "speed", &ppd.speed);
2778 of_property_read_u32(pnp, "duplex", &ppd.duplex);
2779 }
2780
785bf6f7 2781 ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
2782 if (!ppdev)
2783 return -ENOMEM;
2784 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
b5d82db8 2785 ppdev->dev.of_node = pnp;
2786
2787 ret = platform_device_add_resources(ppdev, &res, 1);
2788 if (ret)
2789 goto port_err;
2790
2791 ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
2792 if (ret)
2793 goto port_err;
2794
2795 ret = platform_device_add(ppdev);
2796 if (ret)
2797 goto port_err;
2798
785bf6f7 2799 port_platdev[dev_num] = ppdev;
2800
2801 return 0;
2802
2803port_err:
2804 platform_device_put(ppdev);
2805 return ret;
2806}
2807
2808static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2809{
2810 struct mv643xx_eth_shared_platform_data *pd;
2811 struct device_node *pnp, *np = pdev->dev.of_node;
2812 int ret;
2813
2814 /* bail out if not registered from DT */
2815 if (!np)
2816 return 0;
2817
2818 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2819 if (!pd)
2820 return -ENOMEM;
2821 pdev->dev.platform_data = pd;
2822
2823 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
2824
2825 for_each_available_child_of_node(np, pnp) {
2826 ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2827 if (ret) {
2828 of_node_put(pnp);
76723bca 2829 return ret;
26b7974d 2830 }
2831 }
2832 return 0;
2833}
2834
2835static void mv643xx_eth_shared_of_remove(void)
2836{
2837 int n;
2838
2839 for (n = 0; n < 3; n++) {
2840 platform_device_del(port_platdev[n]);
2841 port_platdev[n] = NULL;
2842 }
2843}
2844#else
ff20877a 2845static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
76723bca 2846{
ff20877a 2847 return 0;
2848}
2849
2850static inline void mv643xx_eth_shared_of_remove(void)
2851{
2852}
2853#endif
2854
c9df406f 2855static int mv643xx_eth_shared_probe(struct platform_device *pdev)
9f8dd319 2856{
10a9948d 2857 static int mv643xx_eth_version_printed;
76723bca 2858 struct mv643xx_eth_shared_platform_data *pd;
e5371493 2859 struct mv643xx_eth_shared_private *msp;
63a9332b 2860 const struct mbus_dram_target_info *dram;
c9df406f 2861 struct resource *res;
76723bca 2862 int ret;
9f8dd319 2863
e5371493 2864 if (!mv643xx_eth_version_printed++)
2865 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2866 mv643xx_eth_driver_version);
9f8dd319 2867
2868 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2869 if (res == NULL)
727f957a 2870 return -EINVAL;
9f8dd319 2871
727f957a 2872 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
c9df406f 2873 if (msp == NULL)
727f957a 2874 return -ENOMEM;
76723bca 2875 platform_set_drvdata(pdev, msp);
c9df406f 2876
65a6f969 2877 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
cc9754b3 2878 if (msp->base == NULL)
727f957a 2879 return -ENOMEM;
c9df406f 2880
2881 msp->clk = devm_clk_get(&pdev->dev, NULL);
2882 if (!IS_ERR(msp->clk))
2883 clk_prepare_enable(msp->clk);
2884
2885 /*
2886 * (Re-)program MBUS remapping windows if we are asked to.
2887 */
2888 dram = mv_mbus_dram_info();
2889 if (dram)
2890 mv643xx_eth_conf_mbus_windows(msp, dram);
c9df406f 2891
2892 ret = mv643xx_eth_shared_of_probe(pdev);
2893 if (ret)
2894 return ret;
bbfa6d0a 2895 pd = dev_get_platdata(&pdev->dev);
76723bca 2896
2897 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2898 pd->tx_csum_limit : 9 * 1024;
773fc3ee 2899 infer_hw_params(msp);
fc32b0e2 2900
c9df406f 2901 return 0;
2902}
2903
2904static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2905{
e5371493 2906 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
c9df406f 2907
76723bca 2908 mv643xx_eth_shared_of_remove();
2909 if (!IS_ERR(msp->clk))
2910 clk_disable_unprepare(msp->clk);
c9df406f 2911 return 0;
2912}
2913
c9df406f 2914static struct platform_driver mv643xx_eth_shared_driver = {
2915 .probe = mv643xx_eth_shared_probe,
2916 .remove = mv643xx_eth_shared_remove,
c9df406f 2917 .driver = {
fc32b0e2 2918 .name = MV643XX_ETH_SHARED_NAME,
76723bca 2919 .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
2920 },
2921};
2922
e5371493 2923static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
1da177e4 2924{
c9df406f 2925 int addr_shift = 5 * mp->port_num;
fc32b0e2 2926 u32 data;
1da177e4 2927
2928 data = rdl(mp, PHY_ADDR);
2929 data &= ~(0x1f << addr_shift);
2930 data |= (phy_addr & 0x1f) << addr_shift;
2931 wrl(mp, PHY_ADDR, data);
2932}
2933
e5371493 2934static int phy_addr_get(struct mv643xx_eth_private *mp)
1da177e4 2935{
2936 unsigned int data;
2937
2938 data = rdl(mp, PHY_ADDR);
2939
2940 return (data >> (5 * mp->port_num)) & 0x1f;
2941}
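/*
 * PHY_ADDR packs one 5-bit SMI address per port, with port N's field
 * at bits [5N+4:5N]; phy_addr_set() and phy_addr_get() above simply
 * mask and shift that field.
 */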
2942
2943static void set_params(struct mv643xx_eth_private *mp,
2944 struct mv643xx_eth_platform_data *pd)
2945{
2946 struct net_device *dev = mp->dev;
ee9e4956 2947 unsigned int tx_ring_size;
2948
2949 if (is_valid_ether_addr(pd->mac_addr))
d458cdf7 2950 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2951 else
2952 uc_addr_get(mp, dev->dev_addr);
2953
e7d2f4db 2954 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
fc32b0e2 2955 if (pd->rx_queue_size)
e7d2f4db 2956 mp->rx_ring_size = pd->rx_queue_size;
2957 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2958 mp->rx_desc_sram_size = pd->rx_sram_size;
1da177e4 2959
f7981c1c 2960 mp->rxq_count = pd->rx_queue_count ? : 1;
64da80a2 2961
ee9e4956 2962 tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
fc32b0e2 2963 if (pd->tx_queue_size)
2964 tx_ring_size = pd->tx_queue_size;
2965
2966 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2967 MV643XX_MAX_SKB_DESCS * 2, 4096);
2968 if (mp->tx_ring_size != tx_ring_size)
2969 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2970 mp->tx_ring_size, tx_ring_size);
2971
2972 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2973 mp->tx_desc_sram_size = pd->tx_sram_size;
3d6b35bc 2974
f7981c1c 2975 mp->txq_count = pd->tx_queue_count ? : 1;
2976}
2977
2978static int get_phy_mode(struct mv643xx_eth_private *mp)
2979{
2980 struct device *dev = mp->dev->dev.parent;
2981 int iface = -1;
2982
2983 if (dev->of_node)
2984 iface = of_get_phy_mode(dev->of_node);
2985
2986	/* Historical default if unspecified.  We could also read/write
2987	 * the interface state from/to the PSC1 register.
2988	 */
2989 if (iface < 0)
2990 iface = PHY_INTERFACE_MODE_GMII;
2991 return iface;
2992}
2993
2994static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2995 int phy_addr)
1da177e4 2996{
2997 struct phy_device *phydev;
2998 int start;
2999 int num;
3000 int i;
c3a07134 3001 char phy_id[MII_BUS_ID_SIZE + 3];
45c5d3bc 3002
3003 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
3004 start = phy_addr_get(mp) & 0x1f;
3005 num = 32;
3006 } else {
3007 start = phy_addr & 0x1f;
3008 num = 1;
3009 }
45c5d3bc 3010
c3a07134 3011 /* Attempt to connect to the PHY using orion-mdio */
976c90b9 3012 phydev = ERR_PTR(-ENODEV);
3013 for (i = 0; i < num; i++) {
3014 int addr = (start + i) & 0x1f;
fc32b0e2 3015
3016 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
3017 "orion-mdio-mii", addr);
1da177e4 3018
c3a07134 3019 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
fd33b244 3020 get_phy_mode(mp));
3021 if (!IS_ERR(phydev)) {
3022 phy_addr_set(mp, addr);
3023 break;
3024 }
3025 }
1da177e4 3026
ed94493f 3027 return phydev;
3028}
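/*
 * With MV643XX_ETH_PHY_ADDR_DEFAULT, the scan starts at whatever
 * address the bootloader left in PHY_ADDR and wraps through all 32
 * SMI addresses via (start + i) & 0x1f; the first address that
 * connects successfully is latched back with phy_addr_set().
 */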
3029
ed94493f 3030static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
c28a4f89 3031{
3032 struct net_device *dev = mp->dev;
3033 struct phy_device *phy = dev->phydev;
c28a4f89 3034
3035 if (speed == 0) {
3036 phy->autoneg = AUTONEG_ENABLE;
3037 phy->speed = 0;
3038 phy->duplex = 0;
3039 phy->advertising = phy->supported | ADVERTISED_Autoneg;
c9df406f 3040 } else {
3041 phy->autoneg = AUTONEG_DISABLE;
3042 phy->advertising = 0;
3043 phy->speed = speed;
3044 phy->duplex = duplex;
c9df406f 3045 }
ed94493f 3046 phy_start_aneg(phy);
3047}
3048
3049static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
3050{
1e8a655d 3051 struct net_device *dev = mp->dev;
3052 u32 pscr;
3053
37a6084f 3054 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
3055 if (pscr & SERIAL_PORT_ENABLE) {
3056 pscr &= ~SERIAL_PORT_ENABLE;
37a6084f 3057 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3058 }
3059
3060 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
1e8a655d 3061 if (!dev->phydev) {
3062 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
3063 if (speed == SPEED_1000)
3064 pscr |= SET_GMII_SPEED_TO_1000;
3065 else if (speed == SPEED_100)
3066 pscr |= SET_MII_SPEED_TO_100;
3067
3068 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
3069
3070 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
3071 if (duplex == DUPLEX_FULL)
3072 pscr |= SET_FULL_DUPLEX_MODE;
3073 }
3074
37a6084f 3075 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3076}
3077
3078static const struct net_device_ops mv643xx_eth_netdev_ops = {
3079 .ndo_open = mv643xx_eth_open,
3080 .ndo_stop = mv643xx_eth_stop,
3081 .ndo_start_xmit = mv643xx_eth_xmit,
3082 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
3083 .ndo_set_mac_address = mv643xx_eth_set_mac_address,
1d4bd947 3084 .ndo_validate_addr = eth_validate_addr,
3085 .ndo_do_ioctl = mv643xx_eth_ioctl,
3086 .ndo_change_mtu = mv643xx_eth_change_mtu,
aad59c43 3087 .ndo_set_features = mv643xx_eth_set_features,
3088 .ndo_tx_timeout = mv643xx_eth_tx_timeout,
3089 .ndo_get_stats = mv643xx_eth_get_stats,
3090#ifdef CONFIG_NET_POLL_CONTROLLER
3091 .ndo_poll_controller = mv643xx_eth_netpoll,
3092#endif
3093};
3094
c9df406f 3095static int mv643xx_eth_probe(struct platform_device *pdev)
1da177e4 3096{
c9df406f 3097 struct mv643xx_eth_platform_data *pd;
e5371493 3098 struct mv643xx_eth_private *mp;
c9df406f 3099 struct net_device *dev;
1e8a655d 3100 struct phy_device *phydev = NULL;
c9df406f 3101 struct resource *res;
fc32b0e2 3102 int err;
1da177e4 3103
bbfa6d0a 3104 pd = dev_get_platdata(&pdev->dev);
c9df406f 3105 if (pd == NULL) {
7542db8b 3106 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
3107 return -ENODEV;
3108 }
1da177e4 3109
c9df406f 3110 if (pd->shared == NULL) {
7542db8b 3111 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
3112 return -ENODEV;
3113 }
8f518703 3114
e5ef1de1 3115 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
3116 if (!dev)
3117 return -ENOMEM;
1da177e4 3118
fd33b244 3119 SET_NETDEV_DEV(dev, &pdev->dev);
c9df406f 3120 mp = netdev_priv(dev);
3121 platform_set_drvdata(pdev, mp);
3122
3123 mp->shared = platform_get_drvdata(pd->shared);
37a6084f 3124 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
3125 mp->port_num = pd->port_number;
3126
c9df406f 3127 mp->dev = dev;
78fff83b 3128
3129	/* Kirkwood resets some registers when their clocks are gated.  In
3130	 * particular, CLK125_BYPASS_EN must be cleared, but that bit is
3131	 * not present on the other SoCs/system controllers this driver supports.
3132 */
3133 if (of_device_is_compatible(pdev->dev.of_node,
3134 "marvell,kirkwood-eth-port"))
3135 wrlp(mp, PORT_SERIAL_CONTROL1,
3136 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
3137
452503eb 3138 /*
3139 * Start with a default rate, and if there is a clock, allow
3140 * it to override the default.
452503eb 3141 */
9a43a026 3142 mp->t_clk = 133000000;
20922486 3143 mp->clk = devm_clk_get(&pdev->dev, NULL);
3144 if (!IS_ERR(mp->clk)) {
3145 clk_prepare_enable(mp->clk);
3146 mp->t_clk = clk_get_rate(mp->clk);
3147 } else if (!IS_ERR(mp->shared->clk)) {
3148 mp->t_clk = clk_get_rate(mp->shared->clk);
452503eb 3149 }
20922486 3150
fc32b0e2 3151 set_params(mp, pd);
3152 netif_set_real_num_tx_queues(dev, mp->txq_count);
3153 netif_set_real_num_rx_queues(dev, mp->rxq_count);
fc32b0e2 3154
3155 err = 0;
3156 if (pd->phy_node) {
3157 phydev = of_phy_connect(mp->dev, pd->phy_node,
3158 mv643xx_eth_adjust_link, 0,
27058af4 3159 get_phy_mode(mp));
1e8a655d 3160 if (!phydev)
cc9d4598 3161 err = -ENODEV;
6115c11f 3162 else
1e8a655d 3163 phy_addr_set(mp, phydev->mdio.addr);
cc9d4598 3164 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
1e8a655d 3165 phydev = phy_scan(mp, pd->phy_addr);
bedfe324 3166
3167 if (IS_ERR(phydev))
3168 err = PTR_ERR(phydev);
3169 else
3170 phy_init(mp, pd->speed, pd->duplex);
976c90b9 3171 }
3172 if (err == -ENODEV) {
3173 err = -EPROBE_DEFER;
3174 goto out;
3175 }
3176 if (err)
3177 goto out;
6bdf576e 3178
7ad24ea4 3179 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
ed94493f 3180
81600eea 3181 init_pscr(mp, pd->speed, pd->duplex);
fc32b0e2 3182
3183
3184 mib_counters_clear(mp);
3185
3186 setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
3187 (unsigned long)mp);
4ff3495a 3188 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
3189
3190 spin_lock_init(&mp->mib_counters_lock);
3191
3192 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
3193
a3659aa0 3194 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
2257e05c 3195
12d5e6fd 3196 setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
2257e05c 3197
fc32b0e2 3198
3199 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
3200 BUG_ON(!res);
3201 dev->irq = res->start;
1da177e4 3202
3203 dev->netdev_ops = &mv643xx_eth_netdev_ops;
3204
3205 dev->watchdog_timeo = 2 * HZ;
3206 dev->base_addr = 0;
1da177e4 3207
3ae8f4e0 3208 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3209 dev->vlan_features = dev->features;
3210
3211 dev->features |= NETIF_F_RXCSUM;
3212 dev->hw_features = dev->features;
1da177e4 3213
01789349 3214 dev->priv_flags |= IFF_UNICAST_FLT;
ee9e4956 3215 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
01789349 3216
3217 /* MTU range: 64 - 9500 */
3218 dev->min_mtu = 64;
3219 dev->max_mtu = 9500;
3220
c9df406f 3221 if (mp->shared->win_protect)
fc32b0e2 3222 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
1da177e4 3223
3224 netif_carrier_off(dev);
3225
3226 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
3227
4fb0a54a 3228 set_rx_coal(mp, 250);
3229 set_tx_coal(mp, 0);
3230
3231 err = register_netdev(dev);
3232 if (err)
3233 goto out;
1da177e4 3234
3235 netdev_notice(dev, "port %d with MAC address %pM\n",
3236 mp->port_num, dev->dev_addr);
1da177e4 3237
13d64285 3238 if (mp->tx_desc_sram_size > 0)
7542db8b 3239 netdev_notice(dev, "configured with sram\n");
1da177e4 3240
c9df406f 3241 return 0;
1da177e4 3242
c9df406f 3243out:
20922486 3244 if (!IS_ERR(mp->clk))
baffab28 3245 clk_disable_unprepare(mp->clk);
c9df406f 3246 free_netdev(dev);
1da177e4 3247
c9df406f 3248 return err;
3249}
3250
c9df406f 3251static int mv643xx_eth_remove(struct platform_device *pdev)
1da177e4 3252{
fc32b0e2 3253 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
1e8a655d 3254 struct net_device *dev = mp->dev;
1da177e4 3255
fc32b0e2 3256 unregister_netdev(mp->dev);
3257 if (dev->phydev)
3258 phy_disconnect(dev->phydev);
23f333a2 3259 cancel_work_sync(&mp->tx_timeout_task);
452503eb 3260
20922486 3261 if (!IS_ERR(mp->clk))
452503eb 3262 clk_disable_unprepare(mp->clk);
9a43a026 3263
fc32b0e2 3264 free_netdev(mp->dev);
c9df406f 3265
c9df406f 3266 return 0;
3267}
3268
c9df406f 3269static void mv643xx_eth_shutdown(struct platform_device *pdev)
d0412d96 3270{
fc32b0e2 3271 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
d0412d96 3272
c9df406f 3273 /* Mask all interrupts on ethernet port */
3274 wrlp(mp, INT_MASK, 0);
3275 rdlp(mp, INT_MASK);
c9df406f 3276
3277 if (netif_running(mp->dev))
3278 port_reset(mp);
3279}
3280
c9df406f 3281static struct platform_driver mv643xx_eth_driver = {
3282 .probe = mv643xx_eth_probe,
3283 .remove = mv643xx_eth_remove,
3284 .shutdown = mv643xx_eth_shutdown,
c9df406f 3285 .driver = {
fc32b0e2 3286 .name = MV643XX_ETH_NAME,
3287 },
3288};
3289
3290static struct platform_driver * const drivers[] = {
3291 &mv643xx_eth_shared_driver,
3292 &mv643xx_eth_driver,
3293};
3294
e5371493 3295static int __init mv643xx_eth_init_module(void)
d0412d96 3296{
3b5dde70 3297 return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
d0412d96 3298}
fc32b0e2 3299module_init(mv643xx_eth_init_module);
d0412d96 3300
e5371493 3301static void __exit mv643xx_eth_cleanup_module(void)
d0412d96 3302{
3b5dde70 3303 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
d0412d96 3304}
e5371493 3305module_exit(mv643xx_eth_cleanup_module);
1da177e4 3306
3307MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3308 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
c9df406f 3309MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
fc32b0e2 3310MODULE_LICENSE("GPL");
c9df406f 3311MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
fc32b0e2 3312MODULE_ALIAS("platform:" MV643XX_ETH_NAME);