Merge tag 'pm-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
[linux-2.6-block.git] / drivers / net / usb / lan78xx.c
CommitLineData
6be665a5 1// SPDX-License-Identifier: GPL-2.0+
55d7de9d
WH
2/*
3 * Copyright (C) 2015 Microchip Technology
55d7de9d 4 */
55d7de9d
WH
5#include <linux/module.h>
6#include <linux/netdevice.h>
7#include <linux/etherdevice.h>
8#include <linux/ethtool.h>
55d7de9d
WH
9#include <linux/usb.h>
10#include <linux/crc32.h>
11#include <linux/signal.h>
12#include <linux/slab.h>
13#include <linux/if_vlan.h>
14#include <linux/uaccess.h>
3c1bcc86 15#include <linux/linkmode.h>
55d7de9d
WH
16#include <linux/list.h>
17#include <linux/ip.h>
18#include <linux/ipv6.h>
19#include <linux/mdio.h>
c6e970a0 20#include <linux/phy.h>
55d7de9d 21#include <net/ip6_checksum.h>
ce896476 22#include <net/vxlan.h>
cc89c323
WH
23#include <linux/interrupt.h>
24#include <linux/irqdomain.h>
25#include <linux/irq.h>
26#include <linux/irqchip/chained_irq.h>
bdfba55e 27#include <linux/microchipphy.h>
89b36fb5 28#include <linux/phy_fixed.h>
1827b067 29#include <linux/of_mdio.h>
760db29b 30#include <linux/of_net.h>
55d7de9d
WH
31#include "lan78xx.h"
32
33#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
34#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35#define DRIVER_NAME "lan78xx"
55d7de9d
WH
36
37#define TX_TIMEOUT_JIFFIES (5 * HZ)
38#define THROTTLE_JIFFIES (HZ / 8)
39#define UNLINK_TIMEOUT_MS 3
40
41#define RX_MAX_QUEUE_MEMORY (60 * 1518)
42
43#define SS_USB_PKT_SIZE (1024)
44#define HS_USB_PKT_SIZE (512)
45#define FS_USB_PKT_SIZE (64)
46
47#define MAX_RX_FIFO_SIZE (12 * 1024)
48#define MAX_TX_FIFO_SIZE (12 * 1024)
dc35f854
JE
49
50#define FLOW_THRESHOLD(n) ((((n) + 511) / 512) & 0x7F)
51#define FLOW_CTRL_THRESHOLD(on, off) ((FLOW_THRESHOLD(on) << 0) | \
52 (FLOW_THRESHOLD(off) << 8))
53
54/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55#define FLOW_ON_SS 9216
56#define FLOW_ON_HS 8704
57
58/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59#define FLOW_OFF_SS 4096
60#define FLOW_OFF_HS 1024
61
55d7de9d
WH
62#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
63#define DEFAULT_BULK_IN_DELAY (0x0800)
64#define MAX_SINGLE_PACKET_SIZE (9000)
65#define DEFAULT_TX_CSUM_ENABLE (true)
66#define DEFAULT_RX_CSUM_ENABLE (true)
67#define DEFAULT_TSO_CSUM_ENABLE (true)
68#define DEFAULT_VLAN_FILTER_ENABLE (true)
ec21ecf0 69#define DEFAULT_VLAN_RX_OFFLOAD (true)
d383216a 70#define TX_ALIGNMENT (4)
55d7de9d
WH
71#define RXW_PADDING 2
72
73#define LAN78XX_USB_VENDOR_ID (0x0424)
74#define LAN7800_USB_PRODUCT_ID (0x7800)
75#define LAN7850_USB_PRODUCT_ID (0x7850)
02dc1f3d 76#define LAN7801_USB_PRODUCT_ID (0x7801)
55d7de9d
WH
77#define LAN78XX_EEPROM_MAGIC (0x78A5)
78#define LAN78XX_OTP_MAGIC (0x78F3)
ef8a0f6e
GJ
79#define AT29M2AF_USB_VENDOR_ID (0x07C9)
80#define AT29M2AF_USB_PRODUCT_ID (0x0012)
55d7de9d
WH
81
82#define MII_READ 1
83#define MII_WRITE 0
84
85#define EEPROM_INDICATOR (0xA5)
86#define EEPROM_MAC_OFFSET (0x01)
87#define MAX_EEPROM_SIZE 512
88#define OTP_INDICATOR_1 (0xF3)
89#define OTP_INDICATOR_2 (0xF7)
90
91#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
92 WAKE_MCAST | WAKE_BCAST | \
93 WAKE_ARP | WAKE_MAGIC)
94
d383216a
JE
95#define TX_URB_NUM 10
96#define TX_SS_URB_NUM TX_URB_NUM
97#define TX_HS_URB_NUM TX_URB_NUM
98#define TX_FS_URB_NUM TX_URB_NUM
99
100/* A single URB buffer must be large enough to hold a complete jumbo packet
101 */
102#define TX_SS_URB_SIZE (32 * 1024)
103#define TX_HS_URB_SIZE (16 * 1024)
104#define TX_FS_URB_SIZE (10 * 1024)
105
c450a8eb
JE
106#define RX_SS_URB_NUM 30
107#define RX_HS_URB_NUM 10
108#define RX_FS_URB_NUM 10
109#define RX_SS_URB_SIZE TX_SS_URB_SIZE
110#define RX_HS_URB_SIZE TX_HS_URB_SIZE
111#define RX_FS_URB_SIZE TX_FS_URB_SIZE
112
113#define SS_BURST_CAP_SIZE RX_SS_URB_SIZE
114#define SS_BULK_IN_DELAY 0x2000
115#define HS_BURST_CAP_SIZE RX_HS_URB_SIZE
116#define HS_BULK_IN_DELAY 0x2000
117#define FS_BURST_CAP_SIZE RX_FS_URB_SIZE
118#define FS_BULK_IN_DELAY 0x2000
119
d383216a
JE
120#define TX_CMD_LEN 8
121#define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN)
122#define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123
0dd87266
JE
124#define RX_CMD_LEN 10
125#define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN)
126#define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN)
127
55d7de9d
WH
128/* USB related defines */
129#define BULK_IN_PIPE 1
130#define BULK_OUT_PIPE 2
131
132/* default autosuspend delay (mSec)*/
133#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
134
20ff5565
WH
135/* statistic update interval (mSec) */
136#define STAT_UPDATE_TIMER (1 * 1000)
137
e1210fe6
JE
138/* time to wait for MAC or FCT to stop (jiffies) */
139#define HW_DISABLE_TIMEOUT (HZ / 10)
140
141/* time to wait between polling MAC or FCT state (ms) */
142#define HW_DISABLE_DELAY_MS 1
143
cc89c323
WH
144/* defines interrupts from interrupt EP */
145#define MAX_INT_EP (32)
146#define INT_EP_INTEP (31)
147#define INT_EP_OTP_WR_DONE (28)
148#define INT_EP_EEE_TX_LPI_START (26)
149#define INT_EP_EEE_TX_LPI_STOP (25)
150#define INT_EP_EEE_RX_LPI (24)
151#define INT_EP_MAC_RESET_TIMEOUT (23)
152#define INT_EP_RDFO (22)
153#define INT_EP_TXE (21)
154#define INT_EP_USB_STATUS (20)
155#define INT_EP_TX_DIS (19)
156#define INT_EP_RX_DIS (18)
157#define INT_EP_PHY (17)
158#define INT_EP_DP (16)
159#define INT_EP_MAC_ERR (15)
160#define INT_EP_TDFU (14)
161#define INT_EP_TDFO (13)
162#define INT_EP_UTX (12)
163#define INT_EP_GPIO_11 (11)
164#define INT_EP_GPIO_10 (10)
165#define INT_EP_GPIO_9 (9)
166#define INT_EP_GPIO_8 (8)
167#define INT_EP_GPIO_7 (7)
168#define INT_EP_GPIO_6 (6)
169#define INT_EP_GPIO_5 (5)
170#define INT_EP_GPIO_4 (4)
171#define INT_EP_GPIO_3 (3)
172#define INT_EP_GPIO_2 (2)
173#define INT_EP_GPIO_1 (1)
174#define INT_EP_GPIO_0 (0)
175
55d7de9d
WH
/* ethtool statistics strings.
 * NOTE: the order here must match the member order of
 * struct lan78xx_statstage / lan78xx_statstage64 — the stats are copied
 * out positionally.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225
/* Raw 32-bit hardware statistics counters as returned by the device's
 * GET_STATS vendor request (see lan78xx_read_stats). Field order must
 * match lan78xx_gstrings and struct lan78xx_statstage64 — the counters
 * are copied word-by-word by position.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275
20ff5565
WH
/* 64-bit accumulated statistics, reconstructed from the 32-bit hardware
 * counters plus the tracked rollover counts (see lan78xx_update_stats).
 * Field order must mirror struct lan78xx_statstage exactly — the update
 * loop walks both structs by index.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325
49621865
RC
/* MAC register addresses included in the driver's register dump; the
 * order of this table defines the dump layout.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

/* Size of the PHY register block appended after the MAC registers
 * (32 standard MII registers).
 */
#define PHY_REG_SIZE (32 * sizeof(u32))
349
55d7de9d
WH
350struct lan78xx_net;
351
/* Driver-private receive-filter and offload state, reached through
 * lan78xx_net::driver_priv.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* software shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN filter bitmap shadow */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast filter update */
	struct work_struct set_vlan;	  /* deferred VLAN filter update */
	u32 wol;	/* enabled Wake-on-LAN mode bits (WAKE_*) */
};
364
/* Lifecycle state of an skb/URB pair; stored in struct skb_data inside
 * skb->cb and used to route buffers between the tx/rx queues.
 */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
374
/* Per-buffer bookkeeping overlaid on skb->cb (must fit in its 48 bytes). */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB owned by this buffer */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;		/* payload length in bytes */
	int num_of_packet;	/* packets aggregated in this URB */
};
382
55d7de9d
WH
383#define EVENT_TX_HALT 0
384#define EVENT_RX_HALT 1
385#define EVENT_RX_MEMORY 2
386#define EVENT_STS_SPLIT 3
387#define EVENT_LINK_RESET 4
388#define EVENT_RX_PAUSED 5
389#define EVENT_DEV_WAKING 6
390#define EVENT_DEV_ASLEEP 7
391#define EVENT_DEV_OPEN 8
20ff5565 392#define EVENT_STAT_UPDATE 9
77dfff5b 393#define EVENT_DEV_DISCONNECT 10
20ff5565
WH
394
/* Statistics bookkeeping. The hardware counters are 32-bit and wrap, so
 * the driver keeps the previous raw snapshot (saved), the number of times
 * each counter has wrapped (rollover_count), each counter's maximum value
 * (rollover_max) and the reconstructed 64-bit totals (curr_stat):
 * curr = raw + rollover_count * (rollover_max + 1).
 */
struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};
55d7de9d 402
cc89c323
WH
/* State for the chip's interrupt sources exposed through an IRQ domain
 * (interrupt status is delivered over the USB interrupt endpoint).
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;	/* mapped virq for the PHY interrupt line */
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;		/* shadow of enabled interrupt bits */
	struct mutex irq_lock; /* for irq bus access */
};
411
55d7de9d
WH
/* Per-device driver context for a LAN78xx USB adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;	/* struct lan78xx_priv — see lan78xx_priv */

	/* URB pool geometry; sized per USB link speed (SS/HS/FS) */
	unsigned int tx_pend_data_len;	/* bytes waiting in txq_pend */
	size_t n_tx_urbs;
	size_t n_rx_urbs;
	size_t tx_urb_size;
	size_t rx_urb_size;

	struct sk_buff_head rxq_free;	/* idle, pre-allocated Rx buffers */
	struct sk_buff_head rxq;	/* Rx URBs submitted to the hardware */
	struct sk_buff_head rxq_done;	/* completed Rx buffers for NAPI */
	struct sk_buff_head rxq_overflow;
	struct sk_buff_head txq_free;	/* idle, pre-allocated Tx buffers */
	struct sk_buff_head txq;	/* Tx URBs submitted to the hardware */
	struct sk_buff_head txq_pend;	/* skbs awaiting aggregation into URBs */

	struct napi_struct napi;

	struct delayed_work wq;		/* deferred EVENT_* processing */

	int msg_enable;			/* netif message level bitmap */

	struct urb *urb_intr;		/* interrupt endpoint URB */
	struct usb_anchor deferred;

	struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex mdiobus_mutex; /* for MDIO bus access */
	unsigned int pipe_in, pipe_out, pipe_intr;

	unsigned int bulk_in_delay;
	unsigned int burst_cap;

	unsigned long flags;		/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned int maxpacket;
	struct timer_list stat_monitor;	/* periodic statistics refresh */

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;			/* ID_REV chip id (7800/7850/7801) */
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;			/* flow control autonegotiation on? */
	u8 fc_request_control;		/* requested pause frame usage */

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};
474
475/* use ethtool to change the level for any given device */
476static int msg_level = -1;
477module_param(msg_level, int, 0);
478MODULE_PARM_DESC(msg_level, "Override default message level");
479
d383216a
JE
/* Pull a pre-allocated URB buffer from @buf_pool.
 *
 * Returns the buffer, or NULL if the pool is empty. The previous
 * skb_queue_empty() pre-check was redundant — skb_dequeue() already
 * returns NULL on an empty queue, and it does so under the queue lock,
 * whereas the lock-free pre-check could race with concurrent users.
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
487
488static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
489 struct sk_buff *buf)
490{
491 buf->data = buf->head;
492 skb_reset_tail_pointer(buf);
493
494 buf->len = 0;
495 buf->data_len = 0;
496
497 skb_queue_tail(buf_pool, buf);
498}
499
500static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501{
502 struct skb_data *entry;
503 struct sk_buff *buf;
504
505 while (!skb_queue_empty(buf_pool)) {
506 buf = skb_dequeue(buf_pool);
507 if (buf) {
508 entry = (struct skb_data *)buf->cb;
509 usb_free_urb(entry->urb);
510 dev_kfree_skb_any(buf);
511 }
512 }
513}
514
515static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
516 size_t n_urbs, size_t urb_size,
517 struct lan78xx_net *dev)
518{
519 struct skb_data *entry;
520 struct sk_buff *buf;
521 struct urb *urb;
522 int i;
523
524 skb_queue_head_init(buf_pool);
525
526 for (i = 0; i < n_urbs; i++) {
527 buf = alloc_skb(urb_size, GFP_ATOMIC);
528 if (!buf)
529 goto error;
530
531 if (skb_linearize(buf) != 0) {
532 dev_kfree_skb_any(buf);
533 goto error;
534 }
535
536 urb = usb_alloc_urb(0, GFP_ATOMIC);
537 if (!urb) {
538 dev_kfree_skb_any(buf);
539 goto error;
540 }
541
542 entry = (struct skb_data *)buf->cb;
543 entry->urb = urb;
544 entry->dev = dev;
545 entry->length = 0;
546 entry->num_of_packet = 0;
547
548 skb_queue_tail(buf_pool, buf);
549 }
550
551 return 0;
552
553error:
554 lan78xx_free_buf_pool(buf_pool);
555
556 return -ENOMEM;
557}
558
c450a8eb
JE
/* Thin wrappers binding the generic buffer-pool helpers to the Rx pool. */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
580
d383216a
JE
/* Thin wrappers binding the generic buffer-pool helpers to the Tx pool. */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
602
55d7de9d
WH
603static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
604{
77dfff5b 605 u32 *buf;
55d7de9d
WH
606 int ret;
607
77dfff5b
JE
608 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
609 return -ENODEV;
610
611 buf = kmalloc(sizeof(u32), GFP_KERNEL);
55d7de9d
WH
612 if (!buf)
613 return -ENOMEM;
614
615 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
616 USB_VENDOR_REQUEST_READ_REGISTER,
617 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
618 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
619 if (likely(ret >= 0)) {
620 le32_to_cpus(buf);
621 *data = *buf;
df0d6f7a 622 } else if (net_ratelimit()) {
55d7de9d 623 netdev_warn(dev->net,
9bcdc610
OR
624 "Failed to read register index 0x%08x. ret = %pe",
625 index, ERR_PTR(ret));
55d7de9d
WH
626 }
627
628 kfree(buf);
629
cfa693bf 630 return ret < 0 ? ret : 0;
55d7de9d
WH
631}
632
633static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
634{
77dfff5b 635 u32 *buf;
55d7de9d
WH
636 int ret;
637
77dfff5b
JE
638 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
639 return -ENODEV;
640
641 buf = kmalloc(sizeof(u32), GFP_KERNEL);
55d7de9d
WH
642 if (!buf)
643 return -ENOMEM;
644
645 *buf = data;
646 cpu_to_le32s(buf);
647
648 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
649 USB_VENDOR_REQUEST_WRITE_REGISTER,
650 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
651 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
df0d6f7a
JE
652 if (unlikely(ret < 0) &&
653 net_ratelimit()) {
55d7de9d 654 netdev_warn(dev->net,
9bcdc610
OR
655 "Failed to write register index 0x%08x. ret = %pe",
656 index, ERR_PTR(ret));
55d7de9d
WH
657 }
658
659 kfree(buf);
660
cfa693bf 661 return ret < 0 ? ret : 0;
55d7de9d
WH
662}
663
e1210fe6
JE
664static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 u32 data)
666{
667 int ret;
668 u32 buf;
669
670 ret = lan78xx_read_reg(dev, reg, &buf);
671 if (ret < 0)
672 return ret;
673
674 buf &= ~mask;
675 buf |= (mask & data);
676
41b774e4 677 return lan78xx_write_reg(dev, reg, buf);
e1210fe6
JE
678}
679
55d7de9d
WH
680static int lan78xx_read_stats(struct lan78xx_net *dev,
681 struct lan78xx_statstage *data)
682{
683 int ret = 0;
684 int i;
685 struct lan78xx_statstage *stats;
686 u32 *src;
687 u32 *dst;
688
55d7de9d
WH
689 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
690 if (!stats)
691 return -ENOMEM;
692
693 ret = usb_control_msg(dev->udev,
694 usb_rcvctrlpipe(dev->udev, 0),
695 USB_VENDOR_REQUEST_GET_STATS,
696 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
697 0,
698 0,
699 (void *)stats,
700 sizeof(*stats),
701 USB_CTRL_SET_TIMEOUT);
702 if (likely(ret >= 0)) {
703 src = (u32 *)stats;
704 dst = (u32 *)data;
9ceec7d3 705 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
55d7de9d
WH
706 le32_to_cpus(&src[i]);
707 dst[i] = src[i];
708 }
709 } else {
710 netdev_warn(dev->net,
858ce8ca 711 "Failed to read stat ret = %d", ret);
55d7de9d
WH
712 }
713
714 kfree(stats);
715
716 return ret;
717}
718
9ceec7d3
JE
719#define check_counter_rollover(struct1, dev_stats, member) \
720 do { \
721 if ((struct1)->member < (dev_stats).saved.member) \
722 (dev_stats).rollover_count.member++; \
723 } while (0)
20ff5565
WH
724
725static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
726 struct lan78xx_statstage *stats)
727{
728 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
729 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
730 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
731 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
732 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
733 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
734 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
735 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
736 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
737 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
738 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
739 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
740 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
741 check_counter_rollover(stats, dev->stats, rx_pause_frames);
742 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
743 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
744 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
745 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
746 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
747 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
748 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
749 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
750 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
751 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
752 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
753 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
754 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
755 check_counter_rollover(stats, dev->stats, tx_single_collisions);
756 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
757 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
758 check_counter_rollover(stats, dev->stats, tx_late_collisions);
759 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
760 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
761 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
762 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
763 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
764 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
765 check_counter_rollover(stats, dev->stats, tx_pause_frames);
766 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
767 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
768 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
769 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
770 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
771 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
772 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
773 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
774 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
775
776 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
777}
778
779static void lan78xx_update_stats(struct lan78xx_net *dev)
780{
781 u32 *p, *count, *max;
782 u64 *data;
783 int i;
784 struct lan78xx_statstage lan78xx_stats;
785
786 if (usb_autopm_get_interface(dev->intf) < 0)
787 return;
788
789 p = (u32 *)&lan78xx_stats;
790 count = (u32 *)&dev->stats.rollover_count;
791 max = (u32 *)&dev->stats.rollover_max;
792 data = (u64 *)&dev->stats.curr_stat;
793
794 mutex_lock(&dev->stats.access_lock);
795
796 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
797 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
798
799 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
800 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
801
802 mutex_unlock(&dev->stats.access_lock);
803
804 usb_autopm_put_interface(dev->intf);
805}
806
39aa1d62
OR
807static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
808{
809 return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
810}
811
812static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
813 u32 hw_disabled)
814{
815 unsigned long timeout;
816 bool stopped = true;
817 int ret;
818 u32 buf;
819
820 /* Stop the h/w block (if not already stopped) */
821
822 ret = lan78xx_read_reg(dev, reg, &buf);
823 if (ret < 0)
824 return ret;
825
826 if (buf & hw_enabled) {
827 buf &= ~hw_enabled;
828
829 ret = lan78xx_write_reg(dev, reg, buf);
830 if (ret < 0)
831 return ret;
832
833 stopped = false;
834 timeout = jiffies + HW_DISABLE_TIMEOUT;
835 do {
836 ret = lan78xx_read_reg(dev, reg, &buf);
837 if (ret < 0)
838 return ret;
839
840 if (buf & hw_disabled)
841 stopped = true;
842 else
843 msleep(HW_DISABLE_DELAY_MS);
844 } while (!stopped && !time_after(jiffies, timeout));
845 }
846
18bdefe6 847 return stopped ? 0 : -ETIMEDOUT;
39aa1d62
OR
848}
849
850static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
851{
852 return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
853}
854
855static int lan78xx_start_tx_path(struct lan78xx_net *dev)
856{
857 int ret;
858
859 netif_dbg(dev, drv, dev->net, "start tx path");
860
861 /* Start the MAC transmitter */
862
863 ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
864 if (ret < 0)
865 return ret;
866
867 /* Start the Tx FIFO */
868
869 ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
870 if (ret < 0)
871 return ret;
872
873 return 0;
874}
875
876static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
877{
878 int ret;
879
880 netif_dbg(dev, drv, dev->net, "stop tx path");
881
882 /* Stop the Tx FIFO */
883
884 ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
885 if (ret < 0)
886 return ret;
887
888 /* Stop the MAC transmitter */
889
890 ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
891 if (ret < 0)
892 return ret;
893
894 return 0;
895}
896
897/* The caller must ensure the Tx path is stopped before calling
898 * lan78xx_flush_tx_fifo().
899 */
900static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
901{
902 return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
903}
904
905static int lan78xx_start_rx_path(struct lan78xx_net *dev)
906{
907 int ret;
908
909 netif_dbg(dev, drv, dev->net, "start rx path");
910
911 /* Start the Rx FIFO */
912
913 ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
914 if (ret < 0)
915 return ret;
916
917 /* Start the MAC receiver*/
918
919 ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
920 if (ret < 0)
921 return ret;
922
923 return 0;
924}
925
926static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
927{
928 int ret;
929
930 netif_dbg(dev, drv, dev->net, "stop rx path");
931
932 /* Stop the MAC receiver */
933
934 ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
935 if (ret < 0)
936 return ret;
937
938 /* Stop the Rx FIFO */
939
940 ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
941 if (ret < 0)
942 return ret;
943
944 return 0;
945}
946
947/* The caller must ensure the Rx path is stopped before calling
948 * lan78xx_flush_rx_fifo().
949 */
950static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
951{
952 return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
953}
954
3a59437e 955/* Loop until the read is completed with timeout called with mdiobus_mutex held */
530f17e6 956static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
55d7de9d
WH
957{
958 unsigned long start_time = jiffies;
959 u32 val;
960 int ret;
961
962 do {
963 ret = lan78xx_read_reg(dev, MII_ACC, &val);
21fff45a
OR
964 if (ret < 0)
965 return ret;
55d7de9d
WH
966
967 if (!(val & MII_ACC_MII_BUSY_))
968 return 0;
969 } while (!time_after(jiffies, start_time + HZ));
970
21fff45a 971 return -ETIMEDOUT;
55d7de9d
WH
972}
973
974static inline u32 mii_access(int id, int index, int read)
975{
976 u32 ret;
977
978 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
979 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
980 if (read)
981 ret |= MII_ACC_MII_READ_;
982 else
983 ret |= MII_ACC_MII_WRITE_;
984 ret |= MII_ACC_MII_BUSY_;
985
986 return ret;
987}
988
55d7de9d
WH
989static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
990{
991 unsigned long start_time = jiffies;
992 u32 val;
993 int ret;
994
995 do {
996 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
8b1b2ca8
OR
997 if (ret < 0)
998 return ret;
55d7de9d
WH
999
1000 if (!(val & E2P_CMD_EPC_BUSY_) ||
1001 (val & E2P_CMD_EPC_TIMEOUT_))
1002 break;
1003 usleep_range(40, 100);
1004 } while (!time_after(jiffies, start_time + HZ));
1005
1006 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
1007 netdev_warn(dev->net, "EEPROM read operation timeout");
8b1b2ca8 1008 return -ETIMEDOUT;
55d7de9d
WH
1009 }
1010
1011 return 0;
1012}
1013
1014static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1015{
1016 unsigned long start_time = jiffies;
1017 u32 val;
1018 int ret;
1019
1020 do {
1021 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
8b1b2ca8
OR
1022 if (ret < 0)
1023 return ret;
55d7de9d
WH
1024
1025 if (!(val & E2P_CMD_EPC_BUSY_))
1026 return 0;
1027
1028 usleep_range(40, 100);
1029 } while (!time_after(jiffies, start_time + HZ));
1030
1031 netdev_warn(dev->net, "EEPROM is busy");
8b1b2ca8 1032 return -ETIMEDOUT;
55d7de9d
WH
1033}
1034
/* Read @length bytes starting at @offset from the configuration EEPROM
 * into @data.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so the LED
 * outputs are disabled for the duration of the access and restored via
 * HW_CFG afterwards. Note the error-path asymmetry: an EEPROM timeout
 * (-ETIMEDOUT) still restores HW_CFG, but a USB failure returns
 * immediately without restoring it — with USB dead there is nothing
 * useful left to do.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;	/* original HW_CFG, restored on exit (7800 only) */
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* one READ command per byte; the controller auto-loads E2P_DATA */
	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;	/* only the low byte is EEPROM data */
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1092
1093static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1094 u32 length, u8 *data)
1095{
55d7de9d 1096 int ret;
8b1b2ca8 1097 u8 sig;
55d7de9d
WH
1098
1099 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
8b1b2ca8
OR
1100 if (ret < 0)
1101 return ret;
55d7de9d 1102
8b1b2ca8
OR
1103 if (sig != EEPROM_INDICATOR)
1104 return -ENODATA;
1105
1106 return lan78xx_read_raw_eeprom(dev, offset, length, data);
55d7de9d
WH
1107}
1108
/* lan78xx_write_raw_eeprom - write @length bytes from @data to the EEPROM,
 * starting at @offset.
 *
 * Mirrors lan78xx_read_raw_eeprom(): the LED pins muxed with the EEPROM
 * interface are disabled on LAN7800 and restored afterwards. A write/erase
 * enable (EWEN) command is issued once, then one WRITE command per byte.
 *
 * Return: 0 on success or a negative error code; -ETIMEDOUT from the chip
 * still restores the LED configuration, USB failures return immediately.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;	/* original HW_CFG, restored on the done path */
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* byte-at-a-time programming: load E2P_DATA, then issue WRITE with
	 * the target address and wait for completion
	 */
	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	/* restore the LED configuration saved at entry (LAN7800 only) */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1184
/* lan78xx_read_raw_otp - read @length bytes from the on-chip OTP memory
 * into @data, starting at @offset.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set) it is powered
 * up first, polling until the bit clears (up to ~1s). Each byte read is a
 * four-register command sequence (ADDR1/ADDR2/FUNC_CMD/CMD_GO) followed by
 * a busy-poll on OTP_STATUS and a fetch from OTP_RD_DATA.
 *
 * Return: 0 on success, a negative USB error, or -ETIMEDOUT if the chip
 * does not complete power-up or a read within the poll window.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* split the byte address across the two OTP address regs */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		/* wait for the read command to finish */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1259
9fb6066d
WH
/* lan78xx_write_raw_otp - program @length bytes from @data into the OTP
 * memory, starting at @offset.
 *
 * Powers the OTP block up if needed (same polling as the read path, but
 * with udelay instead of usleep_range), selects BYTE program mode once,
 * then issues a program-and-verify (PRGVRFY) command per byte, busy-polling
 * OTP_STATUS after each one.
 *
 * NOTE(review): OTP is one-time programmable; bits can only be set once.
 *
 * Return: 0 on success, a negative USB error, or -ETIMEDOUT on poll
 * expiry.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* split the byte address across the two OTP address regs */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		/* program-and-verify the byte, then kick the command */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1338
55d7de9d
WH
1339static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1340 u32 length, u8 *data)
1341{
1342 u8 sig;
1343 int ret;
1344
1345 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1346
1347 if (ret == 0) {
94e7c844 1348 if (sig == OTP_INDICATOR_2)
55d7de9d 1349 offset += 0x100;
94e7c844 1350 else if (sig != OTP_INDICATOR_1)
55d7de9d 1351 ret = -EINVAL;
4bfc3380
PE
1352 if (!ret)
1353 ret = lan78xx_read_raw_otp(dev, offset, length, data);
55d7de9d
WH
1354 }
1355
1356 return ret;
1357}
1358
1359static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1360{
1361 int i, ret;
1362
1363 for (i = 0; i < 100; i++) {
1364 u32 dp_sel;
1365
1366 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1367 if (unlikely(ret < 0))
48fb3d3c 1368 return ret;
55d7de9d
WH
1369
1370 if (dp_sel & DP_SEL_DPRDY_)
1371 return 0;
1372
1373 usleep_range(40, 100);
1374 }
1375
9ceec7d3 1376 netdev_warn(dev->net, "%s timed out", __func__);
55d7de9d 1377
48fb3d3c 1378 return -ETIMEDOUT;
55d7de9d
WH
1379}
1380
/* lan78xx_dataport_write - write @length 32-bit words from @buf into the
 * internal RAM selected by @ram_select, starting at word address @addr.
 *
 * Takes a USB autopm reference and the dataport mutex for the duration of
 * the transfer; each word is written via the DP_ADDR/DP_DATA/DP_CMD
 * sequence, waiting for the data port to go idle between words.
 *
 * Return: 0 on success or a negative error code (also logged).
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* select which internal RAM the data port addresses */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	/* common exit: log failures, then drop lock and autopm reference */
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1428
1429static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1430 int index, u8 addr[ETH_ALEN])
1431{
51ceac9f 1432 u32 temp;
55d7de9d
WH
1433
1434 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1435 temp = addr[3];
1436 temp = addr[2] | (temp << 8);
1437 temp = addr[1] | (temp << 8);
1438 temp = addr[0] | (temp << 8);
1439 pdata->pfilter_table[index][1] = temp;
1440 temp = addr[5];
1441 temp = addr[4] | (temp << 8);
1442 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1443 pdata->pfilter_table[index][0] = temp;
1444 }
1445}
1446
1447/* returns hash bit number for given MAC address */
1448static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1449{
1450 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1451}
1452
/* Work handler that pushes the cached multicast state to the hardware:
 * the multicast hash table goes through the data port, the perfect-filter
 * (MAF) entries and RFE_CTL are written directly. Runs in process context
 * because register access can sleep; scheduled by lan78xx_set_multicast().
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	/* MAF(0) holds the own HW address and is left untouched; for each
	 * other slot, invalidate (write HI=0) before updating LO, then
	 * write HI last so the entry only becomes valid when complete.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
	return;
}
1492
/* ndo_set_rx_mode handler. May be called in atomic context, so it only
 * rebuilds the cached filter state (rfe_ctl, hash table, perfect filters)
 * under the rfe_ctl spinlock and defers the actual register writes to the
 * set_multicast work item.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: clear all filtering modes, the hash
	 * table and the perfect-filter cache before re-deriving them
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* NOTE(review): shadows the outer 'i' above */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses go into the hash table */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1556
ef6a29e8
OR
1557static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
1558 bool tx_pause, bool rx_pause);
1559
55d7de9d
WH
1560static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1561 u16 lcladv, u16 rmtadv)
1562{
349e0c5e 1563 u8 cap;
55d7de9d 1564
349e0c5e
WH
1565 if (dev->fc_autoneg)
1566 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1567 else
1568 cap = dev->fc_request_control;
55d7de9d 1569
55d7de9d
WH
1570 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1571 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1572 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1573
ef6a29e8
OR
1574 return lan78xx_configure_flowcontrol(dev,
1575 cap & FLOW_CTRL_TX,
1576 cap & FLOW_CTRL_RX);
55d7de9d
WH
1577}
1578
c450a8eb
JE
1579static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1580
b1f6696d
JE
/* lan78xx_mac_reset - reset the MAC and wait for completion.
 *
 * Holds the MDIO bus mutex and waits for any in-flight MDIO transaction
 * first (see comment below), then sets MAC_CR_RST_ and polls MAC_CR for up
 * to ~1s until the reset bit self-clears.
 *
 * Return: 0 on success, a negative USB error, or -ETIMEDOUT.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1626
f485849a
OR
/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 * NOTE(review): presumably INT_STS is write-1-to-clear - confirm against
 * the LAN78xx datasheet.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}
1640
d746e074
OR
1641static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
1642
55d7de9d
WH
/* lan78xx_link_reset - react to a PHY link-state change.
 *
 * Acks the PHY interrupt, samples the current link state under the phydev
 * lock, and then:
 *   link lost:  reset the MAC and stop the statistics timer;
 *   link up:    reconfigure USB for the negotiated speed, resolve flow
 *               control from the advertisement registers, restart the
 *               statistics timer, resubmit RX URBs and kick NAPI.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_phy_int_ack(dev);
	if (unlikely(ret < 0))
		return ret;

	/* sample link state atomically w.r.t. other PHY users */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		timer_delete(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		ret = lan78xx_configure_usb(dev, ecmd.base.speed);
		if (ret < 0)
			return ret;

		/* local and link-partner advertisements, for flow control */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		/* napi_schedule() wants BHs disabled */
		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}
1709
1710/* some work can't be done in tasklets, so we use keventd
1711 *
1712 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1713 * but tasklet_schedule() doesn't. hope the failure is rare.
1714 */
e0c79ff6 1715static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
55d7de9d
WH
1716{
1717 set_bit(work, &dev->flags);
1718 if (!schedule_delayed_work(&dev->wq, 0))
1719 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1720}
1721
1722static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1723{
1724 u32 intdata;
1725
1726 if (urb->actual_length != 4) {
1727 netdev_warn(dev->net,
1728 "unexpected urb length %d", urb->actual_length);
1729 return;
1730 }
1731
bb448f8a 1732 intdata = get_unaligned_le32(urb->transfer_buffer);
55d7de9d
WH
1733
1734 if (intdata & INT_ENP_PHY_INT) {
1735 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1736 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1737
bfe6b967
SAS
1738 if (dev->domain_data.phyirq > 0)
1739 generic_handle_irq_safe(dev->domain_data.phyirq);
9ceec7d3 1740 } else {
55d7de9d
WH
1741 netdev_warn(dev->net,
1742 "unexpected interrupt: 0x%08x\n", intdata);
9ceec7d3 1743 }
55d7de9d
WH
1744}
1745
/* ethtool .get_eeprom_len: size of the EEPROM exposed via get/set_eeprom */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1750
1751static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1752 struct ethtool_eeprom *ee, u8 *data)
1753{
1754 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1755 int ret;
1756
1757 ret = usb_autopm_get_interface(dev->intf);
1758 if (ret)
1759 return ret;
55d7de9d
WH
1760
1761 ee->magic = LAN78XX_EEPROM_MAGIC;
1762
8a7ffeb7
NS
1763 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1764
1765 usb_autopm_put_interface(dev->intf);
1766
1767 return ret;
55d7de9d
WH
1768}
1769
1770static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1771 struct ethtool_eeprom *ee, u8 *data)
1772{
1773 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1774 int ret;
1775
1776 ret = usb_autopm_get_interface(dev->intf);
1777 if (ret)
1778 return ret;
55d7de9d 1779
c0776822
NS
1780 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1781 * to load data from EEPROM
1782 */
1783 if (ee->magic == LAN78XX_EEPROM_MAGIC)
8a7ffeb7 1784 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
55d7de9d
WH
1785 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1786 (ee->offset == 0) &&
1787 (ee->len == 512) &&
1788 (data[0] == OTP_INDICATOR_1))
8a7ffeb7 1789 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d 1790
8a7ffeb7
NS
1791 usb_autopm_put_interface(dev->intf);
1792
1793 return ret;
55d7de9d
WH
1794}
1795
/* ethtool .get_strings: copy out the statistics name table */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1802
1803static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1804{
1805 if (sset == ETH_SS_STATS)
1806 return ARRAY_SIZE(lan78xx_gstrings);
1807 else
1808 return -EOPNOTSUPP;
1809}
1810
/* ethtool .get_ethtool_stats: refresh the hardware counters, then copy the
 * cached snapshot out under the stats lock so readers see a consistent set.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1822
1823static void lan78xx_get_wol(struct net_device *netdev,
1824 struct ethtool_wolinfo *wol)
1825{
1826 struct lan78xx_net *dev = netdev_priv(netdev);
1827 int ret;
1828 u32 buf;
1829 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1830
1831 if (usb_autopm_get_interface(dev->intf) < 0)
9ceec7d3 1832 return;
55d7de9d
WH
1833
1834 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1835 if (unlikely(ret < 0)) {
01e2f4d5 1836 netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
55d7de9d
WH
1837 wol->supported = 0;
1838 wol->wolopts = 0;
1839 } else {
1840 if (buf & USB_CFG_RMT_WKP_) {
1841 wol->supported = WAKE_ALL;
1842 wol->wolopts = pdata->wol;
1843 } else {
1844 wol->supported = 0;
1845 wol->wolopts = 0;
1846 }
1847 }
1848
1849 usb_autopm_put_interface(dev->intf);
1850}
1851
/* ethtool .set_wol: validate and store the requested wake options, arm or
 * disarm USB wakeup for the device, and forward the request to the PHY.
 *
 * NOTE(review): pdata->wol is updated before device_set_wakeup_enable()
 * can fail, so the cached value may not match the armed state on error -
 * confirm whether that is intentional.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* reject wake options beyond what WAKE_ALL covers */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1879
d80a5233 1880static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
55d7de9d
WH
1881{
1882 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1883 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1884 int ret;
1885 u32 buf;
55d7de9d
WH
1886
1887 ret = usb_autopm_get_interface(dev->intf);
1888 if (ret < 0)
1889 return ret;
1890
ce85e13a
WH
1891 ret = phy_ethtool_get_eee(phydev, edata);
1892 if (ret < 0)
1893 goto exit;
1894
55d7de9d
WH
1895 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1896 if (buf & MAC_CR_EEE_EN_) {
55d7de9d
WH
1897 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1898 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1899 edata->tx_lpi_timer = buf;
1900 } else {
55d7de9d
WH
1901 edata->tx_lpi_timer = 0;
1902 }
1903
ce85e13a
WH
1904 ret = 0;
1905exit:
55d7de9d
WH
1906 usb_autopm_put_interface(dev->intf);
1907
ce85e13a 1908 return ret;
55d7de9d
WH
1909}
1910
d80a5233 1911static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
55d7de9d
WH
1912{
1913 struct lan78xx_net *dev = netdev_priv(net);
1914 int ret;
1915 u32 buf;
1916
1917 ret = usb_autopm_get_interface(dev->intf);
1918 if (ret < 0)
1919 return ret;
1920
a00bbd15
AL
1921 ret = phy_ethtool_set_eee(net->phydev, edata);
1922 if (ret < 0)
1923 goto out;
55d7de9d 1924
a00bbd15
AL
1925 buf = (u32)edata->tx_lpi_timer;
1926 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1927out:
55d7de9d
WH
1928 usb_autopm_put_interface(dev->intf);
1929
a00bbd15 1930 return ret;
55d7de9d
WH
1931}
1932
1933static u32 lan78xx_get_link(struct net_device *net)
1934{
6b67d4d6
II
1935 u32 link;
1936
1937 mutex_lock(&net->phydev->lock);
ce85e13a 1938 phy_read_status(net->phydev);
6b67d4d6
II
1939 link = net->phydev->link;
1940 mutex_unlock(&net->phydev->lock);
55d7de9d 1941
6b67d4d6 1942 return link;
55d7de9d
WH
1943}
1944
55d7de9d
WH
/* ethtool .get_drvinfo: report driver name and the USB bus path */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1953
/* ethtool .get_msglevel: current netif message-enable mask */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1960
/* ethtool .set_msglevel: set the netif message-enable mask */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1967
6e76510e
PR
1968static int lan78xx_get_link_ksettings(struct net_device *net,
1969 struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1970{
1971 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1972 struct phy_device *phydev = net->phydev;
55d7de9d 1973 int ret;
55d7de9d 1974
55d7de9d
WH
1975 ret = usb_autopm_get_interface(dev->intf);
1976 if (ret < 0)
1977 return ret;
1978
5514174f 1979 phy_ethtool_ksettings_get(phydev, cmd);
55d7de9d 1980
55d7de9d
WH
1981 usb_autopm_put_interface(dev->intf);
1982
1983 return ret;
1984}
1985
6e76510e
PR
1986static int lan78xx_set_link_ksettings(struct net_device *net,
1987 const struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1988{
1989 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1990 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1991 int ret = 0;
1992 int temp;
1993
55d7de9d
WH
1994 ret = usb_autopm_get_interface(dev->intf);
1995 if (ret < 0)
1996 return ret;
1997
55d7de9d 1998 /* change speed & duplex */
6e76510e 1999 ret = phy_ethtool_ksettings_set(phydev, cmd);
55d7de9d 2000
6e76510e 2001 if (!cmd->base.autoneg) {
55d7de9d 2002 /* force link down */
ce85e13a
WH
2003 temp = phy_read(phydev, MII_BMCR);
2004 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 2005 mdelay(1);
ce85e13a 2006 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
2007 }
2008
2009 usb_autopm_put_interface(dev->intf);
2010
2011 return ret;
2012}
2013
349e0c5e
WH
2014static void lan78xx_get_pause(struct net_device *net,
2015 struct ethtool_pauseparam *pause)
2016{
2017 struct lan78xx_net *dev = netdev_priv(net);
2018 struct phy_device *phydev = net->phydev;
6e76510e 2019 struct ethtool_link_ksettings ecmd;
349e0c5e 2020
6e76510e 2021 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
2022
2023 pause->autoneg = dev->fc_autoneg;
2024
2025 if (dev->fc_request_control & FLOW_CTRL_TX)
2026 pause->tx_pause = 1;
2027
2028 if (dev->fc_request_control & FLOW_CTRL_RX)
2029 pause->rx_pause = 1;
2030}
2031
/* ethtool .set_pauseparam: record the requested flow-control setting and,
 * when autonegotiation is active, rebuild the Pause/Asym_Pause bits in the
 * PHY advertisement and restart negotiation. Requesting pause autoneg
 * while link autoneg is off is rejected with -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg to be enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* drop the old pause bits, then OR in the new ones derived
		 * from the requested flow-control mode
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
2076
49621865
RC
/* ethtool .get_regs_len: size of the register dump produced by get_regs */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
2081
/* ethtool .get_regs: dump every register listed in lan78xx_regs[] into
 * @buf. If any read fails, the values gathered so far are zeroed so the
 * caller never sees a partially valid dump.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	/* invalidate the partial dump */
	memset(data, 0, data_count * sizeof(u32));
}
2109
55d7de9d
WH
/* ethtool operations table for lan78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
2134
6f311358 2135static int lan78xx_init_mac_address(struct lan78xx_net *dev)
55d7de9d
WH
2136{
2137 u32 addr_lo, addr_hi;
55d7de9d 2138 u8 addr[6];
6f311358
OR
2139 int ret;
2140
2141 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
2142 if (ret < 0)
2143 return ret;
55d7de9d 2144
6f311358
OR
2145 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
2146 if (ret < 0)
2147 return ret;
55d7de9d
WH
2148
2149 addr[0] = addr_lo & 0xFF;
2150 addr[1] = (addr_lo >> 8) & 0xFF;
2151 addr[2] = (addr_lo >> 16) & 0xFF;
2152 addr[3] = (addr_lo >> 24) & 0xFF;
2153 addr[4] = addr_hi & 0xFF;
2154 addr[5] = (addr_hi >> 8) & 0xFF;
2155
2156 if (!is_valid_ether_addr(addr)) {
760db29b
PE
2157 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
2158 /* valid address present in Device Tree */
2159 netif_dbg(dev, ifup, dev->net,
2160 "MAC address read from Device Tree");
2161 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
2162 ETH_ALEN, addr) == 0) ||
2163 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
2164 ETH_ALEN, addr) == 0)) &&
2165 is_valid_ether_addr(addr)) {
2166 /* eeprom values are valid so use them */
2167 netif_dbg(dev, ifup, dev->net,
2168 "MAC address read from EEPROM");
55d7de9d
WH
2169 } else {
2170 /* generate random MAC */
6c1f0a1f 2171 eth_random_addr(addr);
55d7de9d
WH
2172 netif_dbg(dev, ifup, dev->net,
2173 "MAC address set to random addr");
2174 }
760db29b
PE
2175
2176 addr_lo = addr[0] | (addr[1] << 8) |
2177 (addr[2] << 16) | (addr[3] << 24);
2178 addr_hi = addr[4] | (addr[5] << 8);
2179
6f311358
OR
2180 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2181 if (ret < 0)
2182 return ret;
2183
2184 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2185 if (ret < 0)
2186 return ret;
55d7de9d
WH
2187 }
2188
6f311358
OR
2189 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2190 if (ret < 0)
2191 return ret;
2192
2193 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2194 if (ret < 0)
2195 return ret;
55d7de9d 2196
af804e6d 2197 eth_hw_addr_set(dev->net, addr);
6f311358
OR
2198
2199 return 0;
55d7de9d
WH
2200}
2201
ce85e13a
WH
/* MDIO read and write wrappers for phylib */

/* Read a 16-bit PHY register over the chip's MII interface.
 * Runs with the USB interface resumed and under mdiobus_mutex; returns
 * the register value (0..0xFFFF) or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	/* Resume the device: MII register access needs the USB link up */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the transaction to finish before sampling MII_DATA */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	/* success: hand the 16-bit register value back to phylib */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2242
/* Write a 16-bit PHY register over the chip's MII interface.
 * Mirrors lan78xx_mdiobus_read(): data is staged in MII_DATA, then the
 * transaction is kicked off via MII_ACC. Returns 0 or a negative errno.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	/* Resume the device: MII register access needs the USB link up */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* stage the value to be written before starting the transaction */
	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the write to complete */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
2281
/* Allocate and register the MDIO bus that backs the LAN78xx PHY(s).
 * The phy_mask restricts scanning per chip variant; an optional "mdio"
 * Device Tree child node describes externally attached PHYs.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* Unique bus id derived from the USB bus and device numbers */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2328
2329static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2330{
2331 mdiobus_unregister(dev->mdiobus);
ce85e13a
WH
2332 mdiobus_free(dev->mdiobus);
2333}
2334
2335static void lan78xx_link_status_change(struct net_device *net)
2336{
a00bbd15 2337 struct lan78xx_net *dev = netdev_priv(net);
14437e3f 2338 struct phy_device *phydev = net->phydev;
a00bbd15
AL
2339 u32 data;
2340 int ret;
2341
2342 ret = lan78xx_read_reg(dev, MAC_CR, &data);
2343 if (ret < 0)
2344 return;
2345
2346 if (phydev->enable_tx_lpi)
2347 data |= MAC_CR_EEE_EN_;
2348 else
2349 data &= ~MAC_CR_EEE_EN_;
2350 lan78xx_write_reg(dev, MAC_CR, data);
14437e3f 2351
e57cf363 2352 phy_print_status(phydev);
55d7de9d
WH
2353}
2354
cc89c323
WH
2355static int irq_map(struct irq_domain *d, unsigned int irq,
2356 irq_hw_number_t hwirq)
2357{
2358 struct irq_domain_data *data = d->host_data;
2359
2360 irq_set_chip_data(irq, data);
2361 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2362 irq_set_noprobe(irq);
2363
2364 return 0;
2365}
2366
/* irq_domain .unmap callback: inverse of irq_map() — detach the
 * handler/chip and clear the per-irq chip data.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2372
/* irq_domain callbacks for the device's internal interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map = irq_map,
	.unmap = irq_unmap,
};
2377
2378static void lan78xx_irq_mask(struct irq_data *irqd)
2379{
2380 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2381
2382 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2383}
2384
2385static void lan78xx_irq_unmask(struct irq_data *irqd)
2386{
2387 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2388
2389 data->irqenable |= BIT(irqd_to_hwirq(irqd));
2390}
2391
2392static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2393{
2394 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2395
2396 mutex_lock(&data->irq_lock);
2397}
2398
/* Flush the accumulated irqenable mask to INT_EP_CTL, then release the
 * bus lock taken in lan78xx_irq_bus_lock(). Register-access failures
 * are logged; this callback cannot return an error.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* Only touch the hardware when the cached mask actually differs */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2424
/* irq_chip implementation: mask state is cached in irq_domain_data and
 * flushed to hardware in lan78xx_irq_bus_sync_unlock().
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2432
/* Create a simple irq_domain covering the device's internal interrupt
 * sources (MAX_INT_EP lines) and map the PHY interrupt (INT_EP_PHY).
 * On success dev->domain_data.phyirq holds the mapped virq; on failure
 * both irqdomain and phyirq are left cleared.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	mutex_init(&dev->domain_data.irq_lock);

	/* Seed the cached enable mask from the current hardware state */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		return ret;

	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
					     MAX_INT_EP, 0,
					     &chip_domain_ops,
					     &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2473
2474static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2475{
2476 if (dev->domain_data.phyirq > 0) {
2477 irq_dispose_mapping(dev->domain_data.phyirq);
2478
2479 if (dev->domain_data.irqdomain)
2480 irq_domain_remove(dev->domain_data.irqdomain);
2481 }
2482 dev->domain_data.phyirq = 0;
2483 dev->domain_data.irqdomain = NULL;
2484}
2485
d746e074
OR
2486/**
2487 * lan78xx_configure_usb - Configure USB link power settings
2488 * @dev: pointer to the LAN78xx device structure
2489 * @speed: negotiated Ethernet link speed (in Mbps)
2490 *
2491 * This function configures U1/U2 link power management for SuperSpeed
2492 * USB devices based on the current Ethernet link speed. It uses the
2493 * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2494 *
2495 * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2496 * LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2497 *
2498 * Return: 0 on success or a negative error code on failure.
2499 */
2500static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
55d7de9d 2501{
d746e074
OR
2502 u32 mask, val;
2503 int ret;
2504
2505 /* Only configure USB settings for SuperSpeed devices */
2506 if (dev->udev->speed != USB_SPEED_SUPER)
2507 return 0;
2508
2509 /* LAN7850 does not support USB 3.x */
2510 if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2511 netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2512 return 0;
2513 }
2514
2515 switch (speed) {
2516 case SPEED_1000:
2517 /* Disable U2, enable U1 */
2518 ret = lan78xx_update_reg(dev, USB_CFG1,
2519 USB_CFG1_DEV_U2_INIT_EN_, 0);
2520 if (ret < 0)
2521 return ret;
2522
2523 return lan78xx_update_reg(dev, USB_CFG1,
2524 USB_CFG1_DEV_U1_INIT_EN_,
2525 USB_CFG1_DEV_U1_INIT_EN_);
2526
2527 case SPEED_100:
2528 case SPEED_10:
2529 /* Enable both U1 and U2 */
2530 mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2531 val = mask;
2532 return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2533
2534 default:
2535 netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2536 return -EINVAL;
2537 }
2538}
2539
ef6a29e8
OR
2540/**
2541 * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2542 * @dev: pointer to the LAN78xx device structure
2543 * @tx_pause: enable transmission of pause frames
2544 * @rx_pause: enable reception of pause frames
2545 *
2546 * This function configures the LAN78xx flow control settings by writing
2547 * to the FLOW and FCT_FLOW registers. The pause time is set to the
2548 * maximum allowed value (65535 quanta). FIFO thresholds are selected
2549 * based on USB speed.
2550 *
2551 * The Pause Time field is measured in units of 512-bit times (quanta):
2552 * - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2553 * - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2554 * - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2555 *
2556 * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2557 * - RXUSED is the number of bytes used in the RX FIFO
2558 * - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2559 * - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2560 * - Both thresholds are encoded in units of 512 bytes (rounded up)
2561 *
2562 * Thresholds differ by USB speed because available USB bandwidth
2563 * affects how fast packets can be drained from the RX FIFO:
2564 * - USB 3.x (SuperSpeed):
2565 * FLOW_ON = 9216 bytes → 18 units
2566 * FLOW_OFF = 4096 bytes → 8 units
2567 * - USB 2.0 (High-Speed):
2568 * FLOW_ON = 8704 bytes → 17 units
2569 * FLOW_OFF = 1024 bytes → 2 units
2570 *
2571 * Note: The FCT_FLOW register must be configured before enabling TX pause
2572 * (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2573 *
2574 * Return: 0 on success or a negative error code on failure.
2575 */
2576static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2577 bool tx_pause, bool rx_pause)
2578{
2579 /* Use maximum pause time: 65535 quanta (512-bit times) */
2580 const u32 pause_time_quanta = 65535;
2581 u32 fct_flow = 0;
2582 u32 flow = 0;
ce85e13a 2583 int ret;
ef6a29e8
OR
2584
2585 /* Prepare MAC flow control bits */
2586 if (tx_pause)
2587 flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2588
2589 if (rx_pause)
2590 flow |= FLOW_CR_RX_FCEN_;
2591
2592 /* Select RX FIFO thresholds based on USB speed
2593 *
2594 * FCT_FLOW layout:
2595 * bits [6:0] FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2596 * bits [14:8] FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2597 * thresholds are expressed in units of 512 bytes
2598 */
2599 switch (dev->udev->speed) {
2600 case USB_SPEED_SUPER:
2601 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2602 break;
2603 case USB_SPEED_HIGH:
2604 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2605 break;
2606 default:
2607 netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2608 dev->udev->speed);
2609 return -EINVAL;
2610 }
2611
2612 /* Step 1: Write FIFO thresholds before enabling pause frames */
2613 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2614 if (ret < 0)
2615 return ret;
2616
2617 /* Step 2: Enable MAC pause functionality */
2618 return lan78xx_write_reg(dev, FLOW, flow);
2619}
2620
d39f339d
OR
2621/**
2622 * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
2623 * @dev: LAN78xx device
2624 *
2625 * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
2626 * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
2627 * switch without a visible PHY.
2628 *
2629 * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
2630 */
2631static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
55d7de9d 2632{
89b36fb5
RC
2633 struct fixed_phy_status fphy_status = {
2634 .link = 1,
2635 .speed = SPEED_1000,
2636 .duplex = DUPLEX_FULL,
2637 };
d39f339d
OR
2638
2639 netdev_info(dev->net,
2640 "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
2641
d23b4af5 2642 return fixed_phy_register(&fphy_status, NULL);
d39f339d
OR
2643}
2644
2645/**
2646 * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2647 * @dev: LAN78xx device structure
2648 *
2649 * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2650 * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2651 * sets dev->interface based on chip ID and detected PHY type.
2652 *
2653 * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2654 */
2655static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2656{
3b51cc75 2657 struct phy_device *phydev;
55d7de9d 2658
d39f339d 2659 /* Attempt to locate a PHY on the MDIO bus */
ce85e13a 2660 phydev = phy_find_first(dev->mdiobus);
d39f339d
OR
2661
2662 switch (dev->chipid) {
2663 case ID_REV_CHIP_ID_7801_:
2664 if (phydev) {
2665 /* External RGMII PHY detected */
2666 dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2667 phydev->is_internal = false;
2668
2669 if (!phydev->drv)
2670 netdev_warn(dev->net,
2671 "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2672
2673 return phydev;
89b36fb5 2674 }
d39f339d 2675
89b36fb5 2676 dev->interface = PHY_INTERFACE_MODE_RGMII;
d39f339d
OR
2677 /* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
2678 return lan78xx_register_fixed_phy(dev);
2679
2680 case ID_REV_CHIP_ID_7800_:
2681 case ID_REV_CHIP_ID_7850_:
2682 if (!phydev)
2683 return ERR_PTR(-ENODEV);
2684
2685 /* These use internal GMII-connected PHY */
2686 dev->interface = PHY_INTERFACE_MODE_GMII;
2687 phydev->is_internal = true;
2688 return phydev;
2689
2690 default:
2691 netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2692 return ERR_PTR(-ENODEV);
2693 }
2694}
2695
2696/**
2697 * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2698 * @dev: LAN78xx device
2699 *
2700 * Configure MAC-side registers according to dev->interface, which should be
2701 * set by lan78xx_get_phy().
2702 *
2703 * - For PHY_INTERFACE_MODE_RGMII:
2704 * Enable MAC-side TXC delay. This mode seems to be used in a special setup
2705 * without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2706 * connected to the KSZ9897 switch, and the link timing is expected to be
2707 * hardwired (e.g. via strapping or board layout). No devicetree support is
2708 * assumed here.
2709 *
2710 * - For PHY_INTERFACE_MODE_RGMII_ID:
2711 * Disable MAC-side delay and rely on the PHY driver to provide delay.
2712 *
2713 * - For GMII, no MAC-specific config is needed.
2714 *
2715 * Return: 0 on success or a negative error code.
2716 */
2717static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2718{
2719 int ret;
2720
2721 switch (dev->interface) {
2722 case PHY_INTERFACE_MODE_RGMII:
2723 /* Enable MAC-side TX clock delay */
89b36fb5
RC
2724 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2725 MAC_RGMII_ID_TXC_DELAY_EN_);
232aa459 2726 if (ret < 0)
d39f339d 2727 return ret;
232aa459 2728
89b36fb5 2729 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
232aa459 2730 if (ret < 0)
d39f339d 2731 return ret;
232aa459 2732
d39f339d
OR
2733 ret = lan78xx_update_reg(dev, HW_CFG,
2734 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
232aa459
OR
2735 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2736 if (ret < 0)
d39f339d
OR
2737 return ret;
2738
2739 break;
2740
2741 case PHY_INTERFACE_MODE_RGMII_ID:
2742 /* Disable MAC-side TXC delay, PHY provides it */
232aa459
OR
2743 ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2744 if (ret < 0)
d39f339d
OR
2745 return ret;
2746
2747 break;
2748
2749 case PHY_INTERFACE_MODE_GMII:
2750 /* No MAC-specific configuration required */
2751 break;
02dc1f3d 2752
d39f339d
OR
2753 default:
2754 netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2755 dev->interface);
2756 break;
89b36fb5 2757 }
232aa459 2758
d39f339d 2759 return 0;
89b36fb5
RC
2760}
2761
8ba1f33c
OR
2762/**
2763 * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2764 * @dev: LAN78xx device
2765 * @phydev: PHY device (must be valid)
2766 *
2767 * Reads "microchip,led-modes" property from the PHY's DT node and enables
2768 * the corresponding number of LEDs by writing to HW_CFG.
2769 *
2770 * This helper preserves the original logic, enabling up to 4 LEDs.
2771 * If the property is not present, this function does nothing.
2772 *
2773 * Return: 0 on success or a negative error code.
2774 */
2775static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2776 struct phy_device *phydev)
2777{
2778 struct device_node *np = phydev->mdio.dev.of_node;
2779 u32 reg;
2780 int len, ret;
2781
2782 if (!np)
2783 return 0;
2784
2785 len = of_property_count_elems_of_size(np, "microchip,led-modes",
2786 sizeof(u32));
2787 if (len < 0)
2788 return 0;
2789
2790 ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2791 if (ret < 0)
2792 return ret;
2793
2794 reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2795 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2796
2797 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2798 (len > 1) * HW_CFG_LED1_EN_ |
2799 (len > 2) * HW_CFG_LED2_EN_ |
2800 (len > 3) * HW_CFG_LED3_EN_;
2801
2802 return lan78xx_write_reg(dev, HW_CFG, reg);
89b36fb5
RC
2803}
2804
/* Locate/register the PHY, prepare the MAC for it, and connect phylib.
 *
 * On success the PHY is attached with lan78xx_link_status_change() as
 * the link callback, flow-control advertisement is set up, and EEE is
 * enabled. On failure any pseudo fixed-link PHY registered as fallback
 * is unregistered and freed.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	phydev = lan78xx_get_phy(dev);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto free_phy;

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* a fallback fixed PHY was registered by lan78xx_get_phy();
		 * release it on connect failure
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret)
		goto free_phy;

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;

free_phy:
	/* NOTE(review): when reached after a successful phy_connect_direct()
	 * (LED config failure), the PHY is not disconnected here — verify
	 * whether phy_disconnect() is required on that path.
	 */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	return ret;
}
2878
/* Program the maximum accepted RX frame length into MAC_RX.
 *
 * The receiver is temporarily disabled around the size update if it was
 * running, then re-enabled, since MAC_RX_MAX_SIZE is changed in place.
 * @size excludes the FCS; 4 bytes are added before programming.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	bool rxenabled;
	u32 buf;
	int ret;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	if (ret < 0)
		return ret;

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		/* Pause the receiver while the max size field changes */
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);
	if (ret < 0)
		return ret;

	if (rxenabled) {
		/* Restore the receiver's previous enabled state */
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	return 0;
}
2915
/* Asynchronously unlink every URB queued on @q.
 *
 * Walks the queue under its lock for the next entry not yet in
 * unlink_start state, marks it, then drops the lock around
 * usb_unlink_urb() because the completion handler may run synchronously
 * and take the same lock. Returns the number of URBs unlinked.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* Find the next entry that has not been unlinked yet */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2960
/* ndo_change_mtu handler: program the new maximum RX frame length into
 * the MAC before publishing the MTU change on the netdev.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((max_frame_len % dev->maxpacket) == 0)
		return -EDOM;

	/* Resume the device for the register write below */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
	if (ret < 0)
		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
			   new_mtu, netdev->mtu, ERR_PTR(ret));
	else
		WRITE_ONCE(netdev->mtu, new_mtu);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2986
e0c79ff6 2987static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2988{
2989 struct lan78xx_net *dev = netdev_priv(netdev);
2990 struct sockaddr *addr = p;
2991 u32 addr_lo, addr_hi;
9a46956c 2992 int ret;
55d7de9d
WH
2993
2994 if (netif_running(netdev))
2995 return -EBUSY;
2996
2997 if (!is_valid_ether_addr(addr->sa_data))
2998 return -EADDRNOTAVAIL;
2999
af804e6d 3000 eth_hw_addr_set(netdev, addr->sa_data);
55d7de9d
WH
3001
3002 addr_lo = netdev->dev_addr[0] |
3003 netdev->dev_addr[1] << 8 |
3004 netdev->dev_addr[2] << 16 |
3005 netdev->dev_addr[3] << 24;
3006 addr_hi = netdev->dev_addr[4] |
3007 netdev->dev_addr[5] << 8;
3008
9a46956c
OR
3009 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3010 if (ret < 0)
3011 return ret;
3012
3013 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3014 if (ret < 0)
3015 return ret;
55d7de9d 3016
15515aaa 3017 /* Added to support MAC address changes */
9a46956c
OR
3018 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3019 if (ret < 0)
3020 return ret;
15515aaa 3021
9a46956c 3022 return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
55d7de9d
WH
3023}
3024
3025/* Enable or disable Rx checksum offload engine */
3026static int lan78xx_set_features(struct net_device *netdev,
3027 netdev_features_t features)
3028{
3029 struct lan78xx_net *dev = netdev_priv(netdev);
3030 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3031 unsigned long flags;
55d7de9d
WH
3032
3033 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3034
3035 if (features & NETIF_F_RXCSUM) {
3036 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3037 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3038 } else {
3039 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3040 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3041 }
3042
3043 if (features & NETIF_F_HW_VLAN_CTAG_RX)
ec21ecf0
DS
3044 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3045 else
3046 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3047
4a27327b 3048 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
55d7de9d
WH
3049 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3050 else
3051 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3052
3053 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3054
bf361b18 3055 return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
55d7de9d
WH
3056}
3057
3058static void lan78xx_deferred_vlan_write(struct work_struct *param)
3059{
3060 struct lan78xx_priv *pdata =
3061 container_of(param, struct lan78xx_priv, set_vlan);
3062 struct lan78xx_net *dev = pdata->dev;
3063
3064 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3065 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3066}
3067
3068static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3069 __be16 proto, u16 vid)
3070{
3071 struct lan78xx_net *dev = netdev_priv(netdev);
3072 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3073 u16 vid_bit_index;
3074 u16 vid_dword_index;
3075
3076 vid_dword_index = (vid >> 5) & 0x7F;
3077 vid_bit_index = vid & 0x1F;
3078
3079 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3080
3081 /* defer register writes to a sleepable context */
3082 schedule_work(&pdata->set_vlan);
3083
3084 return 0;
3085}
3086
3087static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3088 __be16 proto, u16 vid)
3089{
3090 struct lan78xx_net *dev = netdev_priv(netdev);
3091 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3092 u16 vid_bit_index;
3093 u16 vid_dword_index;
3094
3095 vid_dword_index = (vid >> 5) & 0x7F;
3096 vid_bit_index = vid & 0x1F;
3097
3098 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3099
3100 /* defer register writes to a sleepable context */
3101 schedule_work(&pdata->set_vlan);
3102
3103 return 0;
3104}
3105
77586156 3106static int lan78xx_init_ltm(struct lan78xx_net *dev)
55d7de9d 3107{
77586156 3108 u32 regs[6] = { 0 };
55d7de9d
WH
3109 int ret;
3110 u32 buf;
55d7de9d
WH
3111
3112 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
77586156
OR
3113 if (ret < 0)
3114 goto init_ltm_failed;
3115
55d7de9d
WH
3116 if (buf & USB_CFG1_LTM_ENABLE_) {
3117 u8 temp[2];
3118 /* Get values from EEPROM first */
3119 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3120 if (temp[0] == 24) {
3121 ret = lan78xx_read_raw_eeprom(dev,
3122 temp[1] * 2,
3123 24,
3124 (u8 *)regs);
3125 if (ret < 0)
77586156 3126 return ret;
55d7de9d
WH
3127 }
3128 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3129 if (temp[0] == 24) {
3130 ret = lan78xx_read_raw_otp(dev,
3131 temp[1] * 2,
3132 24,
3133 (u8 *)regs);
3134 if (ret < 0)
77586156 3135 return ret;
55d7de9d
WH
3136 }
3137 }
3138 }
3139
77586156
OR
3140 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3141 if (ret < 0)
3142 goto init_ltm_failed;
3143
3144 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3145 if (ret < 0)
3146 goto init_ltm_failed;
3147
3148 ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3149 if (ret < 0)
3150 goto init_ltm_failed;
3151
3152 ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3153 if (ret < 0)
3154 goto init_ltm_failed;
3155
3156 ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3157 if (ret < 0)
3158 goto init_ltm_failed;
3159
3160 ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3161 if (ret < 0)
3162 goto init_ltm_failed;
3163
3164 return 0;
3165
3166init_ltm_failed:
3167 netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3168 return ret;
55d7de9d
WH
3169}
3170
d383216a
JE
3171static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3172{
3173 int result = 0;
3174
3175 switch (dev->udev->speed) {
3176 case USB_SPEED_SUPER:
c450a8eb 3177 dev->rx_urb_size = RX_SS_URB_SIZE;
d383216a 3178 dev->tx_urb_size = TX_SS_URB_SIZE;
c450a8eb 3179 dev->n_rx_urbs = RX_SS_URB_NUM;
d383216a 3180 dev->n_tx_urbs = TX_SS_URB_NUM;
c450a8eb
JE
3181 dev->bulk_in_delay = SS_BULK_IN_DELAY;
3182 dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
d383216a
JE
3183 break;
3184 case USB_SPEED_HIGH:
c450a8eb 3185 dev->rx_urb_size = RX_HS_URB_SIZE;
d383216a 3186 dev->tx_urb_size = TX_HS_URB_SIZE;
c450a8eb 3187 dev->n_rx_urbs = RX_HS_URB_NUM;
d383216a 3188 dev->n_tx_urbs = TX_HS_URB_NUM;
c450a8eb
JE
3189 dev->bulk_in_delay = HS_BULK_IN_DELAY;
3190 dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
d383216a
JE
3191 break;
3192 case USB_SPEED_FULL:
c450a8eb 3193 dev->rx_urb_size = RX_FS_URB_SIZE;
d383216a 3194 dev->tx_urb_size = TX_FS_URB_SIZE;
c450a8eb 3195 dev->n_rx_urbs = RX_FS_URB_NUM;
d383216a 3196 dev->n_tx_urbs = TX_FS_URB_NUM;
c450a8eb
JE
3197 dev->bulk_in_delay = FS_BULK_IN_DELAY;
3198 dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
d383216a
JE
3199 break;
3200 default:
3201 netdev_warn(dev->net, "USB bus speed not supported\n");
3202 result = -EIO;
3203 break;
3204 }
3205
3206 return result;
3207}
3208
55d7de9d
WH
/* lan78xx_reset - LiteReset the chip and program a known-good baseline
 * @dev: device context
 *
 * Sequence: LiteReset via HW_CFG (polled with a 1s timeout), restore the
 * MAC address, latch chip ID/rev, configure USB behaviour (NAK on IN,
 * LTM, burst cap, bulk-in delay), size the FIFOs, clear interrupts and
 * flow control, set receive-filter defaults, sync checksum offloads and
 * multicast filters, then reset the internal PHY (polled) and set MAC_CR
 * per chip variant. The order of register writes is significant.
 *
 * Return: 0 on success; negative errno on register access failure or
 * -ETIMEDOUT if a reset poll expires.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* hardware clears HW_CFG_LRST_ when the lite reset completes */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	/* reset wiped the MAC address registers; reprogram them */
	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* burst_cap and bulk_in_delay were selected per USB speed earlier */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes (registers hold the end address in 512-byte units) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any stale interrupt status and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait for the PHY reset bit to clear AND the device to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		buf &= ~MAC_CR_GMII_EN_;
		/* Enable Auto Duplex and Auto speed */
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	}

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3397
20ff5565
WH
3398static void lan78xx_init_stats(struct lan78xx_net *dev)
3399{
3400 u32 *p;
3401 int i;
3402
3403 /* initialize for stats update
3404 * some counters are 20bits and some are 32bits
3405 */
3406 p = (u32 *)&dev->stats.rollover_max;
3407 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3408 p[i] = 0xFFFFF;
3409
3410 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3411 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3412 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3413 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3414 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3415 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3416 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3417 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3418 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3419 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3420
fed56079 3421 set_bit(EVENT_STAT_UPDATE, &dev->flags);
20ff5565
WH
3422}
3423
55d7de9d
WH
/* lan78xx_open - ndo_open: bring the interface up
 * @net: net_device being opened
 *
 * Takes a PM reference for the duration of the open interface, starts the
 * PHY, submits the interrupt URB used for link-change notification, flushes
 * and starts both data paths, and enables NAPI. On any failure after the PM
 * get, the reference is dropped again before returning.
 *
 * Return: 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* keep the device resumed while the interface is up */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* discard anything left in the FIFOs from a previous session */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* link state is unknown until the deferred link reset runs */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* drop the PM reference taken above if open failed */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3484
/* lan78xx_terminate_urbs - cancel all in-flight URBs and drain the queues
 * @dev: device context
 *
 * Unlinks every outstanding Tx and Rx URB, then sleeps (woken via
 * dev->wait by the completion handlers) until both active queues are
 * empty. Finally releases buffers held in the Rx-done queue and purges
 * the Rx-overflow and Tx-pending queues. Must only be called when no new
 * URB submissions can race with it (e.g. from lan78xx_stop()).
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	/* completion handlers wake this on-stack queue as URBs finish */
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		/* done buffers go back to the free pool, not the stack */
		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3520
/* lan78xx_stop - ndo_stop: take the interface down
 * @net: net_device being stopped
 *
 * Stops the stat-monitor timer, NAPI and the stack queue, cancels all
 * URBs, quiesces the Tx/Rx data paths and the PHY, clears the deferred
 * work event bits so the kevent worker becomes a no-op, and releases the
 * PM reference taken in lan78xx_open(). Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* matches usb_autopm_get_interface() in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3569
55d7de9d
WH
/* defer_bh - move an SKB from @list to the Rx-done queue and kick NAPI
 * @dev:   device context
 * @skb:   buffer whose URB just completed
 * @list:  queue the buffer currently sits on (e.g. dev->rxq)
 * @state: new skb_data state to record
 *
 * Called from URB completion (interrupt) context. Note the deliberate
 * lock handoff: irqs are disabled with @list->lock and only restored when
 * dev->rxq_done.lock is released, so the move between queues is atomic
 * w.r.t. interrupts. NAPI is scheduled only on the empty->non-empty
 * transition of rxq_done.
 *
 * Return: the SKB's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3593
/* tx_complete - URB completion handler for bulk-out transfers
 * @urb: completed URB; urb->context is the batched Tx buffer SKB
 *
 * Accounts packets/bytes on success or classifies the error (halting the
 * endpoint or the queue where appropriate), drops the async PM reference
 * taken at submission, returns the buffer to the free pool, and
 * re-schedules NAPI if more pending data is waiting with no URB in
 * flight. Runs in interrupt context.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB may carry several aggregated packets */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3647
3648static void lan78xx_queue_skb(struct sk_buff_head *list,
3649 struct sk_buff *newsk, enum skb_state state)
3650{
3651 struct skb_data *entry = (struct skb_data *)newsk->cb;
3652
3653 __skb_queue_tail(list, newsk);
3654 entry->state = state;
3655}
3656
d383216a
JE
3657static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3658{
3659 return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3660}
3661
/* lan78xx_tx_pend_data_len - bytes currently waiting on the Tx pend queue */
static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}
3666
3667static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3668 struct sk_buff *skb,
3669 unsigned int *tx_pend_data_len)
3670{
3671 unsigned long flags;
3672
3673 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3674
3675 __skb_queue_tail(&dev->txq_pend, skb);
3676
3677 dev->tx_pend_data_len += skb->len;
3678 *tx_pend_data_len = dev->tx_pend_data_len;
3679
3680 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3681}
3682
3683static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3684 struct sk_buff *skb,
3685 unsigned int *tx_pend_data_len)
3686{
3687 unsigned long flags;
3688
3689 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3690
3691 __skb_queue_head(&dev->txq_pend, skb);
3692
3693 dev->tx_pend_data_len += skb->len;
3694 *tx_pend_data_len = dev->tx_pend_data_len;
3695
3696 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3697}
3698
3699static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3700 struct sk_buff **skb,
3701 unsigned int *tx_pend_data_len)
3702{
3703 unsigned long flags;
3704
3705 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3706
3707 *skb = __skb_dequeue(&dev->txq_pend);
3708 if (*skb)
3709 dev->tx_pend_data_len -= (*skb)->len;
3710 *tx_pend_data_len = dev->tx_pend_data_len;
3711
3712 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3713}
3714
e0c79ff6
BX
/* lan78xx_start_xmit - ndo_start_xmit: queue an SKB for transmission
 * @skb: packet handed down by the stack
 * @net: transmitting net_device
 *
 * Packets are not submitted directly; they are appended to the Tx pending
 * queue and NAPI does the URB batching/submission. If the device is
 * autosuspended, the delayed work is scheduled to resume it first. The
 * stack queue is stopped once pending data exceeds the free URB capacity.
 *
 * Return: always NETDEV_TX_OK (the SKB is owned by the driver from here).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* wake the device before anything can be transmitted */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3750
55d7de9d
WH
3751static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3752{
3753 struct lan78xx_priv *pdata = NULL;
3754 int ret;
3755 int i;
3756
55d7de9d
WH
3757 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3758
3759 pdata = (struct lan78xx_priv *)(dev->data[0]);
3760 if (!pdata) {
3761 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3762 return -ENOMEM;
3763 }
3764
3765 pdata->dev = dev;
3766
3767 spin_lock_init(&pdata->rfe_ctl_lock);
3768 mutex_init(&pdata->dataport_mutex);
3769
3770 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3771
3772 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3773 pdata->vlan_table[i] = 0;
3774
3775 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3776
3777 dev->net->features = 0;
3778
3779 if (DEFAULT_TX_CSUM_ENABLE)
3780 dev->net->features |= NETIF_F_HW_CSUM;
3781
3782 if (DEFAULT_RX_CSUM_ENABLE)
3783 dev->net->features |= NETIF_F_RXCSUM;
3784
3785 if (DEFAULT_TSO_CSUM_ENABLE)
3786 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3787
ec21ecf0
DS
3788 if (DEFAULT_VLAN_RX_OFFLOAD)
3789 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3790
4a27327b
DS
3791 if (DEFAULT_VLAN_FILTER_ENABLE)
3792 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3793
55d7de9d
WH
3794 dev->net->hw_features = dev->net->features;
3795
cc89c323
WH
3796 ret = lan78xx_setup_irq_domain(dev);
3797 if (ret < 0) {
3798 netdev_warn(dev->net,
3799 "lan78xx_setup_irq_domain() failed : %d", ret);
2d2d99ec 3800 goto out1;
cc89c323
WH
3801 }
3802
55d7de9d
WH
3803 /* Init all registers */
3804 ret = lan78xx_reset(dev);
2d2d99ec
RC
3805 if (ret) {
3806 netdev_warn(dev->net, "Registers INIT FAILED....");
3807 goto out2;
3808 }
55d7de9d 3809
fb52c3b5 3810 ret = lan78xx_mdio_init(dev);
2d2d99ec
RC
3811 if (ret) {
3812 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3813 goto out2;
3814 }
ce85e13a 3815
55d7de9d
WH
3816 dev->net->flags |= IFF_MULTICAST;
3817
3818 pdata->wol = WAKE_MAGIC;
3819
fb52c3b5 3820 return ret;
2d2d99ec
RC
3821
3822out2:
3823 lan78xx_remove_irq_domain(dev);
3824
3825out1:
3826 netdev_warn(dev->net, "Bind routine FAILED");
3827 cancel_work_sync(&pdata->set_multicast);
3828 cancel_work_sync(&pdata->set_vlan);
3829 kfree(pdata);
3830 return ret;
55d7de9d
WH
3831}
3832
3833static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3834{
3835 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3836
cc89c323
WH
3837 lan78xx_remove_irq_domain(dev);
3838
ce85e13a
WH
3839 lan78xx_remove_mdio(dev);
3840
55d7de9d 3841 if (pdata) {
2d2d99ec
RC
3842 cancel_work_sync(&pdata->set_multicast);
3843 cancel_work_sync(&pdata->set_vlan);
55d7de9d
WH
3844 netif_dbg(dev, ifdown, dev->net, "free pdata");
3845 kfree(pdata);
3846 pdata = NULL;
3847 dev->data[0] = 0;
3848 }
3849}
3850
3851static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3852 struct sk_buff *skb,
3853 u32 rx_cmd_a, u32 rx_cmd_b)
3854{
9343ac87
DS
3855 /* HW Checksum offload appears to be flawed if used when not stripping
3856 * VLAN headers. Drop back to S/W checksums under these conditions.
3857 */
55d7de9d 3858 if (!(dev->net->features & NETIF_F_RXCSUM) ||
9343ac87
DS
3859 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3860 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3861 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
55d7de9d
WH
3862 skb->ip_summed = CHECKSUM_NONE;
3863 } else {
3864 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3865 skb->ip_summed = CHECKSUM_COMPLETE;
3866 }
3867}
3868
ec21ecf0
DS
3869static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3870 struct sk_buff *skb,
3871 u32 rx_cmd_a, u32 rx_cmd_b)
3872{
3873 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3874 (rx_cmd_a & RX_CMD_A_FVTG_))
3875 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3876 (rx_cmd_b & 0xffff));
3877}
3878
e0c79ff6 3879static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
55d7de9d 3880{
55d7de9d
WH
3881 dev->net->stats.rx_packets++;
3882 dev->net->stats.rx_bytes += skb->len;
3883
74d79a2e
WH
3884 skb->protocol = eth_type_trans(skb, dev->net);
3885
55d7de9d
WH
3886 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3887 skb->len + sizeof(struct ethhdr), skb->protocol);
3888 memset(skb->cb, 0, sizeof(struct skb_data));
3889
3890 if (skb_defer_rx_timestamp(skb))
3891 return;
3892
ec4c7e12 3893 napi_gro_receive(&dev->napi, skb);
55d7de9d
WH
3894}
3895
ec4c7e12
JE
/* lan78xx_rx - split a completed Rx URB buffer into individual frames
 * @dev:       device context
 * @skb:       URB buffer containing zero or more concatenated frames,
 *             each prefixed by three little-endian command words
 * @budget:    NAPI budget for this poll
 * @work_done: in/out count of frames delivered so far this poll
 *
 * Return: 1 if the whole buffer was parsed, 0 on a malformed buffer or
 * allocation failure (caller counts it as an Rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	/* too short to hold even one command-word header + frame */
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		/* command words may sit at any alignment in the buffer */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* device-reported length must fit in the remaining buffer */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip the frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* strip the trailing FCS the device left in place */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3981
ec4c7e12
JE
3982static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3983 int budget, int *work_done)
55d7de9d 3984{
ec4c7e12
JE
3985 if (!lan78xx_rx(dev, skb, budget, work_done)) {
3986 netif_dbg(dev, rx_err, dev->net, "drop\n");
55d7de9d 3987 dev->net->stats.rx_errors++;
55d7de9d 3988 }
55d7de9d
WH
3989}
3990
55d7de9d
WH
/* rx_complete - URB completion handler for bulk-in transfers
 * @urb: completed URB; urb->context is the Rx buffer SKB
 *
 * Classifies the completion status, updates error statistics, and hands
 * the buffer to defer_bh() which moves it to the Rx-done queue (state
 * rx_done for good data, rx_cleanup otherwise) and schedules NAPI.
 * Runs in interrupt context.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the completing URB should be the one we submitted */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but a runt payload is still an error */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
55d7de9d 4050
9d2da721
JE
/* rx_submit - submit one Rx buffer to the bulk-in endpoint
 * @dev:   device context
 * @skb:   pre-allocated Rx buffer (entry->urb holds its URB)
 * @flags: GFP flags for usb_submit_urb()
 *
 * Submission happens under dev->rxq.lock so the state checks (device
 * present/running, no Rx halt, not asleep) stay consistent with the
 * queueing. On failure the buffer is returned to the free pool and NAPI
 * is rescheduled where retrying makes sense.
 *
 * Return: 0 on success, -ENOLINK when the device cannot accept URBs,
 * or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* failed submissions give the buffer back to the free pool */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4102
c450a8eb
JE
4103static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4104{
4105 struct sk_buff *rx_buf;
4106
4107 /* Ensure the maximum number of Rx URBs is submitted
4108 */
4109 while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4110 if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4111 break;
55d7de9d 4112 }
c450a8eb
JE
4113}
4114
/* lan78xx_rx_urb_resubmit - recycle a drained Rx buffer back to the device
 * @dev:    device context
 * @rx_buf: buffer whose contents have been fully processed
 *
 * Rewinds the SKB to an empty state (data back at head, zero length)
 * before handing it to rx_submit() for reuse.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
4127
/* lan78xx_fill_tx_cmd_words - write the two Tx command words for an SKB
 * @skb:    packet about to be copied into the URB buffer
 * @buffer: destination for the two little-endian 32-bit command words
 *
 * Command word A carries the frame length, FCS insertion, checksum
 * offload (for CHECKSUM_PARTIAL), LSO and VLAN-insert flags; command
 * word B carries the MSS for GSO packets and the VLAN tag.
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp the MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* device expects little-endian command words at any alignment */
	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
55d7de9d 4155
d383216a
JE
/* lan78xx_tx_buf_fill - batch pending SKBs into one Tx URB buffer
 * @dev:    device context
 * @tx_buf: empty URB buffer SKB to fill
 *
 * Dequeues SKBs from the Tx pending queue and copies each one, prefixed
 * by its command words and 4-byte alignment padding, into @tx_buf until
 * the buffer cannot hold another frame (the SKB that did not fit is
 * pushed back to the head of the pending queue). Source SKBs are freed
 * as they are consumed; per-URB packet and byte counts are recorded in
 * the buffer's skb_data control block.
 *
 * Return: the filled buffer's skb_data entry (never NULL).
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* each frame starts on a 4-byte boundary in the URB buffer */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* no room: put the SKB back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* copy failed: drop the frame and back out the
			 * command words already written
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* GSO SKBs count as their segment count for accounting */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
4221
/* lan78xx_tx_bh - Tx bottom half: drain the pending-skb queue into bulk URBs.
 *
 * Wakes the stack Tx queue when there is room again, then repeatedly fills
 * a Tx buffer from txq_pend and submits it as a bulk-out URB.  Stops when
 * there is no more pending data, no free Tx buffer, or a submission fails.
 * Called from NAPI context (lan78xx_bh).
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		/* txq.lock also serializes against suspend; autopm must be
		 * taken under it so EVENT_DEV_ASLEEP is checked consistently.
		 */
		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			/* park the URB on the deferred anchor; it is
			 * resubmitted by lan78xx_submit_deferred_urbs()
			 */
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			/* account all frames in the failed buffer as dropped */
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4314
/* lan78xx_bh - NAPI bottom half: process completed Rx URBs and kick Tx.
 *
 * Returns the number of frames passed to the stack (work_done), bounded
 * by @budget.
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* nothing to hand to the stack; just resubmit */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4390
/* lan78xx_poll - NAPI poll callback.
 *
 * Skips all work while the device is asleep, otherwise runs the bottom
 * half and completes NAPI when under budget.  After completing, it
 * re-schedules itself if more Rx completions arrived meanwhile or if
 * Tx data is still waiting to go out.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		/* returning < budget tells NAPI we are done for now */
		result = work_done;
	}

	return result;
}
4434
4435static void lan78xx_delayedwork(struct work_struct *work)
4436{
4437 int status;
4438 struct lan78xx_net *dev;
4439
4440 dev = container_of(work, struct lan78xx_net, wq.work);
4441
77dfff5b
JE
4442 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4443 return;
4444
5f4cc6e2
JE
4445 if (usb_autopm_get_interface(dev->intf) < 0)
4446 return;
4447
55d7de9d
WH
4448 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4449 unlink_urbs(dev, &dev->txq);
5f4cc6e2 4450
55d7de9d 4451 status = usb_clear_halt(dev->udev, dev->pipe_out);
55d7de9d
WH
4452 if (status < 0 &&
4453 status != -EPIPE &&
4454 status != -ESHUTDOWN) {
4455 if (netif_msg_tx_err(dev))
55d7de9d
WH
4456 netdev_err(dev->net,
4457 "can't clear tx halt, status %d\n",
4458 status);
4459 } else {
4460 clear_bit(EVENT_TX_HALT, &dev->flags);
4461 if (status != -ESHUTDOWN)
4462 netif_wake_queue(dev->net);
4463 }
4464 }
5f4cc6e2 4465
55d7de9d
WH
4466 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4467 unlink_urbs(dev, &dev->rxq);
55d7de9d 4468 status = usb_clear_halt(dev->udev, dev->pipe_in);
55d7de9d
WH
4469 if (status < 0 &&
4470 status != -EPIPE &&
4471 status != -ESHUTDOWN) {
4472 if (netif_msg_rx_err(dev))
55d7de9d
WH
4473 netdev_err(dev->net,
4474 "can't clear rx halt, status %d\n",
4475 status);
4476 } else {
4477 clear_bit(EVENT_RX_HALT, &dev->flags);
ec4c7e12 4478 napi_schedule(&dev->napi);
55d7de9d
WH
4479 }
4480 }
4481
4482 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4483 int ret = 0;
4484
4485 clear_bit(EVENT_LINK_RESET, &dev->flags);
55d7de9d 4486 if (lan78xx_link_reset(dev) < 0) {
55d7de9d
WH
4487 netdev_info(dev->net, "link reset failed (%d)\n",
4488 ret);
55d7de9d
WH
4489 }
4490 }
20ff5565
WH
4491
4492 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4493 lan78xx_update_stats(dev);
4494
4495 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4496
4497 mod_timer(&dev->stat_monitor,
4498 jiffies + (STAT_UPDATE_TIMER * dev->delta));
4499
4500 dev->delta = min((dev->delta * 2), 50);
4501 }
5f4cc6e2
JE
4502
4503 usb_autopm_put_interface(dev->intf);
55d7de9d
WH
4504}
4505
/* intr_complete - completion handler for the interrupt-in status URB.
 *
 * Dispatches the status payload on success, bails out permanently on
 * shutdown/disconnect codes, and otherwise resubmits the URB so the
 * device keeps reporting status.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear the buffer so stale status is never re-parsed */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case 0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4557
/* lan78xx_disconnect - USB disconnect callback; tear everything down.
 *
 * Order matters: the netdev is unregistered first, deferred work is
 * stopped, the PHY is disconnected (and freed if it was a registered
 * fixed link), then URBs, driver resources, and finally the netdev and
 * the usb_device reference taken in probe are released.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	/* no more stat timer or deferred work after this point */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	/* keep the phydev pointer; net->phydev is cleared by disconnect */
	phydev = net->phydev;

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4601
/* lan78xx_tx_timeout - netdev watchdog callback.
 *
 * Unlinks all in-flight Tx URBs and schedules NAPI so transmission is
 * restarted from the pending queue.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4609
ce896476
JH
4610static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4611 struct net_device *netdev,
4612 netdev_features_t features)
4613{
d383216a
JE
4614 struct lan78xx_net *dev = netdev_priv(netdev);
4615
4616 if (skb->len > LAN78XX_TSO_SIZE(dev))
ce896476
JH
4617 features &= ~NETIF_F_GSO_MASK;
4618
4619 features = vlan_features_check(skb, features);
4620 features = vxlan_features_check(skb, features);
4621
4622 return features;
4623}
4624
55d7de9d
WH
/* net_device operations for the LAN78xx family */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4640
/* lan78xx_stat_monitor - periodic stats timer; defers the actual
 * register reads to workqueue context via EVENT_STAT_UPDATE.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4647
55d7de9d
WH
/* lan78xx_probe - USB probe: allocate the netdev, validate endpoints,
 * bind the hardware, set up the interrupt URB and PHY, and register
 * the network device.
 *
 * On failure, resources are unwound in reverse order through the chain
 * of labels at the bottom.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* the device must expose bulk-in, bulk-out and int-in endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	/* URB owns `buf` from here on; freed with the URB */
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4822
4823static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4824{
4825 const u16 crc16poly = 0x8005;
4826 int i;
4827 u16 bit, crc, msb;
4828 u8 data;
4829
4830 crc = 0xFFFF;
4831 for (i = 0; i < len; i++) {
4832 data = *buf++;
4833 for (bit = 0; bit < 8; bit++) {
4834 msb = crc >> 15;
4835 crc <<= 1;
4836
4837 if (msb ^ (u16)(data & 1)) {
4838 crc ^= crc16poly;
4839 crc |= (u16)0x0001U;
4840 }
4841 data >>= 1;
4842 }
4843 }
4844
4845 return crc;
4846}
4847
5f4cc6e2
JE
/* lan78xx_set_auto_suspend - program the device for USB selective
 * (runtime) suspend with good-frame wakeup.
 *
 * Stops both data paths, clears wake-source status, enables RFE/store
 * wake in WUCSR, selects suspend mode 3 with PHY and WoL wake enabled,
 * clears WUPS, and restarts the Rx path so wake frames can be seen.
 * Returns 0 or a negative error from the register accessors.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* clear any latched wake-up status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4915
55d7de9d
WH
/* lan78xx_set_suspend - program Wake-on-LAN filters and suspend mode
 * for a system suspend, according to the @wol bitmask (WAKE_PHY,
 * WAKE_MAGIC, WAKE_BCAST, WAKE_MCAST, WAKE_UCAST, WAKE_ARP).
 *
 * Multicast and ARP wake use the wake-up frame (WUF) CRC16 filters;
 * the other sources map directly onto WUCSR/PMT_CTL bits.  The Rx
 * path is restarted at the end so wake frames can be matched.
 * Returns 0 or a negative error from the register accessors.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* first bytes of the addresses/type matched by the WUF filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear wake registers and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wake-up frame filters before configuring */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x7: first three bytes of the DA participate */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3: first two bytes of the DA participate */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000: bytes 12 and 13 (EtherType) participate */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
5112
/* lan78xx_suspend - USB suspend callback (both autosuspend and system
 * suspend, distinguished by PMSG_IS_AUTO()).
 *
 * With the interface open: refuses autosuspend while Tx is in flight,
 * quiesces both data paths and all URBs, then programs either selective
 * suspend or the configured WoL mode.  With the interface down: disables
 * all wake sources and enters suspend mode 3.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* makes the Tx path defer new URBs from here on */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* clear any latched wake-up status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
55d7de9d 5222
5f4cc6e2
JE
/* lan78xx_submit_deferred_urbs - resubmit Tx URBs parked on dev->deferred
 * while the device was asleep.
 *
 * Must be called with dev->txq.lock held (the caller in lan78xx_resume
 * holds it).  URBs are dropped instead of submitted once the device is
 * gone, the carrier is down, or the pipe has stalled.
 *
 * Returns true if the bulk-out pipe stalled (-EPIPE), so the caller can
 * schedule EVENT_TX_HALT handling.
 */
static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		struct sk_buff *skb = urb->context;
		int ret;

		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			lan78xx_release_tx_buf(dev, skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}

			lan78xx_release_tx_buf(dev, skb);
		}
	}

	return pipe_halted;
}
5258
e0c79ff6 5259static int lan78xx_resume(struct usb_interface *intf)
55d7de9d
WH
5260{
5261 struct lan78xx_net *dev = usb_get_intfdata(intf);
5f4cc6e2 5262 bool dev_open;
55d7de9d 5263 int ret;
55d7de9d 5264
5f4cc6e2 5265 mutex_lock(&dev->dev_mutex);
20ff5565 5266
5f4cc6e2 5267 netif_dbg(dev, ifup, dev->net, "resuming device");
e1210fe6 5268
5f4cc6e2
JE
5269 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5270
5271 if (dev_open) {
5272 bool pipe_halted = false;
5273
5274 ret = lan78xx_flush_tx_fifo(dev);
5275 if (ret < 0)
5276 goto out;
5277
5278 if (dev->urb_intr) {
5279 int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
55d7de9d 5280
55d7de9d 5281 if (ret < 0) {
5f4cc6e2
JE
5282 if (ret == -ENODEV)
5283 netif_device_detach(dev->net);
45932221 5284 netdev_warn(dev->net, "Failed to submit intr URB");
55d7de9d
WH
5285 }
5286 }
5287
5f4cc6e2
JE
5288 spin_lock_irq(&dev->txq.lock);
5289
5290 if (netif_device_present(dev->net)) {
5291 pipe_halted = lan78xx_submit_deferred_urbs(dev);
5292
5293 if (pipe_halted)
5294 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5295 }
5296
55d7de9d 5297 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5f4cc6e2 5298
55d7de9d
WH
5299 spin_unlock_irq(&dev->txq.lock);
5300
5f4cc6e2
JE
5301 if (!pipe_halted &&
5302 netif_device_present(dev->net) &&
d383216a 5303 (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5f4cc6e2
JE
5304 netif_start_queue(dev->net);
5305
5306 ret = lan78xx_start_tx_path(dev);
5307 if (ret < 0)
5308 goto out;
5309
ec4c7e12 5310 napi_schedule(&dev->napi);
5f4cc6e2
JE
5311
5312 if (!timer_pending(&dev->stat_monitor)) {
5313 dev->delta = 1;
5314 mod_timer(&dev->stat_monitor,
5315 jiffies + STAT_UPDATE_TIMER);
55d7de9d 5316 }
5f4cc6e2
JE
5317
5318 } else {
5319 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
55d7de9d
WH
5320 }
5321
5322 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3415f6ba 5323 if (ret < 0)
5f4cc6e2 5324 goto out;
55d7de9d 5325 ret = lan78xx_write_reg(dev, WUCSR, 0);
3415f6ba 5326 if (ret < 0)
5f4cc6e2 5327 goto out;
55d7de9d 5328 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3415f6ba 5329 if (ret < 0)
5f4cc6e2 5330 goto out;
55d7de9d
WH
5331
5332 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5333 WUCSR2_ARP_RCD_ |
5334 WUCSR2_IPV6_TCPSYN_RCD_ |
5335 WUCSR2_IPV4_TCPSYN_RCD_);
3415f6ba 5336 if (ret < 0)
5f4cc6e2 5337 goto out;
55d7de9d
WH
5338
5339 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5340 WUCSR_EEE_RX_WAKE_ |
5341 WUCSR_PFDA_FR_ |
5342 WUCSR_RFE_WAKE_FR_ |
5343 WUCSR_WUFR_ |
5344 WUCSR_MPR_ |
5345 WUCSR_BCST_FR_);
3415f6ba 5346 if (ret < 0)
5f4cc6e2 5347 goto out;
55d7de9d 5348
5f4cc6e2
JE
5349 ret = 0;
5350out:
5351 mutex_unlock(&dev->dev_mutex);
3415f6ba 5352
e1210fe6 5353 return ret;
55d7de9d
WH
5354}
5355
e0c79ff6 5356static int lan78xx_reset_resume(struct usb_interface *intf)
55d7de9d
WH
5357{
5358 struct lan78xx_net *dev = usb_get_intfdata(intf);
3415f6ba 5359 int ret;
55d7de9d 5360
5f4cc6e2
JE
5361 netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5362
3415f6ba
JE
5363 ret = lan78xx_reset(dev);
5364 if (ret < 0)
5365 return ret;
ce85e13a 5366
92571a1a 5367 phy_start(dev->net->phydev);
ce85e13a 5368
3415f6ba
JE
5369 ret = lan78xx_resume(intf);
5370
5371 return ret;
55d7de9d
WH
5372}
5373
/* USB ID table of supported devices */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5394
/* USB driver glue; supports autosuspend with remote wakeup */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");