/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

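/* Ring accounting helpers: 'cur' advances as descriptors are submitted
 * and 'dirty' as they are reclaimed, so their difference is the number
 * of descriptors currently in use.
 */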
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

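/* Stop the Tx subqueue when fewer than 'count' descriptors are free and
 * return NETDEV_TX_BUSY so the caller can back-pressure the stack. Any
 * descriptors deferred via xmit_more are flushed to the hardware first
 * so the pending work still completes.
 */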
static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

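/* Fill in the TSO fields of the packet info from the skb. skb_cow_head()
 * ensures the header area is not shared with a clone before it is handed
 * to the hardware as a separate TSO header buffer.
 */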
static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

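/* Pre-compute everything needed to map the skb onto descriptors: a
 * context descriptor if the MSS or VLAN tag changed, one descriptor for
 * the TSO header when applicable, and one per XLGMAC_TX_MAX_BUF_SIZE
 * chunk of the linear data and of each fragment.
 */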
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

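/* Derive the Rx buffer size from the MTU: add room for the Ethernet
 * header, FCS and a VLAN tag, clamp to [XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE]
 * and round up to the hardware alignment. Returns -EINVAL for an MTU
 * beyond the jumbo limit.
 */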
static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

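/* Enable (or, below, disable) the per-channel Tx/Rx completion
 * interrupts, selecting the interrupt set that matches the rings each
 * channel owns. Used to switch between interrupt-driven and NAPI-polled
 * operation.
 */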
static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

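/* Device-level interrupt handler. Walks the per-channel DMA status
 * registers: Tx/Rx completions hand off to NAPI with further interrupts
 * masked, error bits are folded into the driver statistics, a fatal bus
 * error schedules a device restart, and MAC-level MMC counter interrupts
 * are dispatched to the hw_ops helpers.
 */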
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

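/* Per-channel DMA interrupt handler used when per_channel_irq is set:
 * mask the channel's IRQ line and schedule its private NAPI instance.
 */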
static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

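/* Tx coalescing timer: when it fires, kick the appropriate NAPI instance
 * so that Tx work deferred for interrupt coalescing is still cleaned up
 * in a timely fashion.
 */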
static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

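/* Request the device-level IRQ and, when per-channel interrupts are in
 * use, one IRQ per DMA channel. On failure the already-requested channel
 * IRQs are unwound in reverse order before the device IRQ is released.
 */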
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

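/* Bring-up and tear-down paths. xlgmac_start() initializes the hardware,
 * enables NAPI, requests the IRQs and opens the data path; xlgmac_stop()
 * reverses those steps and resets the byte-queue-limit state of every Tx
 * queue. xlgmac_restart_dev() combines the two for MTU changes, Tx
 * timeouts and fatal bus errors.
 */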
static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channels_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

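/* ndo_start_xmit handler. The descriptor need is computed up front so
 * the queue can be stopped with NETDEV_TX_BUSY before anything is
 * mapped; after mapping, BQL is informed via netdev_tx_sent_queue() and
 * the queue is stopped early if a maximal packet might not fit next
 * time.
 */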
static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, saddr->sa_data);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

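/* ndo_set_features handler: compare the requested feature flags against
 * the cached pdata->netdev_features and toggle RSS, Rx checksum offload,
 * VLAN tag stripping and VLAN filtering in the hardware only where the
 * state actually changed.
 */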
static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open = xlgmac_open,
	.ndo_stop = xlgmac_close,
	.ndo_start_xmit = xlgmac_xmit,
	.ndo_tx_timeout = xlgmac_tx_timeout,
	.ndo_get_stats64 = xlgmac_get_stats64,
	.ndo_change_mtu = xlgmac_change_mtu,
	.ndo_set_mac_address = xlgmac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = xlgmac_ioctl,
	.ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xlgmac_poll_controller,
#endif
	.ndo_set_features = xlgmac_set_features,
	.ndo_set_rx_mode = xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}

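/* Re-arm consumed Rx descriptors: remap a fresh buffer for every dirty
 * entry, reset the descriptor for the hardware, and finally advance the
 * Rx tail pointer register (after a write barrier) so the DMA engine can
 * use the replenished entries.
 */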
static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

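/* Build an skb for a received packet: copy the (header-split) header
 * portion into the linear area and attach any remaining payload as a
 * page fragment, syncing each DMA region for CPU access before it is
 * touched.
 */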
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}

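/* Reclaim completed Tx descriptors, bounded by XLGMAC_TX_DESC_MAX_PROC
 * per invocation. Completed packet/byte counts feed BQL, and the queue
 * is woken again once enough descriptors are free.
 */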
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}

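/* Main Rx processing loop. Packets may span several descriptors (and an
 * optional context descriptor), so partial state is carried across
 * read_again iterations and, when the budget runs out mid-packet, saved
 * in the descriptor data and restored on the next poll. Completed
 * packets get checksum, VLAN and RSS metadata applied before being
 * passed up through napi_gro_receive().
 */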
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

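/* NAPI poll callbacks: xlgmac_one_poll() services a single channel when
 * per-channel interrupts are used, while xlgmac_all_poll() (below)
 * round-robins the budget across every channel. Both re-enable
 * interrupts once the Rx work fits within the budget.
 */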
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						      struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						  struct xlgmac_pdata,
						  napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}