/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N              2
#define SGE_FREELQ_N            2
#define SGE_CMDQ0_E_N           1024
#define SGE_CMDQ1_E_N           128
#define SGE_FREEL_SIZE          4096
#define SGE_JUMBO_FREEL_SIZE    512
#define SGE_FREEL_REFILL_THRESH 16
#define SGE_RESPQ_E_N           1024
#define SGE_INTRTIMER_NRES      1000
#define SGE_RX_SM_BUF_SIZE      1536
#define SGE_TX_DESC_MAX_PLEN    16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 flags;
        u32 addr_hi;
};

struct freelQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 gen2;
        u32 addr_hi;
};

struct respQ_e {
        u32 Qsleeping          : 4;
        u32 Cmdq1CreditReturn  : 5;
        u32 Cmdq1DmaComplete   : 5;
        u32 Cmdq0CreditReturn  : 5;
        u32 Cmdq0DmaComplete   : 5;
        u32 FreelistQid        : 2;
        u32 CreditValid        : 1;
        u32 DataValid          : 1;
        u32 Offload            : 1;
        u32 Eop                : 1;
        u32 Sop                : 1;
        u32 GenerationBit      : 1;
        u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 flags;
};

struct freelQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 gen2;
};

struct respQ_e {
        u32 BufferLength;
        u32 GenerationBit      : 1;
        u32 Sop                : 1;
        u32 Eop                : 1;
        u32 Offload            : 1;
        u32 DataValid          : 1;
        u32 CreditValid        : 1;
        u32 FreelistQid        : 2;
        u32 Cmdq0DmaComplete   : 5;
        u32 Cmdq0CreditReturn  : 5;
        u32 Cmdq1DmaComplete   : 5;
        u32 Cmdq1CreditReturn  : 5;
        u32 Qsleeping          : 4;
};
#endif

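/*
 * Note on the generation ("gen") bits in the descriptors above: the
 * producer of a ring stamps each descriptor with its current generation
 * value and flips that value whenever its ring index wraps, while the
 * consumer treats a descriptor as valid only while the descriptor's
 * generation bit matches its own current value (see refill_free_list()
 * and process_responses() below).  The cmdQ/freelQ descriptors carry the
 * generation twice (in len_gen and in gen2/flags); the second copy is
 * written after a wmb() so a half-written descriptor never appears valid.
 */
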
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
        unsigned long   status;          /* HW DMA fetch status */
        unsigned int    in_use;          /* # of in-use command descriptors */
        unsigned int    size;            /* # of descriptors */
        unsigned int    processed;       /* total # of descs HW has processed */
        unsigned int    cleaned;         /* total # of descs SW has reclaimed */
        unsigned int    stop_thres;      /* SW TX queue suspend threshold */
        u16             pidx;            /* producer index (SW) */
        u16             cidx;            /* consumer index (HW) */
        u8              genbit;          /* current generation (=valid) bit */
        u8              sop;             /* is next entry start of packet? */
        struct cmdQ_e  *entries;         /* HW command descriptor Q */
        struct cmdQ_ce *centries;        /* SW command context descriptor Q */
        dma_addr_t      dma_addr;        /* DMA addr HW command descriptor Q */
        spinlock_t      lock;            /* Lock to protect cmdQ enqueuing */
};

struct freelQ {
        unsigned int     credits;        /* # of available RX buffers */
        unsigned int     size;           /* free list capacity */
        u16              pidx;           /* producer index (SW) */
        u16              cidx;           /* consumer index (HW) */
        u16              rx_buffer_size; /* Buffer size on this free list */
        u16              dma_offset;     /* DMA offset to align IP headers */
        u16              recycleq_idx;   /* skb recycle q to use */
        u8               genbit;         /* current generation (=valid) bit */
        struct freelQ_e  *entries;       /* HW freelist descriptor Q */
        struct freelQ_ce *centries;      /* SW freelist context descriptor Q */
        dma_addr_t       dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
        unsigned int    credits;         /* credits to be returned to SGE */
        unsigned int    size;            /* # of response Q descriptors */
        u16             cidx;            /* consumer index (SW) */
        u8              genbit;          /* current generation (=valid) bit */
        struct respQ_e *entries;         /* HW response descriptor Q */
        dma_addr_t      dma_addr;        /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
        CMDQ_STAT_RUNNING = 1,           /* fetch engine is running */
        CMDQ_STAT_LAST_PKT_DB = 2        /* last packet rung the doorbell */
};

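/*
 * The cmdQ status bits cooperate with the doorbell (see sched_skb(),
 * restart_sched() and update_tx_info() below): a producer first clears
 * CMDQ_STAT_LAST_PKT_DB and then test-and-sets CMDQ_STAT_RUNNING; only
 * the caller that finds RUNNING clear re-sets LAST_PKT_DB and writes
 * F_CMDQ0_ENABLE to A_SG_DOORBELL, so a burst of enqueues costs a single
 * doorbell PIO.
 */
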
/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
        unsigned int    avail;           /* available bits - quota */
        unsigned int    drain_bits_per_1024ns; /* drain rate */
        unsigned int    speed;           /* drain rate, mbps */
        unsigned int    mtu;             /* mtu size */
        struct sk_buff_head skbq;        /* pending skbs */
};

/* Per T204 device */
struct sched {
        ktime_t         last_updated;    /* last time quotas were computed */
        unsigned int    max_avail;       /* max bits to be sent to any port */
        unsigned int    port;            /* port index (round robin ports) */
        unsigned int    num;             /* num skbs in per port queues */
        struct sched_port p[MAX_NPORTS];
        struct tasklet_struct sched_tsk; /* tasklet used to run scheduler */
};

static void restart_sched(unsigned long);

/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
        struct adapter *adapter;         /* adapter backpointer */
        struct net_device *netdev;       /* netdevice backpointer */
        struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
        struct respQ    respQ;           /* response Q */
        unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
        unsigned int    rx_pkt_pad;      /* RX padding for L2 packets */
        unsigned int    jumbo_fl;        /* jumbo freelist Q index */
        unsigned int    intrtimer_nres;  /* no-resource interrupt timer */
        unsigned int    fixed_intrtimer; /* non-adaptive interrupt timer */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list espibug_timer;
        unsigned long   espibug_timeout;
        struct sk_buff  *espibug_skb[MAX_NPORTS];
        u32             sge_control;     /* shadow value of sge control reg */
        struct sge_intr_counts stats;
        struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
        struct sched    *tx_sched;
        struct cmdQ     cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

static const u8 ch_mac_addr[ETH_ALEN] = {
        0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};

/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        int i;

        tasklet_kill(&s->sched_tsk);

        for (i = 0; i < MAX_NPORTS; i++)
                __skb_queue_purge(&s->p[i].skbq);
}

/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
                                   unsigned int mtu, unsigned int speed)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];
        unsigned int max_avail_segs;

        pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
        if (speed)
                p->speed = speed;
        if (mtu)
                p->mtu = mtu;

        if (speed || mtu) {
                unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
                do_div(drain, (p->mtu + 50) * 1000);
                p->drain_bits_per_1024ns = (unsigned int) drain;

                if (p->speed < 1000)
                        p->drain_bits_per_1024ns =
                                90 * p->drain_bits_per_1024ns / 100;
        }

        if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
                p->drain_bits_per_1024ns -= 16;
                s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
                max_avail_segs = max(1U, 4096 / (p->mtu - 40));
        } else {
                s->max_avail = 16384;
                max_avail_segs = max(1U, 9000 / (p->mtu - 40));
        }

        pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
                 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
                 p->speed, s->max_avail, max_avail_segs,
                 p->drain_bits_per_1024ns);

        return max_avail_segs * (p->mtu - 40);
}

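/*
 * Reading of the drain computation in t1_sched_update_parms() above:
 * p->speed is in Mbit/s, i.e. bits per 1000 ns, so the 1024/1000 factor
 * rescales it to the per-1024 ns units that sched_update_avail() can
 * consume with cheap shifts.  The (mtu - 40) / (mtu + 50) ratio appears
 * to discount protocol headers and per-frame wire overhead, and
 * sub-gigabit links are derated to 90%, presumably as a safety margin.
 */
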
#if 0

/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
        struct sched *s = sge->tx_sched;
        unsigned int i;

        s->max_avail = val;
        for (i = 0; i < MAX_NPORTS; i++)
                t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
                                    unsigned int val)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];

        p->drain_bits_per_1024ns = val * 1024 / 1000;
        t1_sched_update_parms(sge, port, 0, 0);
}

#endif  /*  0  */

/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
        struct timespec ts;

        ktime_get_ts(&ts);
        return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
        struct sched *s;
        int i;

        s = kzalloc(sizeof(struct sched), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        pr_debug("tx_sched_init\n");
        tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
        sge->tx_sched = s;

        for (i = 0; i < MAX_NPORTS; i++) {
                skb_queue_head_init(&s->p[i].skbq);
                t1_sched_update_parms(sge, i, 1500, 1000);
        }

        return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        ktime_t now = get_clock();
        unsigned int i;
        long long delta_time_ns;

        delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

        pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
        if (delta_time_ns < 15000)
                return 0;

        for (i = 0; i < MAX_NPORTS; i++) {
                struct sched_port *p = &s->p[i];
                unsigned int delta_avail;

                delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
                p->avail = min(p->avail + delta_avail, s->max_avail);
        }

        s->last_updated = now;

        return 1;
}

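/*
 * Unit note for the shift in sched_update_avail(): the bits accumulated
 * in delta_time_ns would be drain_bits_per_1024ns * delta_time_ns >> 10;
 * the extra >> 3 divides by 8, so delta_avail and p->avail are in effect
 * tracked in bytes, which matches sched_skb() comparing p->avail against
 * skb->len.
 */
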
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
                                 unsigned int credits)
{
        struct sched *s = sge->tx_sched;
        struct sk_buff_head *skbq;
        unsigned int i, len, update = 1;

        pr_debug("sched_skb %p\n", skb);
        if (!skb) {
                if (!s->num)
                        return NULL;
        } else {
                skbq = &s->p[skb->dev->if_port].skbq;
                __skb_queue_tail(skbq, skb);
                s->num++;
                skb = NULL;
        }

        if (credits < MAX_SKB_FRAGS + 1)
                goto out;

again:
        for (i = 0; i < MAX_NPORTS; i++) {
                s->port = (s->port + 1) & (MAX_NPORTS - 1);
                skbq = &s->p[s->port].skbq;

                skb = skb_peek(skbq);

                if (!skb)
                        continue;

                len = skb->len;
                if (len <= s->p[s->port].avail) {
                        s->p[s->port].avail -= len;
                        s->num--;
                        __skb_unlink(skb, skbq);
                        goto out;
                }
                skb = NULL;
        }

        if (update-- && sched_update_avail(sge))
                goto again;

out:
        /* If there are more pending skbs, we use the hardware to schedule us
         * again.
         */
        if (s->num && !skb) {
                struct cmdQ *q = &sge->cmdQ[0];
                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
                }
        }
        pr_debug("sched_skb ret %p\n", skb);

        return skb;
}

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
        wmb();
        writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct freelQ_ce *ce = &q->centries[cidx];

                pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                 dma_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
                if (++cidx == q->size)
                        cidx = 0;
        }
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        if (sge->respQ.entries) {
                size = sizeof(struct respQ_e) * sge->respQ.size;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                if (q->centries) {
                        free_freelQ_buffers(pdev, q);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct freelQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                q->genbit = 1;
                q->size = p->freelQ_size[i];
                q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
                size = sizeof(struct freelQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct freelQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * Calculate the buffer sizes for the two free lists.  FL0 accommodates
         * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
         * including all the sk_buff overhead.
         *
         * Note: For T2 FL0 and FL1 are reversed.
         */
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;

        size = (16 * 1024) -
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

        /*
         * Setup which skb recycle Q should be used when recycling buffers from
         * each free list.
         */
        sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
        sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

        sge->respQ.genbit = 1;
        sge->respQ.size = SGE_RESPQ_E_N;
        sge->respQ.credits = 0;
        size = sizeof(struct respQ_e) * sge->respQ.size;
        sge->respQ.entries =
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
                goto err_no_mem;
        return 0;

err_no_mem:
        free_rx_resources(sge);
        return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
        struct cmdQ_ce *ce;
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int cidx = q->cidx;

        q->in_use -= n;
        ce = &q->centries[cidx];
        while (n--) {
                if (likely(dma_unmap_len(ce, dma_len))) {
                        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                         dma_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                        if (q->sop)
                                q->sop = 0;
                }
                if (ce->skb) {
                        dev_kfree_skb_any(ce->skb);
                        q->sop = 1;
                }
                ce++;
                if (++cidx == q->size) {
                        cidx = 0;
                        ce = q->centries;
                }
        }
        q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                if (q->centries) {
                        if (q->in_use)
                                free_cmdQ_buffers(sge, q, q->in_use);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct cmdQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                q->genbit = 1;
                q->sop = 1;
                q->size = p->cmdQ_size[i];
                q->in_use = 0;
                q->status = 0;
                q->processed = q->cleaned = 0;
                q->stop_thres = 0;
                spin_lock_init(&q->lock);
                size = sizeof(struct cmdQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct cmdQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
         * only. For queue 0 set the stop threshold so we can handle one more
         * packet from each port, plus reserve an additional 24 entries for
         * Ethernet packets only. Queue 1 never suspends nor do we reserve
         * space for Ethernet packets.
         */
        sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
                (MAX_SKB_FRAGS + 1);
        return 0;

err_no_mem:
        free_tx_resources(sge);
        return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
        writel((u32)addr, adapter->regs + base_reg_lo);
        writel(addr >> 32, adapter->regs + base_reg_hi);
        writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
        struct sge *sge = adapter->sge;

        if (features & NETIF_F_HW_VLAN_RX)
                sge->sge_control |= F_VLAN_XTRACT;
        else
                sge->sge_control &= ~F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
                readl(adapter->regs + A_SG_CONTROL);   /* flush */
        }
}

/*
 * Programs the various SGE registers. The engine is not yet enabled at this
 * point, but sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
        struct adapter *ap = sge->adapter;

        writel(0, ap->regs + A_SG_CONTROL);
        setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
        setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
                          sge->freelQ[0].size, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
                          sge->freelQ[1].size, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);

        /* The threshold comparison uses <. */
        writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

        setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
                          A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
        writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

        /* Initialize no-resource timer */
        sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

        t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
                sge->freelQ[sge->jumbo_fl].dma_offset -
                sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
        int i;

        for_each_port(sge->adapter, i)
                free_percpu(sge->port_stats[i]);

        kfree(sge->tx_sched);
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct freelQ_ce *ce = &q->centries[q->pidx];
        struct freelQ_e *e = &q->entries[q->pidx];
        unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

        while (q->credits < q->size) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
                if (!skb)
                        break;

                skb_reserve(skb, q->dma_offset);
                mapping = pci_map_single(pdev, skb->data, dma_len,
                                         PCI_DMA_FROMDEVICE);
                skb_reserve(skb, sge->rx_pkt_pad);

                ce->skb = skb;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, dma_len);
                e->addr_lo = (u32)mapping;
                e->addr_hi = (u64)mapping >> 32;
                e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
                wmb();
                e->gen2 = V_CMD_GEN2(q->genbit);

                e++;
                ce++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        q->genbit ^= 1;
                        ce = q->centries;
                        e = q->entries;
                }
                q->credits++;
        }
}

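/*
 * Note the store ordering in refill_free_list(): the buffer address and
 * len_gen words are written first, then a wmb() orders them ahead of the
 * gen2 write that carries the current generation.  Because the SGE
 * qualifies a descriptor by its generation bits, it should never fetch a
 * buffer whose address or length words are still in flight.
 */
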
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
        u32 irqholdoff_reg;

        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);

        if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
            sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
                irqholdoff_reg = sge->fixed_intrtimer;
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
        writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
        writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

        /* We reenable the Qs to force a freelist GTS interrupt later */
        doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
        writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
        u32 en = SGE_INT_ENABLE;
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
                en &= ~F_PACKET_TOO_BIG;
        writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
        writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
        writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
        writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
                sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->stats.respQ_overflow++;
                pr_alert("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
                sge->stats.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->stats.pkt_too_big++;
                pr_alert("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->stats.pkt_mismatch++;
                pr_alert("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);

        writel(cause, adapter->regs + A_SG_INT_CAUSE);
        return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
        return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
                           struct sge_port_stats *ss)
{
        int cpu;

        memset(ss, 0, sizeof(*ss));
        for_each_possible_cpu(cpu) {
                struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

                ss->rx_cso_good += st->rx_cso_good;
                ss->tx_cso += st->tx_cso;
                ss->tx_tso += st->tx_tso;
                ss->tx_need_hdrroom += st->tx_need_hdrroom;
                ss->vlan_xtract += st->vlan_xtract;
                ss->vlan_insert += st->vlan_insert;
        }
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
        struct freelQ_e *from = &fl->entries[idx];
        struct freelQ_e *to = &fl->entries[fl->pidx];

        fl->centries[fl->pidx] = fl->centries[idx];
        to->addr_lo = from->addr_lo;
        to->addr_hi = from->addr_hi;
        to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
        wmb();
        to->gen2 = V_CMD_GEN2(fl->genbit);
        fl->credits++;

        if (++fl->pidx == fl->size) {
                fl->pidx = 0;
                fl->genbit ^= 1;
        }
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  When
 *	the original buffer must be used (the packet is too big to copy, or
 *	the copy allocation fails) but fewer than two free-list buffers
 *	remain, the packet is dropped and its buffer recycled.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                         struct freelQ *fl, unsigned int len)
{
        struct sk_buff *skb;
        const struct freelQ_ce *ce = &fl->centries[fl->cidx];

        if (len < copybreak) {
                skb = alloc_skb(len + 2, GFP_ATOMIC);
                if (!skb)
                        goto use_orig_buf;

                skb_reserve(skb, 2);    /* align IP header */
                skb_put(skb, len);
                pci_dma_sync_single_for_cpu(pdev,
                                            dma_unmap_addr(ce, dma_addr),
                                            dma_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(ce->skb, skb->data, len);
                pci_dma_sync_single_for_device(pdev,
                                               dma_unmap_addr(ce, dma_addr),
                                               dma_unmap_len(ce, dma_len),
                                               PCI_DMA_FROMDEVICE);
                recycle_fl_buf(fl, fl->cidx);
                return skb;
        }

use_orig_buf:
        if (fl->credits < 2) {
                recycle_fl_buf(fl, fl->cidx);
                return NULL;
        }

        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                         dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        skb = ce->skb;
        prefetch(skb->data);

        skb_put(skb, len);
        return skb;
}

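/*
 * The copybreak cutoff in get_packet() trades a short memcpy for buffer
 * churn: packets below copybreak are copied into a right-sized skb so the
 * large, already-mapped free-list buffer can be recycled in place, while
 * bigger packets hand the original buffer to the stack and force a fresh
 * allocation in refill_free_list().
 */
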
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC).  Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
        struct freelQ_ce *ce = &fl->centries[fl->cidx];
        struct sk_buff *skb = ce->skb;

        pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
                                    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        pr_err("%s: unexpected offload packet, cmd %u\n",
               adapter->name, *skb->data);
        recycle_fl_buf(fl, fl->cidx);
}

/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors
 * are required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
        unsigned int count = 0;

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                unsigned int nfrags = skb_shinfo(skb)->nr_frags;
                unsigned int i, len = skb_headlen(skb);

                while (len > SGE_TX_DESC_MAX_PLEN) {
                        count++;
                        len -= SGE_TX_DESC_MAX_PLEN;
                }
                for (i = 0; nfrags--; i++) {
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        while (len > SGE_TX_DESC_MAX_PLEN) {
                                count++;
                                len -= SGE_TX_DESC_MAX_PLEN;
                        }
                }
        }
        return count;
}

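/*
 * Worked example for compute_large_page_tx_descs(), assuming a build with
 * a 64KB PAGE_SIZE: a 40960-byte linear area needs two additional
 * descriptors (40960 -> 24576 -> 8192, two loop iterations) on top of the
 * one the caller already accounts for; each fragment is broken down the
 * same way.
 */
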
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
                                 unsigned int len, unsigned int gen,
                                 unsigned int eop)
{
        BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

        e->addr_lo = (u32)mapping;
        e->addr_hi = (u64)mapping >> 32;
        e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
        e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
                                                     struct cmdQ_e **e,
                                                     struct cmdQ_ce **ce,
                                                     unsigned int *gen,
                                                     dma_addr_t *desc_mapping,
                                                     unsigned int *desc_len,
                                                     unsigned int nfrags,
                                                     struct cmdQ *q)
{
        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                struct cmdQ_e *e1 = *e;
                struct cmdQ_ce *ce1 = *ce;

                while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
                        *desc_len -= SGE_TX_DESC_MAX_PLEN;
                        write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
                                      *gen, nfrags == 0 && *desc_len == 0);
                        ce1->skb = NULL;
                        dma_unmap_len_set(ce1, dma_len, 0);
                        *desc_mapping += SGE_TX_DESC_MAX_PLEN;
                        if (*desc_len) {
                                ce1++;
                                e1++;
                                if (++pidx == q->size) {
                                        pidx = 0;
                                        *gen ^= 1;
                                        ce1 = q->centries;
                                        e1 = q->entries;
                                }
                        }
                }
                *e = e1;
                *ce = ce1;
        }
        return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
                                  unsigned int pidx, unsigned int gen,
                                  struct cmdQ *q)
{
        dma_addr_t mapping, desc_mapping;
        struct cmdQ_e *e, *e1;
        struct cmdQ_ce *ce;
        unsigned int i, flags, first_desc_len, desc_len,
                     nfrags = skb_shinfo(skb)->nr_frags;

        e = e1 = &q->entries[pidx];
        ce = &q->centries[pidx];

        mapping = pci_map_single(adapter->pdev, skb->data,
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

        desc_mapping = mapping;
        desc_len = skb_headlen(skb);

        flags = F_CMD_DATAVALID | F_CMD_SOP |
                V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
                V_CMD_GEN2(gen);
        first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
                desc_len : SGE_TX_DESC_MAX_PLEN;
        e->addr_lo = (u32)desc_mapping;
        e->addr_hi = (u64)desc_mapping >> 32;
        e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
        ce->skb = NULL;
        dma_unmap_len_set(ce, dma_len, 0);

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
            desc_len > SGE_TX_DESC_MAX_PLEN) {
                desc_mapping += first_desc_len;
                desc_len -= first_desc_len;
                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }
                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);

                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
        }

        ce->skb = NULL;
        dma_unmap_addr_set(ce, dma_addr, mapping);
        dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

        for (i = 0; nfrags--; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }

                mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                desc_mapping = mapping;
                desc_len = skb_frag_size(frag);

                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);
                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
                ce->skb = NULL;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
        }
        ce->skb = skb;
        wmb();
        e->flags = flags;
}

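/*
 * Note that write_tx_descs() fills in every descriptor of the chain and
 * only then, after the final wmb(), stores the first descriptor's flags
 * word, which carries F_CMD_SOP and the generation bits.  Until that last
 * store the hardware sees no valid start-of-packet, so it cannot fetch a
 * partially written chain.
 */
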
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        if (reclaim) {
                pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
                         q->processed, q->cleaned);
                free_cmdQ_buffers(sge, q, reclaim);
                q->cleaned += reclaim;
        }
}

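/*
 * q->processed and q->cleaned are free-running counters rather than ring
 * indices, so the subtraction in reclaim_completed_tx() yields the number
 * of reclaimable descriptors even across wraparound of the unsigned
 * values, with no extra full/empty disambiguation state.
 */
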
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
        struct sge *sge = (struct sge *) arg;
        struct adapter *adapter = sge->adapter;
        struct cmdQ *q = &sge->cmdQ[0];
        struct sk_buff *skb;
        unsigned int credits, queued_skb = 0;

        spin_lock(&q->lock);
        reclaim_completed_tx(sge, q);

        credits = q->size - q->in_use;
        pr_debug("restart_sched credits=%d\n", credits);
        while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
                unsigned int genbit, pidx, count;

                count = 1 + skb_shinfo(skb)->nr_frags;
                count += compute_large_page_tx_descs(skb);
                q->in_use += count;
                genbit = q->genbit;
                pidx = q->pidx;
                q->pidx += count;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->genbit ^= 1;
                }
                write_tx_descs(adapter, skb, pidx, genbit, q);
                credits = q->size - q->in_use;
                queued_skb = 1;
        }

        if (queued_skb) {
                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
        }
        spin_unlock(&q->lock);
}

/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
        struct sk_buff *skb;
        const struct cpl_rx_pkt *p;
        struct adapter *adapter = sge->adapter;
        struct sge_port_stats *st;
        struct net_device *dev;

        skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
        if (unlikely(!skb)) {
                sge->stats.rx_drops++;
                return;
        }

        p = (const struct cpl_rx_pkt *) skb->data;
        if (p->iff >= adapter->params.nports) {
                kfree_skb(skb);
                return;
        }
        __skb_pull(skb, sizeof(*p));

        st = this_cpu_ptr(sge->port_stats[p->iff]);
        dev = adapter->port[p->iff].dev;

        skb->protocol = eth_type_trans(skb, dev);
        if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
            skb->protocol == htons(ETH_P_IP) &&
            (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
                ++st->rx_cso_good;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb_checksum_none_assert(skb);

        if (p->vlan_valid) {
                st->vlan_xtract++;
                __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
        }
        netif_receive_skb(skb);
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
}

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
        struct adapter *adap = sge->adapter;
        int i;

        if (!enough_free_Tx_descs(&sge->cmdQ[0]))
                return;

        for_each_port(adap, i) {
                struct net_device *nd = adap->port[i].dev;

                if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
                    netif_running(nd)) {
                        sge->stats.cmdQ_restarted[2]++;
                        netif_wake_queue(nd);
                }
        }
}

1440 | ||
1441 | /* | |
356bd146 | 1442 | * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 |
559fb51b SB |
1443 | * information. |
1444 | */ | |
356bd146 FR |
1445 | static unsigned int update_tx_info(struct adapter *adapter, |
1446 | unsigned int flags, | |
559fb51b SB |
1447 | unsigned int pr0) |
1448 | { | |
1449 | struct sge *sge = adapter->sge; | |
1450 | struct cmdQ *cmdq = &sge->cmdQ[0]; | |
8199d3a7 | 1451 | |
559fb51b | 1452 | cmdq->processed += pr0; |
f1d3d38a SH |
1453 | if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { |
1454 | freelQs_empty(sge); | |
1455 | flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); | |
1456 | } | |
559fb51b SB |
1457 | if (flags & F_CMDQ0_ENABLE) { |
1458 | clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); | |
f1d3d38a | 1459 | |
559fb51b SB |
1460 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && |
1461 | !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { | |
1462 | set_bit(CMDQ_STAT_RUNNING, &cmdq->status); | |
1463 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | |
1464 | } | |
f1d3d38a SH |
1465 | if (sge->tx_sched) |
1466 | tasklet_hi_schedule(&sge->tx_sched->sched_tsk); | |
1467 | ||
1468 | flags &= ~F_CMDQ0_ENABLE; | |
559fb51b | 1469 | } |
f1d3d38a | 1470 | |
559fb51b SB |
1471 | if (unlikely(sge->stopped_tx_queues != 0)) |
1472 | restart_tx_queues(sge); | |
8199d3a7 | 1473 | |
559fb51b SB |
1474 | return flags; |
1475 | } | |
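/* The cmdq->cleaned + cmdq->in_use != cmdq->processed test above spots
 * descriptors that were posted but not yet processed when the queue
 * reported going to sleep; writing F_CMDQ0_ENABLE to the doorbell then
 * restarts the hardware fetch.
 */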
8199d3a7 | 1476 | |
559fb51b SB |
1477 | /* |
1478 | * Process SGE responses, up to the supplied budget. Returns the number of | |
1479 | * responses processed; processing stops once the budget is exhausted. |
1480 | */ | |
1481 | static int process_responses(struct adapter *adapter, int budget) | |
1482 | { | |
1483 | struct sge *sge = adapter->sge; | |
1484 | struct respQ *q = &sge->respQ; | |
1485 | struct respQ_e *e = &q->entries[q->cidx]; | |
24a427cf | 1486 | int done = 0; |
559fb51b SB |
1487 | unsigned int flags = 0; |
1488 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | |
356bd146 | 1489 | |
24a427cf | 1490 | while (done < budget && e->GenerationBit == q->genbit) { |
559fb51b | 1491 | flags |= e->Qsleeping; |
356bd146 | 1492 | |
559fb51b SB |
1493 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1494 | cmdq_processed[1] += e->Cmdq1CreditReturn; | |
356bd146 | 1495 | |
559fb51b SB |
1496 | /* We batch updates to the TX side to avoid cacheline |
1497 | * ping-pong of TX state information on MP where the sender | |
1498 | * might run on a different CPU than this function... | |
1499 | */ | |
24a427cf | 1500 | if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { |
559fb51b SB |
1501 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1502 | cmdq_processed[0] = 0; | |
1503 | } | |
24a427cf | 1504 | |
559fb51b SB |
1505 | if (unlikely(cmdq_processed[1] > 16)) { |
1506 | sge->cmdQ[1].processed += cmdq_processed[1]; | |
1507 | cmdq_processed[1] = 0; | |
8199d3a7 | 1508 | } |
24a427cf | 1509 | |
8199d3a7 | 1510 | if (likely(e->DataValid)) { |
559fb51b SB |
1511 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
1512 | ||
5d9428de | 1513 | BUG_ON(!e->Sop || !e->Eop); |
559fb51b SB |
1514 | if (unlikely(e->Offload)) |
1515 | unexpected_offload(adapter, fl); | |
1516 | else | |
1517 | sge_rx(sge, fl, e->BufferLength); | |
1518 | ||
24a427cf SH |
1519 | ++done; |
1520 | ||
559fb51b SB |
1521 | /* |
1522 | * Note: this depends on each packet consuming a | |
1523 | * single free-list buffer; cf. the BUG_ON above. |
1524 | */ | |
1525 | if (++fl->cidx == fl->size) | |
1526 | fl->cidx = 0; | |
24a427cf SH |
1527 | prefetch(fl->centries[fl->cidx].skb); |
1528 | ||
559fb51b SB |
1529 | if (unlikely(--fl->credits < |
1530 | fl->size - SGE_FREEL_REFILL_THRESH)) | |
1531 | refill_free_list(sge, fl); | |
1532 | } else | |
1533 | sge->stats.pure_rsps++; | |
8199d3a7 | 1534 | |
8199d3a7 | 1535 | e++; |
559fb51b SB |
1536 | if (unlikely(++q->cidx == q->size)) { |
1537 | q->cidx = 0; | |
1538 | q->genbit ^= 1; | |
1539 | e = q->entries; | |
1540 | } | |
1541 | prefetch(e); | |
1542 | ||
1543 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { | |
1544 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | |
1545 | q->credits = 0; | |
8199d3a7 CL |
1546 | } |
1547 | } | |
1548 | ||
356bd146 | 1549 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
559fb51b | 1550 | sge->cmdQ[1].processed += cmdq_processed[1]; |
8199d3a7 | 1551 | |
24a427cf | 1552 | return done; |
559fb51b | 1553 | } |
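/* The response queue uses a generation-bit handshake: an entry belongs
 * to software only while e->GenerationBit == q->genbit, and each wrap
 * of q->cidx flips q->genbit so entries left over from the previous
 * lap compare unequal. A minimal consumer sketch of the protocol used
 * above (handle() is a stand-in, not a real function):
 *
 *	while (e->GenerationBit == q->genbit) {
 *		handle(e++);
 *		if (unlikely(++q->cidx == q->size)) {
 *			q->cidx = 0;
 *			q->genbit ^= 1;
 *			e = q->entries;
 *		}
 *	}
 */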
8199d3a7 | 1554 | |
3de00b89 SH |
1555 | static inline int responses_pending(const struct adapter *adapter) |
1556 | { | |
1557 | const struct respQ *Q = &adapter->sge->respQ; | |
1558 | const struct respQ_e *e = &Q->entries[Q->cidx]; | |
1559 | ||
807540ba | 1560 | return e->GenerationBit == Q->genbit; |
3de00b89 SH |
1561 | } |
1562 | ||
559fb51b SB |
1563 | /* |
1564 | * A simpler version of process_responses() that handles only pure (i.e., | |
1565 | * non data-carrying) responses. Such responses are too lightweight to justify |
1566 | * calling a softirq when using NAPI, so we handle them specially in hard |
1567 | * interrupt context. The caller must ensure that at least one response is |
1568 | * pending in the queue. Returns 1 if a valid data-carrying response remains |
1569 | * to be processed, 0 otherwise. |
1570 | */ | |
3de00b89 | 1571 | static int process_pure_responses(struct adapter *adapter) |
559fb51b SB |
1572 | { |
1573 | struct sge *sge = adapter->sge; | |
1574 | struct respQ *q = &sge->respQ; | |
3de00b89 | 1575 | struct respQ_e *e = &q->entries[q->cidx]; |
24a427cf | 1576 | const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
559fb51b SB |
1577 | unsigned int flags = 0; |
1578 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | |
8199d3a7 | 1579 | |
24a427cf | 1580 | prefetch(fl->centries[fl->cidx].skb); |
3de00b89 SH |
1581 | if (e->DataValid) |
1582 | return 1; | |
24a427cf | 1583 | |
559fb51b SB |
1584 | do { |
1585 | flags |= e->Qsleeping; | |
8199d3a7 | 1586 | |
559fb51b SB |
1587 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1588 | cmdq_processed[1] += e->Cmdq1CreditReturn; | |
356bd146 | 1589 | |
559fb51b SB |
1590 | e++; |
1591 | if (unlikely(++q->cidx == q->size)) { | |
1592 | q->cidx = 0; | |
1593 | q->genbit ^= 1; | |
1594 | e = q->entries; | |
8199d3a7 | 1595 | } |
559fb51b | 1596 | prefetch(e); |
8199d3a7 | 1597 | |
559fb51b SB |
1598 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { |
1599 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | |
1600 | q->credits = 0; | |
8199d3a7 | 1601 | } |
559fb51b SB |
1602 | sge->stats.pure_rsps++; |
1603 | } while (e->GenerationBit == q->genbit && !e->DataValid); | |
8199d3a7 | 1604 | |
356bd146 | 1605 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
559fb51b | 1606 | sge->cmdQ[1].processed += cmdq_processed[1]; |
8199d3a7 | 1607 | |
559fb51b | 1608 | return e->GenerationBit == q->genbit; |
8199d3a7 CL |
1609 | } |
1610 | ||
1611 | /* | |
559fb51b SB |
1612 | * Handler for new data events when using NAPI. This does not need any locking |
1613 | * or protection from interrupts as data interrupts are off at this point and | |
1614 | * other adapter interrupts do not interfere. | |
8199d3a7 | 1615 | */ |
bea3348e | 1616 | int t1_poll(struct napi_struct *napi, int budget) |
8199d3a7 | 1617 | { |
bea3348e | 1618 | struct adapter *adapter = container_of(napi, struct adapter, napi); |
445cf803 | 1619 | int work_done = process_responses(adapter, budget); |
7fe26a60 | 1620 | |
445cf803 | 1621 | if (likely(work_done < budget)) { |
288379f0 | 1622 | napi_complete(napi); |
bea3348e SH |
1623 | writel(adapter->sge->respQ.cidx, |
1624 | adapter->regs + A_SG_SLEEPING); | |
1625 | } | |
1626 | return work_done; | |
559fb51b | 1627 | } |
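/* For orientation only: the NAPI context polled here is expected to be
 * registered elsewhere in the driver roughly as sketched below (the
 * weight of 64 is an assumption, not taken from this file):
 *
 *	netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
 *	napi_enable(&adapter->napi);
 */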
8199d3a7 | 1628 | |
7fe26a60 | 1629 | irqreturn_t t1_interrupt(int irq, void *data) |
559fb51b | 1630 | { |
559fb51b SB |
1631 | struct adapter *adapter = data; |
1632 | struct sge *sge = adapter->sge; | |
3de00b89 | 1633 | int handled; |
559fb51b | 1634 | |
3de00b89 | 1635 | if (likely(responses_pending(adapter))) { |
356bd146 | 1636 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
7fe26a60 | 1637 | |
bea3348e | 1638 | if (napi_schedule_prep(&adapter->napi)) { |
3de00b89 | 1639 | if (process_pure_responses(adapter)) |
288379f0 | 1640 | __napi_schedule(&adapter->napi); |
3de00b89 SH |
1641 | else { |
1642 | /* no data, no NAPI needed */ | |
1643 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | |
4422b003 FR |
1644 | /* undo schedule_prep */ |
1645 | napi_enable(&adapter->napi); | |
7fe26a60 | 1646 | } |
7fe26a60 | 1647 | } |
3de00b89 SH |
1648 | return IRQ_HANDLED; |
1649 | } | |
1650 | ||
1651 | spin_lock(&adapter->async_lock); | |
1652 | handled = t1_slow_intr_handler(adapter); | |
1653 | spin_unlock(&adapter->async_lock); | |
7fe26a60 | 1654 | |
559fb51b SB |
1655 | if (!handled) |
1656 | sge->stats.unhandled_irqs++; | |
3de00b89 | 1657 | |
559fb51b SB |
1658 | return IRQ_RETVAL(handled != 0); |
1659 | } | |
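/* A minimal sketch of how this handler would typically be registered,
 * done elsewhere in the driver (the IRQF_SHARED flag and the name
 * argument are assumptions, not taken from this file):
 *
 *	err = request_irq(adapter->pdev->irq, t1_interrupt,
 *			  IRQF_SHARED, adapter->name, adapter);
 */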
8199d3a7 | 1660 | |
559fb51b SB |
1661 | /* |
1662 | * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. | |
1663 | * | |
1664 | * The code figures out how many entries the sk_buff will require in the | |
1665 | * cmdQ and updates the cmdQ data structure with the state once the enqueue | |
1666 | * has completed. Then, it doesn't access the global structure anymore, but |
25985edc | 1667 | * uses the corresponding fields on the stack. In conjunction with a spinlock |
559fb51b SB |
1668 | * around that code, we can make the function reentrant without holding the |
1669 | * lock when we actually enqueue (which might be expensive, especially on | |
1670 | * architectures with IO MMUs). | |
1671 | * | |
1672 | * This runs with softirqs disabled. | |
1673 | */ | |
aa84505f SH |
1674 | static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, |
1675 | unsigned int qid, struct net_device *dev) | |
559fb51b SB |
1676 | { |
1677 | struct sge *sge = adapter->sge; | |
1678 | struct cmdQ *q = &sge->cmdQ[qid]; | |
f1d3d38a | 1679 | unsigned int credits, pidx, genbit, count, use_sched_skb = 0; |
559fb51b | 1680 | |
cabdfb37 SH |
1681 | if (!spin_trylock(&q->lock)) |
1682 | return NETDEV_TX_LOCKED; | |
1683 | ||
559fb51b SB |
1684 | reclaim_completed_tx(sge, q); |
1685 | ||
1686 | pidx = q->pidx; | |
1687 | credits = q->size - q->in_use; | |
1688 | count = 1 + skb_shinfo(skb)->nr_frags; | |
f1d3d38a | 1689 | count += compute_large_page_tx_descs(skb); |
559fb51b | 1690 | |
f1d3d38a SH |
1691 | /* Ethernet packet */ |
1692 | if (unlikely(credits < count)) { | |
1693 | if (!netif_queue_stopped(dev)) { | |
559fb51b SB |
1694 | netif_stop_queue(dev); |
1695 | set_bit(dev->if_port, &sge->stopped_tx_queues); | |
232a347a | 1696 | sge->stats.cmdQ_full[2]++; |
c1f51212 | 1697 | pr_err("%s: Tx ring full while queue awake!\n", |
f1d3d38a | 1698 | adapter->name); |
8199d3a7 | 1699 | } |
f1d3d38a SH |
1700 | spin_unlock(&q->lock); |
1701 | return NETDEV_TX_BUSY; | |
1702 | } | |
1703 | ||
1704 | if (unlikely(credits - count < q->stop_thres)) { | |
1705 | netif_stop_queue(dev); | |
1706 | set_bit(dev->if_port, &sge->stopped_tx_queues); | |
1707 | sge->stats.cmdQ_full[2]++; | |
1708 | } | |
1709 | ||
1710 | /* T204 cmdQ0 skbs that are destined for a certain port have to go | |
1711 | * through the scheduler. | |
1712 | */ | |
1713 | if (sge->tx_sched && !qid && skb->dev) { | |
356bd146 | 1714 | use_sched: |
f1d3d38a SH |
1715 | use_sched_skb = 1; |
1716 | /* Note that the scheduler might return a different skb than | |
1717 | * the one passed in. | |
1718 | */ | |
1719 | skb = sched_skb(sge, skb, credits); | |
1720 | if (!skb) { | |
1721 | spin_unlock(&q->lock); | |
1722 | return NETDEV_TX_OK; | |
559fb51b | 1723 | } |
f1d3d38a SH |
1724 | pidx = q->pidx; |
1725 | count = 1 + skb_shinfo(skb)->nr_frags; | |
1726 | count += compute_large_page_tx_descs(skb); | |
559fb51b | 1727 | } |
f1d3d38a | 1728 | |
559fb51b SB |
1729 | q->in_use += count; |
1730 | genbit = q->genbit; | |
f1d3d38a | 1731 | pidx = q->pidx; |
559fb51b SB |
1732 | q->pidx += count; |
1733 | if (q->pidx >= q->size) { | |
1734 | q->pidx -= q->size; | |
1735 | q->genbit ^= 1; | |
8199d3a7 | 1736 | } |
559fb51b | 1737 | spin_unlock(&q->lock); |
8199d3a7 | 1738 | |
559fb51b | 1739 | write_tx_descs(adapter, skb, pidx, genbit, q); |
8199d3a7 CL |
1740 | |
1741 | /* | |
1742 | * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring | |
1743 | * the doorbell if the Q is asleep. There is a natural race where |
1744 | * the hardware goes to sleep just after we check; in that case |
1745 | * the interrupt handler will detect the outstanding TX packet |
1746 | * and ring the doorbell for us. |
1747 | */ | |
559fb51b SB |
1748 | if (qid) |
1749 | doorbell_pio(adapter, F_CMDQ1_ENABLE); | |
1750 | else { | |
1751 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | |
1752 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { | |
1753 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | |
1754 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | |
1755 | } | |
8199d3a7 | 1756 | } |
f1d3d38a SH |
1757 | |
1758 | if (use_sched_skb) { | |
1759 | if (spin_trylock(&q->lock)) { | |
1760 | credits = q->size - q->in_use; | |
1761 | skb = NULL; | |
1762 | goto use_sched; | |
1763 | } | |
1764 | } | |
aa84505f | 1765 | return NETDEV_TX_OK; |
8199d3a7 CL |
1766 | } |
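/* NETDEV_TX_LOCKED, returned above when the trylock fails, tells the
 * core that the driver's private Tx lock was contended; the stack
 * requeues the skb and retries instead of dropping it.
 */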
1767 | ||
1768 | #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) | |
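/* MK_ETH_TYPE_MSS packs the framing type into the bits above the MSS:
 * the MSS occupies the low 14 bits and the type starts at bit 14, e.g.
 * MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) == (CPL_ETH_II << 14) | 1460.
 */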
1769 | ||
559fb51b SB |
1770 | /* |
1771 | * eth_hdr_len - return the length of an Ethernet header | |
1772 | * @data: pointer to the start of the Ethernet header | |
1773 | * | |
1774 | * Returns the length of an Ethernet header, including optional VLAN tag. | |
1775 | */ | |
1776 | static inline int eth_hdr_len(const void *data) | |
1777 | { | |
1778 | const struct ethhdr *e = data; | |
1779 | ||
1780 | return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; | |
1781 | } | |
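/* ETH_HLEN is 14 bytes; VLAN_ETH_HLEN adds the 4-byte 802.1Q tag for a
 * total of 18, hence the longer header length for VLAN-tagged frames.
 */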
1782 | ||
8199d3a7 CL |
1783 | /* |
1784 | * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. | |
1785 | */ | |
61357325 | 1786 | netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
8199d3a7 | 1787 | { |
c3ccc123 | 1788 | struct adapter *adapter = dev->ml_priv; |
559fb51b | 1789 | struct sge *sge = adapter->sge; |
ca0c9584 | 1790 | struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); |
8199d3a7 | 1791 | struct cpl_tx_pkt *cpl; |
cabdfb37 SH |
1792 | struct sk_buff *orig_skb = skb; |
1793 | int ret; | |
8199d3a7 | 1794 | |
f1d3d38a SH |
1795 | if (skb->protocol == htons(ETH_P_CPL5)) |
1796 | goto send; | |
1797 | ||
7832ee03 DLR |
1798 | /* |
1799 | * We are using a non-standard hard_header_len. | |
1800 | * Allocate more header room in the rare cases it is not big enough. | |
1801 | */ | |
1802 | if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { | |
1803 | skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); | |
1804 | ++st->tx_need_hdrroom; | |
1805 | dev_kfree_skb_any(orig_skb); | |
1806 | if (!skb) | |
1807 | return NETDEV_TX_OK; | |
1808 | } | |
1809 | ||
f1d3d38a | 1810 | if (skb_shinfo(skb)->gso_size) { |
8199d3a7 CL |
1811 | int eth_type; |
1812 | struct cpl_tx_pkt_lso *hdr; | |
1813 | ||
56f643c2 | 1814 | ++st->tx_tso; |
559fb51b | 1815 | |
bbe735e4 | 1816 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
8199d3a7 CL |
1817 | CPL_ETH_II : CPL_ETH_II_VLAN; |
1818 | ||
1819 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); | |
1820 | hdr->opcode = CPL_TX_PKT_LSO; | |
1821 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; | |
eddc9ec5 | 1822 | hdr->ip_hdr_words = ip_hdr(skb)->ihl; |
aa8223c7 | 1823 | hdr->tcp_hdr_words = tcp_hdr(skb)->doff; |
8199d3a7 | 1824 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
f1d3d38a | 1825 | skb_shinfo(skb)->gso_size)); |
8199d3a7 CL |
1826 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
1827 | cpl = (struct cpl_tx_pkt *)hdr; | |
f1d3d38a | 1828 | } else { |
8199d3a7 | 1829 | /* |
356bd146 | 1830 | * Packets shorter than ETH_HLEN can break the MAC, so drop them |
559fb51b SB |
1831 | * early. Also, we may get oversized packets because some |
1832 | * parts of the kernel don't handle our unusual hard_header_len |
1833 | * correctly; drop those too. |
8199d3a7 | 1834 | */ |
559fb51b SB |
1835 | if (unlikely(skb->len < ETH_HLEN || |
1836 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { | |
f1d3d38a SH |
1837 | pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name, |
1838 | skb->len, eth_hdr_len(skb->data), dev->mtu); | |
559fb51b | 1839 | dev_kfree_skb_any(skb); |
aa84505f | 1840 | return NETDEV_TX_OK; |
559fb51b SB |
1841 | } |
1842 | ||
30f554f9 | 1843 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
eddc9ec5 | 1844 | ip_hdr(skb)->protocol == IPPROTO_UDP) { |
84fa7933 | 1845 | if (unlikely(skb_checksum_help(skb))) { |
f1d3d38a | 1846 | pr_debug("%s: unable to do udp checksum\n", dev->name); |
559fb51b | 1847 | dev_kfree_skb_any(skb); |
aa84505f | 1848 | return NETDEV_TX_OK; |
559fb51b | 1849 | } |
f1d3d38a | 1850 | } |
8199d3a7 | 1851 | |
559fb51b SB |
1852 | /* Stash a gratuitous ARP when we see one; we'll reuse it |
1853 | * later to flush out stuck ESPI packets. |
f1d3d38a SH |
1854 | */ |
1855 | if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) { |
8199d3a7 | 1856 | if (skb->protocol == htons(ETH_P_ARP) && |
d0a92be0 | 1857 | arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { |
f1d3d38a | 1858 | adapter->sge->espibug_skb[dev->if_port] = skb; |
559fb51b SB |
1859 | /* We want to re-use this skb later. We |
1860 | * simply bump the reference count and it | |
1861 | * will not be freed... | |
1862 | */ | |
1863 | skb = skb_get(skb); | |
1864 | } | |
8199d3a7 | 1865 | } |
559fb51b SB |
1866 | |
1867 | cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); | |
8199d3a7 CL |
1868 | cpl->opcode = CPL_TX_PKT; |
1869 | cpl->ip_csum_dis = 1; /* SW calculates IP csum */ | |
84fa7933 | 1870 | cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; |
8199d3a7 | 1871 | /* the length field isn't used so don't bother setting it */ |
559fb51b | 1872 | |
84fa7933 | 1873 | st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); |
8199d3a7 CL |
1874 | } |
1875 | cpl->iff = dev->if_port; | |
1876 | ||
eab6d18d | 1877 | if (vlan_tx_tag_present(skb)) { |
8199d3a7 CL |
1878 | cpl->vlan_valid = 1; |
1879 | cpl->vlan = htons(vlan_tx_tag_get(skb)); | |
559fb51b | 1880 | st->vlan_insert++; |
8199d3a7 | 1881 | } else |
8199d3a7 CL |
1882 | cpl->vlan_valid = 0; |
1883 | ||
f1d3d38a | 1884 | send: |
cabdfb37 SH |
1885 | ret = t1_sge_tx(skb, adapter, 0, dev); |
1886 | ||
1887 | /* If transmit busy, and we reallocated skb's due to headroom limit, | |
1888 | * then silently discard to avoid leak. | |
1889 | */ | |
1890 | if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { | |
356bd146 | 1891 | dev_kfree_skb_any(skb); |
cabdfb37 | 1892 | ret = NETDEV_TX_OK; |
356bd146 | 1893 | } |
cabdfb37 | 1894 | return ret; |
559fb51b | 1895 | } |
8199d3a7 | 1896 | |
559fb51b SB |
1897 | /* |
1898 | * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. | |
1899 | */ | |
1900 | static void sge_tx_reclaim_cb(unsigned long data) | |
1901 | { | |
1902 | int i; | |
1903 | struct sge *sge = (struct sge *)data; | |
1904 | ||
1905 | for (i = 0; i < SGE_CMDQ_N; ++i) { | |
1906 | struct cmdQ *q = &sge->cmdQ[i]; | |
1907 | ||
1908 | if (!spin_trylock(&q->lock)) | |
1909 | continue; | |
8199d3a7 | 1910 | |
559fb51b | 1911 | reclaim_completed_tx(sge, q); |
f1d3d38a SH |
1912 | if (i == 0 && q->in_use) { /* flush pending credits */ |
1913 | writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); | |
1914 | } | |
559fb51b SB |
1915 | spin_unlock(&q->lock); |
1916 | } | |
1917 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
1918 | } | |
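/* If a queue's lock is contended the reclaim pass simply skips it; the
 * timer re-arms every TX_RECLAIM_PERIOD, so the queue is revisited on
 * the next pass.
 */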
1919 | ||
1920 | /* | |
1921 | * Propagate changes of the SGE coalescing parameters to the HW. | |
1922 | */ | |
1923 | int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) | |
1924 | { | |
559fb51b SB |
1925 | sge->fixed_intrtimer = p->rx_coalesce_usecs * |
1926 | core_ticks_per_usec(sge->adapter); | |
1927 | writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); | |
8199d3a7 CL |
1928 | return 0; |
1929 | } | |
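/* Worked example with an assumed 125 MHz core clock (illustrative, not
 * from this file): core_ticks_per_usec() yields 125, so
 * rx_coalesce_usecs = 50 programs 50 * 125 = 6250 core ticks into
 * A_SG_INTRTIMER.
 */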
1930 | ||
559fb51b SB |
1931 | /* |
1932 | * Allocates both RX and TX resources and configures the SGE. However, | |
1933 | * the hardware is not enabled yet. | |
1934 | */ | |
1935 | int t1_sge_configure(struct sge *sge, struct sge_params *p) | |
8199d3a7 | 1936 | { |
559fb51b SB |
1937 | if (alloc_rx_resources(sge, p)) |
1938 | return -ENOMEM; | |
1939 | if (alloc_tx_resources(sge, p)) { | |
1940 | free_rx_resources(sge); | |
1941 | return -ENOMEM; | |
1942 | } | |
1943 | configure_sge(sge, p); | |
1944 | ||
1945 | /* | |
1946 | * Now that we have sized the free lists calculate the payload | |
1947 | * capacity of the large buffers. Other parts of the driver use | |
1948 | * this to set the max offload coalescing size so that RX packets | |
1949 | * do not overflow our large buffers. | |
1950 | */ | |
1951 | p->large_buf_capacity = jumbo_payload_capacity(sge); | |
1952 | return 0; | |
1953 | } | |
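/* Note the unwind-on-failure pattern above: if Tx allocation fails
 * after Rx allocation succeeded, the Rx resources are freed before
 * returning -ENOMEM, so the caller sees all-or-nothing semantics.
 */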
8199d3a7 | 1954 | |
559fb51b SB |
1955 | /* |
1956 | * Disables the DMA engine. | |
1957 | */ | |
1958 | void t1_sge_stop(struct sge *sge) | |
1959 | { | |
f1d3d38a | 1960 | int i; |
559fb51b | 1961 | writel(0, sge->adapter->regs + A_SG_CONTROL); |
f1d3d38a SH |
1962 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
1963 | ||
559fb51b SB |
1964 | if (is_T2(sge->adapter)) |
1965 | del_timer_sync(&sge->espibug_timer); | |
f1d3d38a | 1966 | |
559fb51b | 1967 | del_timer_sync(&sge->tx_reclaim_timer); |
f1d3d38a SH |
1968 | if (sge->tx_sched) |
1969 | tx_sched_stop(sge); | |
1970 | ||
1971 | for (i = 0; i < MAX_NPORTS; i++) | |
f4fe5a9c | 1972 | kfree_skb(sge->espibug_skb[i]); |
8199d3a7 CL |
1973 | } |
1974 | ||
559fb51b SB |
1975 | /* |
1976 | * Enables the DMA engine. | |
1977 | */ | |
1978 | void t1_sge_start(struct sge *sge) | |
8199d3a7 | 1979 | { |
559fb51b SB |
1980 | refill_free_list(sge, &sge->freelQ[0]); |
1981 | refill_free_list(sge, &sge->freelQ[1]); | |
1982 | ||
1983 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); | |
1984 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); | |
f1d3d38a | 1985 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
559fb51b SB |
1986 | |
1987 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
1988 | ||
f1d3d38a | 1989 | if (is_T2(sge->adapter)) |
559fb51b SB |
1990 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
1991 | } | |
1992 | ||
1993 | /* | |
1994 | * Callback for the T2 ESPI 'stuck packet' workaround. |
1995 | */ | |
f1d3d38a | 1996 | static void espibug_workaround_t204(unsigned long data) |
559fb51b SB |
1997 | { |
1998 | struct adapter *adapter = (struct adapter *)data; | |
8199d3a7 | 1999 | struct sge *sge = adapter->sge; |
f1d3d38a SH |
2000 | unsigned int nports = adapter->params.nports; |
2001 | u32 seop[MAX_NPORTS]; | |
8199d3a7 | 2002 | |
f1d3d38a SH |
2003 | if (adapter->open_device_map & PORT_MASK) { |
2004 | int i; | |
356bd146 FR |
2005 | |
2006 | if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) | |
f1d3d38a | 2007 | return; |
356bd146 | 2008 | |
f1d3d38a | 2009 | for (i = 0; i < nports; i++) { |
356bd146 FR |
2010 | struct sk_buff *skb = sge->espibug_skb[i]; |
2011 | ||
2012 | if (!netif_running(adapter->port[i].dev) || | |
2013 | netif_queue_stopped(adapter->port[i].dev) || | |
2014 | !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) | |
2015 | continue; | |
2016 | ||
2017 | if (!skb->cb[0]) { | |
27d7ff46 ACM |
2018 | skb_copy_to_linear_data_offset(skb, |
2019 | sizeof(struct cpl_tx_pkt), | |
2020 | ch_mac_addr, | |
2021 | ETH_ALEN); | |
2022 | skb_copy_to_linear_data_offset(skb, | |
2023 | skb->len - 10, | |
2024 | ch_mac_addr, | |
2025 | ETH_ALEN); | |
356bd146 | 2026 | skb->cb[0] = 0xff; |
559fb51b | 2027 | } |
356bd146 FR |
2028 | |
2029 | /* bump the reference count to avoid freeing of | |
2030 | * the skb once the DMA has completed. | |
2031 | */ | |
2032 | skb = skb_get(skb); | |
2033 | t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); | |
559fb51b SB |
2034 | } |
2035 | } | |
2036 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | |
8199d3a7 CL |
2037 | } |
2038 | ||
f1d3d38a SH |
2039 | static void espibug_workaround(unsigned long data) |
2040 | { | |
2041 | struct adapter *adapter = (struct adapter *)data; | |
2042 | struct sge *sge = adapter->sge; | |
2043 | ||
2044 | if (netif_running(adapter->port[0].dev)) { | |
2045 | struct sk_buff *skb = sge->espibug_skb[0]; | |
2046 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); | |
2047 | ||
2048 | if ((seop & 0xfff0fff) == 0xfff && skb) { | |
2049 | if (!skb->cb[0]) { | |
27d7ff46 ACM |
2050 | skb_copy_to_linear_data_offset(skb, |
2051 | sizeof(struct cpl_tx_pkt), | |
2052 | ch_mac_addr, | |
2053 | ETH_ALEN); | |
2054 | skb_copy_to_linear_data_offset(skb, | |
2055 | skb->len - 10, | |
2056 | ch_mac_addr, | |
2057 | ETH_ALEN); | |
f1d3d38a SH |
2058 | skb->cb[0] = 0xff; |
2059 | } | |
2060 | ||
2061 | /* bump the reference count to avoid freeing of the | |
2062 | * skb once the DMA has completed. | |
2063 | */ | |
2064 | skb = skb_get(skb); | |
2065 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); | |
2066 | } | |
2067 | } | |
2068 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | |
2069 | } | |
2070 | ||
559fb51b SB |
2071 | /* |
2072 | * Creates a t1_sge structure and returns suggested resource parameters. | |
2073 | */ | |
2074 | struct sge * __devinit t1_sge_create(struct adapter *adapter, | |
2075 | struct sge_params *p) | |
2076 | { | |
cbee9f91 | 2077 | struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); |
56f643c2 | 2078 | int i; |
559fb51b SB |
2079 | |
2080 | if (!sge) | |
2081 | return NULL; | |
559fb51b SB |
2082 | |
2083 | sge->adapter = adapter; | |
2084 | sge->netdev = adapter->port[0].dev; | |
2085 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; | |
2086 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | |
2087 | ||
56f643c2 SH |
2088 | for_each_port(adapter, i) { |
2089 | sge->port_stats[i] = alloc_percpu(struct sge_port_stats); | |
2090 | if (!sge->port_stats[i]) | |
2091 | goto nomem_port; | |
2092 | } | |
2093 | ||
559fb51b SB |
2094 | init_timer(&sge->tx_reclaim_timer); |
2095 | sge->tx_reclaim_timer.data = (unsigned long)sge; | |
2096 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; | |
2097 | ||
2098 | if (is_T2(sge->adapter)) { | |
2099 | init_timer(&sge->espibug_timer); | |
f1d3d38a SH |
2100 | |
2101 | if (adapter->params.nports > 1) { | |
2102 | tx_sched_init(sge); | |
2103 | sge->espibug_timer.function = espibug_workaround_t204; | |
d7487421 | 2104 | } else |
f1d3d38a | 2105 | sge->espibug_timer.function = espibug_workaround; |
559fb51b | 2106 | sge->espibug_timer.data = (unsigned long)sge->adapter; |
f1d3d38a | 2107 | |
559fb51b | 2108 | sge->espibug_timeout = 1; |
f1d3d38a SH |
2109 | /* for T204, every 10ms */ |
2110 | if (adapter->params.nports > 1) | |
2111 | sge->espibug_timeout = HZ/100; | |
559fb51b | 2112 | } |
356bd146 | 2113 | |
2115 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; | |
2116 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; | |
2117 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; | |
2118 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; | |
f1d3d38a SH |
2119 | if (sge->tx_sched) { |
2120 | if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) | |
2121 | p->rx_coalesce_usecs = 15; | |
2122 | else | |
2123 | p->rx_coalesce_usecs = 50; | |
2124 | } else | |
2125 | p->rx_coalesce_usecs = 50; | |
2126 | ||
559fb51b SB |
2127 | p->coalesce_enable = 0; |
2128 | p->sample_interval_usecs = 0; | |
559fb51b SB |
2129 | |
2130 | return sge; | |
56f643c2 SH |
2131 | nomem_port: |
2132 | while (i >= 0) { | |
2133 | free_percpu(sge->port_stats[i]); | |
2134 | --i; | |
2135 | } | |
2136 | kfree(sge); | |
2137 | return NULL; | |
2138 | ||
559fb51b | 2139 | } |
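/* Error-path note for t1_sge_create(): the nomem_port loop walks i back
 * down to 0; the slot whose allocation failed holds NULL, which
 * free_percpu() treats as a no-op, so no extra bookkeeping is needed.
 */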