/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of receive path.
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */

static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_desc *ds;
        struct sk_buff *skb;

        ATH_RXBUF_RESET(bf);

        ds = bf->bf_desc;
        ds->ds_link = 0;    /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* XXX For RADAR?
         * virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        ASSERT(skb != NULL);
        ds->ds_vdata = skb->data;

        /* setup rx descriptors. The sc_rxbufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process */
        ath9k_hw_setuprxdesc(ah,
                             ds,
                             sc->sc_rxbufsize,
                             0);

        if (sc->sc_rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->sc_rxlink = bf->bf_daddr;

        sc->sc_rxlink = &ds->ds_link;
        ath9k_hw_rxena(ah);
}
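
/*
 * Illustration of the chaining above: with buffers A, B and C queued in
 * that order the hardware sees
 *
 *      putrxbuf(A) -> A.ds_link -> B.ds_link -> C.ds_link -> 0 (null)
 *
 * sc_rxlink always points at the ds_link word of the most recently queued
 * descriptor, so each call patches that word with the new buffer's DMA
 * address and then advances sc_rxlink to the new descriptor.
 */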

/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
                      struct ath_node *an,
                      struct sk_buff *skb)
{
        struct ieee80211_bar *bar;
        struct ath_arx_tid *rxtid;
        struct sk_buff *tskb;
        struct ath_recv_status *rx_status;
        int tidno, index, cindex;
        u16 seqno;

        /* look at BAR contents */

        bar = (struct ieee80211_bar *)skb->data;
        tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
                >> IEEE80211_BAR_CTL_TID_S;
        seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
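
        /*
         * start_seq_num uses the standard 802.11 sequence-control layout:
         * the low 4 bits carry the fragment number and the upper 12 bits
         * carry the sequence number, hence the shift by
         * IEEE80211_SEQ_SEQ_SHIFT to recover the window's starting seqno.
         */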

        /* process BAR - indicate all pending RX frames till the BAR seqno */

        rxtid = &an->an_aggr.rx.tid[tidno];

        spin_lock_bh(&rxtid->tidlock);

        /* get relative index */

        index = ATH_BA_INDEX(rxtid->seq_next, seqno);
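
        /*
         * ATH_BA_INDEX gives the distance of seqno ahead of the current
         * window start, modulo the 4096-entry sequence space.  For example,
         * with rxtid->seq_next == 4094 and seqno == 2 the index is 4, i.e.
         * the BAR asks us to release the four frames 4094, 4095, 0 and 1.
         */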

        /* drop BAR if old sequence (index is too large) */

        if ((index > rxtid->baw_size) &&
            (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
                /* discard frame, ieee layer may not treat frame as a dup */
                goto unlock_and_free;

        /* complete receive processing for all pending frames up to BAR seqno */

        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        while ((rxtid->baw_head != rxtid->baw_tail) &&
               (rxtid->baw_head != cindex)) {
                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

                if (tskb != NULL)
                        ath_rx_subframe(an, tskb, rx_status);

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /* ... and indicate rest of the frames in-order */

        while (rxtid->baw_head != rxtid->baw_tail &&
               rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

                ath_rx_subframe(an, tskb, rx_status);

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

unlock_and_free:
        spin_unlock_bh(&rxtid->tidlock);
        /* free bar itself */
        dev_kfree_skb(skb);
        return IEEE80211_FTYPE_CTL;
}

/* Function to handle a subframe of aggregation when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
                           struct ath_node *an,
                           struct sk_buff *skb,
                           struct ath_recv_status *rx_status)
{
        struct ieee80211_hdr *hdr;
        struct ath_arx_tid *rxtid;
        struct ath_rxbuf *rxbuf;
        u8 type, subtype;
        u16 rxseq;
        int tid = 0, index, cindex, rxdiff;
        __le16 fc;
        u8 *qc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        /* collect stats of frames with non-zero version */

        if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
                dev_kfree_skb(skb);
                return -1;
        }

        type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
        subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

        if (ieee80211_is_back_req(fc))
                return ath_bar_rx(sc, an, skb);

        /* special aggregate processing only for qos unicast data frames */

        if (!ieee80211_is_data(fc) ||
            !ieee80211_is_data_qos(fc) ||
            is_multicast_ether_addr(hdr->addr1))
                return ath_rx_subframe(an, skb, rx_status);

        /* lookup rx tid state */

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & 0xf;
        }

        if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
                /* Drop the frame not belonging to me. */
                if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
                        dev_kfree_skb(skb);
                        return -1;
                }
        }

        rxtid = &an->an_aggr.rx.tid[tid];

        spin_lock(&rxtid->tidlock);

        rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
                 (ATH_TID_MAX_BUFS - 1);

        /*
         * If the ADDBA exchange has not been completed by the source,
         * process via legacy path (i.e. no reordering buffer is needed)
         */
        if (!rxtid->addba_exchangecomplete) {
                spin_unlock(&rxtid->tidlock);
                return ath_rx_subframe(an, skb, rx_status);
        }

        /* extract sequence number from recvd frame */

        rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

        if (rxtid->seq_reset) {
                rxtid->seq_reset = 0;
                rxtid->seq_next = rxseq;
        }

        index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

        /* drop frame if old sequence (index is too large) */

        if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
                /* discard frame, ieee layer may not treat frame as a dup */
                spin_unlock(&rxtid->tidlock);
                dev_kfree_skb(skb);
                return IEEE80211_FTYPE_DATA;
        }

        /* sequence number is beyond block-ack window */

        if (index >= rxtid->baw_size) {

                /* complete receive processing for all pending frames */

                while (index >= rxtid->baw_size) {

                        rxbuf = rxtid->rxbuf + rxtid->baw_head;

                        if (rxbuf->rx_wbuf != NULL) {
                                ath_rx_subframe(an, rxbuf->rx_wbuf,
                                                &rxbuf->rx_status);
                                rxbuf->rx_wbuf = NULL;
                        }

                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);

                        index--;
                }
        }
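
        /*
         * Example of the window slide above: with baw_size == 64 and a new
         * frame arriving at index 66, the loop releases three slots from
         * the head of the window (delivering any buffered sub-frames in
         * order) and advances seq_next three times, so the new frame now
         * falls at index 63, the last slot inside the block-ack window.
         */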

        /* add buffer to the recv ba window */

        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        rxbuf = rxtid->rxbuf + cindex;

        if (rxbuf->rx_wbuf != NULL) {
                spin_unlock(&rxtid->tidlock);
                /* duplicate frame */
                dev_kfree_skb(skb);
                return IEEE80211_FTYPE_DATA;
        }

        rxbuf->rx_wbuf = skb;
        rxbuf->rx_time = get_timestamp();
        rxbuf->rx_status = *rx_status;

        /* advance tail if sequence received is newer
         * than any received so far */

        if (index >= rxdiff) {
                rxtid->baw_tail = cindex;
                INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
        }

        /* indicate all in-order received frames */

        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;
                if (!rxbuf->rx_wbuf)
                        break;

                ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /*
         * start a timer to flush all received frames if there are pending
         * receive frames
         */
        if (rxtid->baw_head != rxtid->baw_tail)
                mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
        else
                del_timer_sync(&rxtid->timer);

        spin_unlock(&rxtid->tidlock);
        return IEEE80211_FTYPE_DATA;
}

/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
        struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
        struct ath_node *an = rxtid->an;
        struct ath_rxbuf *rxbuf;
        int nosched;

        spin_lock_bh(&rxtid->tidlock);
        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;
                if (!rxbuf->rx_wbuf) {
                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
                        continue;
                }

                /*
                 * Stop if the next one is a very recent frame.
                 *
                 * Call get_timestamp in every iteration to protect against the
                 * case in which a new frame is received while we are executing
                 * this function. Using a timestamp obtained before entering
                 * the loop could lead to a very large time interval
                 * (a negative value typecast to unsigned), breaking the
                 * function's logic.
                 */
                if ((get_timestamp() - rxbuf->rx_time) <
                    (ATH_RX_TIMEOUT * HZ / 1000))
                        break;

                ath_rx_subframe(an, rxbuf->rx_wbuf,
                                &rxbuf->rx_status);
                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /*
         * Determine whether the timer would need to be re-armed: frames
         * are still pending if baw_head has not caught up with baw_tail.
         */
        if (rxtid->baw_head != rxtid->baw_tail)
                nosched = 0;
        else
                nosched = 1;    /* no need to re-arm the timer again */

        spin_unlock_bh(&rxtid->tidlock);
}

/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
                             struct ath_arx_tid *rxtid, int drop)
{
        struct ath_rxbuf *rxbuf;
        unsigned long flag;

        spin_lock_irqsave(&rxtid->tidlock, flag);
        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;
                if (!rxbuf->rx_wbuf) {
                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
                        continue;
                }

                if (drop)
                        dev_kfree_skb(rxbuf->rx_wbuf);
                else
                        ath_rx_subframe(rxtid->an,
                                        rxbuf->rx_wbuf,
                                        &rxbuf->rx_status);

                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }
        spin_unlock_irqrestore(&rxtid->tidlock, flag);
}

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
                                       u32 len)
{
        struct sk_buff *skb;
        u32 off;

        /*
         * Cache-line-align. This is important (for the
         * 5210 at least) as not doing so causes bogus data
         * in rx'd frames.
         */

        /* Note: the kernel can allocate more than what we ask it to give
         * us. We really only need 4 KB as that is what this hardware
         * supports; in fact we need at least 3849 as that is the MAX AMSDU
         * size this hardware supports. Unfortunately this means we may get
         * 8 KB here from the kernel... and that is actually what is
         * observed on some systems :( */
        skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
        if (skb != NULL) {
                off = ((unsigned long) skb->data) % sc->sc_cachelsz;
                if (off != 0)
                        skb_reserve(skb, sc->sc_cachelsz - off);
        } else {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: skbuff alloc of size %u failed\n",
                        __func__, len);
                return NULL;
        }

        return skb;
}
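
/*
 * Alignment example: with sc_cachelsz == 128 and skb->data landing at an
 * address ending in 0x60, off is 0x60 and skb_reserve(skb, 0x20) pushes
 * skb->data forward to the next 128-byte boundary.  The extra
 * sc_cachelsz - 1 bytes requested from dev_alloc_skb() guarantee that the
 * aligned buffer still has room for the full len bytes.
 */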

static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

        ASSERT(bf != NULL);

        spin_lock_bh(&sc->sc_rxbuflock);
        if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                /*
                 * This buffer is still held for hw access.
                 * Mark it as free to be re-queued later.
                 */
                bf->bf_status |= ATH_BUFSTATUS_FREE;
        } else {
                /* XXX: we probably never enter here, remove after
                 * verification */
                list_add_tail(&bf->list, &sc->sc_rxbuf);
                ath_rx_buf_link(sc, bf);
        }
        spin_unlock_bh(&sc->sc_rxbuflock);
}

/*
 * The skb indicated to the upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
                           struct sk_buff *skb,
                           struct ath_recv_status *status,
                           u16 keyix)
{
        struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
        struct sk_buff *nskb;
        int type;

        /* indicate frame to the stack, which will free the old skb. */
        type = _ath_rx_indicate(sc, skb, status, keyix);

        /* allocate a new skb and queue it for H/W processing */
        nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
        if (nskb != NULL) {
                bf->bf_mpdu = nskb;
                bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
                                                 sc->sc_rxbufsize,
                                                 PCI_DMA_FROMDEVICE);
                bf->bf_dmacontext = bf->bf_buf_addr;
                ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

                /* queue the new wbuf to H/W */
                ath_rx_requeue(sc, nskb);
        }

        return type;
}
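
/*
 * Note on the recycling scheme above: ownership of the received skb passes
 * to mac80211 and never comes back, so the ath_buf is immediately re-armed
 * with a freshly allocated, DMA-mapped replacement.  If that allocation
 * fails, the buffer is simply not re-queued to the hardware.
 */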

static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, sc->sc_myaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;

        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
        DPRINTF(sc, ATH_DBG_CONFIG,
                "%s: RX filter 0x%x, MC filter %08x:%08x\n",
                __func__, rfilt, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        do {
                spin_lock_init(&sc->sc_rxflushlock);
                sc->sc_flags &= ~SC_OP_RXFLUSH;
                spin_lock_init(&sc->sc_rxbuflock);

                /*
                 * Cisco's VPN software requires that drivers be able to
                 * receive encapsulated frames that are larger than the MTU.
                 * Since we can't be sure how large a frame we'll get, set up
                 * to handle the largest one possible.
                 */
                sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                           min(sc->sc_cachelsz,
                                               (u16)64));
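
                /*
                 * Example: with a 32-byte cache line the buffer size becomes
                 * IEEE80211_MAX_MPDU_LEN rounded up to the next multiple of
                 * 32; with cache lines of 64 bytes or more the rounding
                 * granularity is capped at 64 by the min() above.
                 */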

                DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
                        __func__, sc->sc_cachelsz, sc->sc_rxbufsize);

                /* Initialize rx descriptors */

                error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
                                          "rx", nbufs, 1);
                if (error != 0) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: failed to allocate rx descriptors: %d\n",
                                __func__, error);
                        break;
                }

                /* Pre-allocate a wbuf for each rx buffer */

                list_for_each_entry(bf, &sc->sc_rxbuf, list) {
                        skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
                        if (skb == NULL) {
                                error = -ENOMEM;
                                break;
                        }

                        bf->bf_mpdu = skb;
                        bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
                                                         sc->sc_rxbufsize,
                                                         PCI_DMA_FROMDEVICE);
                        bf->bf_dmacontext = bf->bf_buf_addr;
                        ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
                }
                sc->sc_rxlink = NULL;

        } while (0);

        if (error)
                ath_rx_cleanup(sc);

        return error;
}

/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
        struct sk_buff *skb;
        struct ath_buf *bf;

        list_for_each_entry(bf, &sc->sc_rxbuf, list) {
                skb = bf->bf_mpdu;
                if (skb)
                        dev_kfree_skb(skb);
        }

        /* cleanup rx descriptors */

        if (sc->sc_rxdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /* Can't set HOSTAP into promiscuous mode */
        if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
             (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
                rfilt |= ATH9K_RX_FILTER_PROM;
                /* ??? To prevent from sending ACK */
                rfilt &= ~ATH9K_RX_FILTER_UCAST;
        }

        if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
             (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
            (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
                rfilt |= ATH9K_RX_FILTER_BEACON;

        /* If in HOSTAP mode, want to enable reception of PSPOLL frames
           & beacon frames */
        if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
                rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
        return rfilt;

#undef RX_FILTER_PRESERVE
}

/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        spin_lock_bh(&sc->sc_rxbuflock);
        if (list_empty(&sc->sc_rxbuf))
                goto start_recv;

        sc->sc_rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
                if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                        /* restarting h/w, no need for holding descriptors */
                        bf->bf_status &= ~ATH_BUFSTATUS_STALE;
                        /*
                         * Upper layer may not be done with the frame yet so
                         * we can't just re-queue it to hardware. Remove it
                         * from h/w queue. It'll be re-queued when upper layer
                         * returns the frame and ath_rx_requeue is called.
                         */
                        if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
                                list_del(&bf->list);
                                continue;
                        }
                }
                /* chain descriptors */
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->sc_rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);             /* enable recv descriptors */

start_recv:
        spin_unlock_bh(&sc->sc_rxbuflock);
        ath_opmode_init(sc);            /* set filters, etc. */
        ath9k_hw_startpcureceive(ah);   /* re-enable PCU/DMA engine */
        return 0;
}

/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        u64 tsf;
        bool stopped;

        ath9k_hw_stoppcurecv(ah);       /* disable PCU */
        ath9k_hw_setrxfilter(ah, 0);    /* clear recv filter */
        stopped = ath9k_hw_stopdmarecv(ah);     /* disable DMA engine */
        mdelay(3);                      /* 3ms is long enough for 1 frame */
        tsf = ath9k_hw_gettsf64(ah);
        sc->sc_rxlink = NULL;           /* just in case */
        return stopped;
}

/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
        /*
         * ath_rx_tasklet may be used to handle rx interrupt and flush receive
         * queue at the same time. Use a lock to serialize the access of rx
         * queue.
         * ath_rx_tasklet cannot hold the spinlock while indicating packets.
         * Instead, do not claim the spinlock but check for a flush in
         * progress (see references to sc_rxflush)
         */
        spin_lock_bh(&sc->sc_rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;

        ath_rx_tasklet(sc, 1);

        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->sc_rxflushlock);
}

/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
                 struct ath_node *an,
                 int is_ampdu,
                 struct sk_buff *skb,
                 struct ath_recv_status *rx_status,
                 enum ATH_RX_TYPE *status)
{
        if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
                *status = ATH_RX_CONSUMED;
                return ath_ampdu_input(sc, an, skb, rx_status);
        } else {
                *status = ATH_RX_NON_CONSUMED;
                return -1;
        }
}

/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
        ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +        \
                             ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

        struct ath_buf *bf, *bf_held = NULL;
        struct ath_desc *ds;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = NULL;
        struct ath_recv_status rx_status;
        struct ath_hal *ah = sc->sc_ah;
        int type, rx_processed = 0;
        u32 phyerr;
        u8 chainreset = 0;
        int retval;
        __le16 fc;

        do {
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                spin_lock_bh(&sc->sc_rxbuflock);
                if (list_empty(&sc->sc_rxbuf)) {
                        sc->sc_rxlink = NULL;
                        spin_unlock_bh(&sc->sc_rxbuflock);
                        break;
                }

                bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

                /*
                 * There is a race condition that BH gets scheduled after sw
                 * writes RxE and before hw re-loads the last descriptor to get
                 * the newly chained one. Software must keep the last DONE
                 * descriptor as a holding descriptor - software does so by
                 * marking it with the STALE flag.
                 */
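                /*
                 * Holding-descriptor flow in this loop, illustrated: suppose
                 * the list is [A (STALE), B, C].  A is only a placeholder,
                 * so processing moves on to B; once B is known to be done,
                 * A is unlinked (and re-queued if its FREE bit is set) and
                 * B becomes the new STALE holding descriptor for the next
                 * pass.
                 */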
                if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
                                /*
                                 * The holding descriptor is the last
                                 * descriptor in queue. It's safe to
                                 * remove the last holding descriptor
                                 * in BH context.
                                 */
                                list_del(&bf_held->list);
                                bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
                                sc->sc_rxlink = NULL;

                                if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
                                        list_add_tail(&bf_held->list,
                                                      &sc->sc_rxbuf);
                                        ath_rx_buf_link(sc, bf_held);
                                }
                                spin_unlock_bh(&sc->sc_rxbuflock);
                                break;
                        }
                        bf = list_entry(bf->list.next, struct ath_buf, list);
                }

                ds = bf->bf_desc;
                ++rx_processed;

                /*
                 * Must provide the virtual address of the current
                 * descriptor, the physical address, and the virtual
                 * address of the next descriptor in the h/w chain.
                 * This allows the HAL to look ahead to see if the
                 * hardware is done with a descriptor by checking the
                 * done bit in the following descriptor and the address
                 * of the current descriptor the DMA engine is working
                 * on. All this is necessary because of our use of
                 * a self-linked list to avoid rx overruns.
                 */
                retval = ath9k_hw_rxprocdesc(ah,
                                             ds,
                                             bf->bf_daddr,
                                             PA2DESC(sc, ds->ds_link),
                                             0);
                if (retval == -EINPROGRESS) {
                        struct ath_buf *tbf;
                        struct ath_desc *tds;

                        if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
                                spin_unlock_bh(&sc->sc_rxbuflock);
                                break;
                        }

                        tbf = list_entry(bf->list.next, struct ath_buf, list);

                        /*
                         * On some hardware the descriptor status words could
                         * get corrupted, including the done bit. Because of
                         * this, check if the next descriptor's done bit is
                         * set or not.
                         *
                         * If the next descriptor's done bit is set, the current
                         * descriptor has been corrupted. Force s/w to discard
                         * this descriptor and continue...
                         */

                        tds = tbf->bf_desc;
                        retval = ath9k_hw_rxprocdesc(ah,
                                                     tds, tbf->bf_daddr,
                                                     PA2DESC(sc, tds->ds_link), 0);
                        if (retval == -EINPROGRESS) {
                                spin_unlock_bh(&sc->sc_rxbuflock);
                                break;
                        }
                }

                /* XXX: we do not support frames spanning
                 * multiple descriptors */
                bf->bf_status |= ATH_BUFSTATUS_DONE;

                skb = bf->bf_mpdu;
                if (skb == NULL) {              /* XXX ??? can this happen */
                        spin_unlock_bh(&sc->sc_rxbuflock);
                        continue;
                }
                /*
                 * Now we know it's a completed frame, we can indicate the
                 * frame. Remove the previous holding descriptor and leave
                 * this one in the queue as the new holding descriptor.
                 */
                if (bf_held) {
                        list_del(&bf_held->list);
                        bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
                        if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
                                list_add_tail(&bf_held->list, &sc->sc_rxbuf);
                                /* try to requeue this descriptor */
                                ath_rx_buf_link(sc, bf_held);
                        }
                }

                bf->bf_status |= ATH_BUFSTATUS_STALE;
                bf_held = bf;
                /*
                 * Release the lock here in case ieee80211_input() returns
                 * the frame immediately by calling ath_rx_requeue().
                 */
                spin_unlock_bh(&sc->sc_rxbuflock);

                if (flush) {
                        /*
                         * If we're asked to flush receive queue, directly
                         * chain it back at the queue without processing it.
                         */
                        goto rx_next;
                }

                hdr = (struct ieee80211_hdr *)skb->data;
                fc = hdr->frame_control;
                memset(&rx_status, 0, sizeof(struct ath_recv_status));
f078f209 LR |
905 | |
906 | if (ds->ds_rxstat.rs_more) { | |
907 | /* | |
908 | * Frame spans multiple descriptors; this | |
909 | * cannot happen yet as we don't support | |
910 | * jumbograms. If not in monitor mode, | |
911 | * discard the frame. | |
912 | */ | |
913 | #ifndef ERROR_FRAMES | |
914 | /* | |
915 | * Enable this if you want to see | |
916 | * error frames in Monitor mode. | |
917 | */ | |
b4696c8b | 918 | if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR) |
f078f209 LR |
919 | goto rx_next; |
920 | #endif | |
921 | /* fall thru for monitor mode handling... */ | |
922 | } else if (ds->ds_rxstat.rs_status != 0) { | |
923 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) | |
924 | rx_status.flags |= ATH_RX_FCS_ERROR; | |
925 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { | |
926 | phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; | |
927 | goto rx_next; | |
928 | } | |
929 | ||
                        if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
                                /*
                                 * Decrypt error. We only mark packet status
                                 * here and always push the frame up to let
                                 * mac80211 handle the actual error case, be
                                 * it no decryption key or real decryption
                                 * error. This lets us keep statistics there.
                                 */
                                rx_status.flags |= ATH_RX_DECRYPT_ERROR;
                        } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
                                /*
                                 * Demic error. We only mark frame status here
                                 * and always push the frame up to let
                                 * mac80211 handle the actual error case. This
                                 * lets us keep statistics there. Hardware may
                                 * post a false-positive MIC error.
                                 */
                                if (ieee80211_is_ctl(fc))
                                        /*
                                         * Sometimes, we get invalid
                                         * MIC failures on valid control frames.
                                         * Remove these mic errors.
                                         */
                                        ds->ds_rxstat.rs_status &=
                                                ~ATH9K_RXERR_MIC;
                                else
                                        rx_status.flags |= ATH_RX_MIC_ERROR;
                        }
                        /*
                         * Reject error frames with the exception of
                         * decryption and MIC failures. For monitor mode,
                         * we also ignore the CRC error.
                         */
                        if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
                                if (ds->ds_rxstat.rs_status &
                                    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
                                      ATH9K_RXERR_CRC))
                                        goto rx_next;
                        } else {
                                if (ds->ds_rxstat.rs_status &
                                    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
                                        goto rx_next;
                                }
                        }
                }
                /*
                 * The status portion of the descriptor could get corrupted.
                 */
                if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
                        goto rx_next;
                /*
                 * Sync and unmap the frame. At this point we're
                 * committed to passing the sk_buff somewhere so
                 * clear buf_skb; this means a new sk_buff must be
                 * allocated when the rx descriptor is setup again
                 * to receive another frame.
                 */
                skb_put(skb, ds->ds_rxstat.rs_datalen);
                skb->protocol = cpu_to_be16(ETH_P_CONTROL);
                rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
                rx_status.rateieee =
                        sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
                rx_status.rateKbps =
                        sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
                rx_status.ratecode = ds->ds_rxstat.rs_rate;

                /* HT rate */
                if (rx_status.ratecode & 0x80) {
                        /* TODO - add table to avoid division */
                        if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
                                rx_status.flags |= ATH_RX_40MHZ;
                                rx_status.rateKbps =
                                        (rx_status.rateKbps * 27) / 13;
                        }
                        if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
                                rx_status.rateKbps =
                                        (rx_status.rateKbps * 10) / 9;
                        else
                                rx_status.flags |= ATH_RX_SHORT_GI;
                }
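
                /*
                 * The scaling factors above come from the HT PHY numbers:
                 * a 40 MHz channel uses 108 data subcarriers versus 52 in
                 * 20 MHz (108/52 == 27/13), and the short guard interval
                 * shortens the symbol time from 4.0 us to 3.6 us (10/9).
                 */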

                /* sc_noise_floor is only available when the station
                   attaches to an AP, so we use a default value
                   if we are not yet attached. */
                rx_status.abs_rssi =
                        ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;
f078f209 LR |
1016 | |
1017 | pci_dma_sync_single_for_cpu(sc->pdev, | |
1018 | bf->bf_buf_addr, | |
ca0c7e51 | 1019 | sc->sc_rxbufsize, |
f078f209 LR |
1020 | PCI_DMA_FROMDEVICE); |
1021 | pci_unmap_single(sc->pdev, | |
1022 | bf->bf_buf_addr, | |
1023 | sc->sc_rxbufsize, | |
1024 | PCI_DMA_FROMDEVICE); | |
1025 | ||
1026 | /* XXX: Ah! make me more readable, use a helper */ | |
60b67f51 | 1027 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { |
f078f209 LR |
1028 | if (ds->ds_rxstat.rs_moreaggr == 0) { |
1029 | rx_status.rssictl[0] = | |
1030 | ds->ds_rxstat.rs_rssi_ctl0; | |
1031 | rx_status.rssictl[1] = | |
1032 | ds->ds_rxstat.rs_rssi_ctl1; | |
1033 | rx_status.rssictl[2] = | |
1034 | ds->ds_rxstat.rs_rssi_ctl2; | |
1035 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | |
1036 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | |
1037 | rx_status.rssiextn[0] = | |
1038 | ds->ds_rxstat.rs_rssi_ext0; | |
1039 | rx_status.rssiextn[1] = | |
1040 | ds->ds_rxstat.rs_rssi_ext1; | |
1041 | rx_status.rssiextn[2] = | |
1042 | ds->ds_rxstat.rs_rssi_ext2; | |
1043 | rx_status.flags |= | |
1044 | ATH_RX_RSSI_EXTN_VALID; | |
1045 | } | |
1046 | rx_status.flags |= ATH_RX_RSSI_VALID | | |
1047 | ATH_RX_CHAIN_RSSI_VALID; | |
1048 | } | |
1049 | } else { | |
1050 | /* | |
1051 | * Need to insert the "combined" rssi into the | |
1052 | * status structure for upper layer processing | |
1053 | */ | |
1054 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | |
1055 | rx_status.flags |= ATH_RX_RSSI_VALID; | |
1056 | } | |
1057 | ||
1058 | /* Pass frames up to the stack. */ | |
1059 | ||
1060 | type = ath_rx_indicate(sc, skb, | |
1061 | &rx_status, ds->ds_rxstat.rs_keyix); | |
1062 | ||
1063 | /* | |
1064 | * change the default rx antenna if rx diversity chooses the | |
1065 | * other antenna 3 times in a row. | |
1066 | */ | |
1067 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { | |
1068 | if (++sc->sc_rxotherant >= 3) | |
1069 | ath_setdefantenna(sc, | |
1070 | ds->ds_rxstat.rs_antenna); | |
1071 | } else { | |
1072 | sc->sc_rxotherant = 0; | |
1073 | } | |
1074 | ||
1075 | #ifdef CONFIG_SLOW_ANT_DIV | |
1076 | if ((rx_status.flags & ATH_RX_RSSI_VALID) && | |
1077 | ieee80211_is_beacon(fc)) { | |
1078 | ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat); | |
1079 | } | |
1080 | #endif | |
                /*
                 * For frames successfully indicated, the buffer will be
                 * returned to us by upper layers by calling
                 * ath_rx_requeue, either synchronously or asynchronously.
                 * So we don't want to do it here in this loop.
                 */
                continue;

rx_next:
                bf->bf_status |= ATH_BUFSTATUS_FREE;
        } while (TRUE);

        if (chainreset) {
                DPRINTF(sc, ATH_DBG_CONFIG,
                        "%s: Reset rx chain mask. "
                        "Do internal reset\n", __func__);
                ASSERT(flush == 0);
                ath_reset(sc, false);
        }

        return 0;
#undef PA2DESC
}

/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
                      const u8 *addr,
                      u16 tid,
                      u16 *ssn)
{
        struct ath_arx_tid *rxtid;
        struct ath_node *an;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_supported_band *sband;
        u16 buffersize = 0;

        spin_lock_bh(&sc->node_lock);
        an = ath_node_find(sc, (u8 *) addr);
        spin_unlock_bh(&sc->node_lock);

        if (!an) {
                DPRINTF(sc, ATH_DBG_AGGR,
                        "%s: Node not found to initialize RX aggregation\n",
                        __func__);
                return -1;
        }

        sband = hw->wiphy->bands[hw->conf.channel->band];
        buffersize = IEEE80211_MIN_AMPDU_BUF <<
                sband->ht_info.ampdu_factor; /* FIXME */
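
        /*
         * The window used here is IEEE80211_MIN_AMPDU_BUF (8) frames scaled
         * up by the peer's A-MPDU factor; e.g. an ampdu_factor of 3 yields
         * a 64-frame block-ack window.
         */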

        rxtid = &an->an_aggr.rx.tid[tid];

        spin_lock_bh(&rxtid->tidlock);
        if (sc->sc_flags & SC_OP_RXAGGR) {
                /* Allow aggregation reception
                 * Adjust rx BA window size. Peer might indicate a
                 * zero buffer size for a _dont_care_ condition.
                 */
                if (buffersize)
                        rxtid->baw_size = min(buffersize, rxtid->baw_size);

                /* set rx sequence number */
                rxtid->seq_next = *ssn;

                /* Allocate the receive buffers for this TID */
                DPRINTF(sc, ATH_DBG_AGGR,
                        "%s: Allocating rxbuffer for TID %d\n", __func__, tid);

                if (rxtid->rxbuf == NULL) {
                        /*
                         * If rxbuf is not NULL at this point, we *probably*
                         * already allocated the buffer on a previous ADDBA,
                         * and this is a subsequent ADDBA that got through.
                         * Don't allocate, but use the value in the pointer,
                         * we zero it out when we de-allocate.
                         */
                        rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
                                sizeof(struct ath_rxbuf), GFP_ATOMIC);
                }
                if (rxtid->rxbuf == NULL) {
                        DPRINTF(sc, ATH_DBG_AGGR,
                                "%s: Unable to allocate RX buffer, "
                                "refusing ADDBA\n", __func__);
                } else {
                        /* Ensure the memory is zeroed out (all internal
                         * pointers are null) */
                        memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS *
                                sizeof(struct ath_rxbuf));
                        DPRINTF(sc, ATH_DBG_AGGR,
                                "%s: Allocated @%p\n", __func__, rxtid->rxbuf);

                        /* Allow aggregation reception */
                        rxtid->addba_exchangecomplete = 1;
                }
        }
        spin_unlock_bh(&rxtid->tidlock);

        return 0;
}

/* Process DELBA */

int ath_rx_aggr_stop(struct ath_softc *sc,
                     const u8 *addr,
                     u16 tid)
{
        struct ath_node *an;

        spin_lock_bh(&sc->node_lock);
        an = ath_node_find(sc, (u8 *) addr);
        spin_unlock_bh(&sc->node_lock);

        if (!an) {
                DPRINTF(sc, ATH_DBG_AGGR,
                        "%s: RX aggr stop for non-existent node\n", __func__);
                return -1;
        }

        ath_rx_aggr_teardown(sc, an, tid);
        return 0;
}

/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
                          struct ath_node *an, u8 tid)
{
        struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

        if (!rxtid->addba_exchangecomplete)
                return;

        del_timer_sync(&rxtid->timer);
        ath_rx_flush_tid(sc, rxtid, 0);
        rxtid->addba_exchangecomplete = 0;

        /* De-allocate the receive buffer array allocated when addba started */

        if (rxtid->rxbuf) {
                DPRINTF(sc, ATH_DBG_AGGR,
                        "%s: Deallocating TID %d rxbuf @%p\n",
                        __func__, tid, rxtid->rxbuf);
                kfree(rxtid->rxbuf);

                /* Set pointer to null to avoid reuse */
                rxtid->rxbuf = NULL;
        }
}

/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
        if (sc->sc_flags & SC_OP_RXAGGR) {
                struct ath_arx_tid *rxtid;
                int tidno;

                /* Init per tid rx state */
                for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
                     tidno < WME_NUM_TID;
                     tidno++, rxtid++) {
                        rxtid->an = an;
                        rxtid->seq_reset = 1;
                        rxtid->seq_next = 0;
                        rxtid->baw_size = WME_MAX_BA;
                        rxtid->baw_head = rxtid->baw_tail = 0;

                        /*
                         * Ensure the buffer pointer is null at this point
                         * (needs to be allocated when addba is received)
                         */

                        rxtid->rxbuf = NULL;
                        setup_timer(&rxtid->timer, ath_rx_timer,
                                    (unsigned long)rxtid);
                        spin_lock_init(&rxtid->tidlock);

                        /* ADDBA state */
                        rxtid->addba_exchangecomplete = 0;
                }
        }
}

void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
        if (sc->sc_flags & SC_OP_RXAGGR) {
                struct ath_arx_tid *rxtid;
                int tidno, i;

                /* Clean up per tid rx state */
                for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
                     tidno < WME_NUM_TID;
                     tidno++, rxtid++) {

                        if (!rxtid->addba_exchangecomplete)
                                continue;

                        /* must cancel timer first */
                        del_timer_sync(&rxtid->timer);

                        /* drop any pending sub-frames */
                        ath_rx_flush_tid(sc, rxtid, 1);

                        for (i = 0; i < ATH_TID_MAX_BUFS; i++)
                                ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

                        rxtid->addba_exchangecomplete = 0;
                }
        }
}

/* Cleanup per-node receive state */

void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
        ath_rx_node_cleanup(sc, an);
}