/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

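/*
 * Resolve which (virtual) wiphy a received frame belongs to by matching
 * addr1 against the permanent address of each secondary wiphy; fall back
 * to the primary wiphy's hw when no secondary interface matches.
 */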
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

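/*
 * Record a new default rx antenna and reset the "other antenna" counter
 * used by the rx diversity logic in ath_rx_tasklet().
 */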
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

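/*
 * Program the PCU for the current operating mode: rx filter, BSSID mask,
 * opmode, MAC address and multicast filter.
 */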
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

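/*
 * Hand one rx buffer from sc->rx.rxbuf to the EDMA hardware FIFO for the
 * given queue. The first rx_status_len bytes of the buffer are zeroed
 * and synced to the device, since the hardware reports rx status in-band
 * at the head of the buffer. Returns false when the FIFO is already full.
 */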
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

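/*
 * Fill the EDMA rx FIFO for a queue with up to 'size' buffers taken from
 * the free list, stopping early if either runs out.
 */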
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_rx_edma *rx_edma;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

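/*
 * Drain all buffers from an EDMA rx FIFO back onto the free list.
 */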
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

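/*
 * Tear down EDMA rx state: pull every buffer out of both hardware FIFOs,
 * free the skbs and release the ath_buf array allocated at init time.
 */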
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

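/*
 * Allocate and map rx buffers for EDMA chips. The buffer size is rounded
 * up to the cacheline size and includes room for the in-band rx status.
 * Buffers are mapped DMA_BIDIRECTIONAL because the status area is written
 * by the host before a buffer is handed back to the hardware.
 */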
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

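/*
 * Start EDMA reception: enable rx, prime both hardware FIFOs to their
 * full depth, then program the PCU and start receive.
 */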
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

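/*
 * Allocate rx DMA state. EDMA chips (ATH9K_HW_CAP_EDMA) get per-queue
 * FIFOs via ath_rx_edma_init(); legacy chips get a descriptor ring plus
 * one mapped skb per descriptor.
 */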
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					     min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			  common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	     AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

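/*
 * (Re)start reception. On EDMA chips this delegates to
 * ath_edma_start_recv(); on legacy chips it relinks every buffer on the
 * rx list into a fresh descriptor chain before enabling the PCU.
 */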
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

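/*
 * Stop reception: disable the PCU, clear the rx filter, then halt rx DMA.
 * Returns true if DMA stopped cleanly.
 */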
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}

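/*
 * Drain pending rx frames under SC_OP_RXFLUSH so that the tasklet
 * requeues buffers without handing frames up the stack. EDMA chips flush
 * both the high- and low-priority queues.
 */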
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

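/*
 * Handle a beacon received while waiting in powersave: sync beacon
 * timers if requested, and decide whether to stay awake for buffered
 * broadcast/multicast (CAB) traffic flagged in the DTIM.
 */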
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}

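/*
 * Powersave bookkeeping for each received frame: beacons, CAB frames and
 * PS-Poll responses can each clear one of the PS_WAIT_FOR_* flags that
 * keep the chip awake.
 */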
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

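/*
 * Deliver a completed frame to mac80211. Multicast frames are copied to
 * every active virtual wiphy (plus the primary hw); unicast frames go
 * only to the hw resolved from the receiver address.
 */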
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}

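/*
 * Pop completed frames off the head of an EDMA rx FIFO onto the local
 * rx_buffers queue. Returns false once the head descriptor is still in
 * progress; a corrupt descriptor (-EINVAL) causes that buffer and the
 * one following it to be requeued to the hardware.
 */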
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS)
		return false;

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

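/*
 * Drain all completed EDMA buffers, then hand back the oldest one with
 * its rx status decoded into *rs, or NULL if nothing has completed.
 */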
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   common->rx_bufsize,
				   DMA_FROM_DEVICE);

	return bf;
}

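/*
 * Main rx processing loop, run from the rx tasklet (and from
 * ath_flushrecv() with flush set). Each completed buffer is unmapped,
 * preprocessed, replaced with a freshly allocated skb, and handed to
 * mac80211 before its ath_buf is relinked to the hardware.
 */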
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;

	if (edma)
		dma_type = DMA_FROM_DEVICE;
	else
		dma_type = DMA_BIDIRECTIONAL;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
						     rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
					     rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
					     PS_WAIT_FOR_CAB |
					     PS_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}