/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

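/* Look up the rx ring sk_buff that was DMA-mapped at @paddr. In-order rx
 * keeps these buffers in a hash table keyed by the buffer's physical address.
 */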
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

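/* Post up to @num fresh rx buffers to the HTT rx ring and publish the new
 * alloc index so the target can consume them. Callers hold rx_ring.lock via
 * ath10k_htt_rx_ring_fill_n().
 */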
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

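/* Opportunistically top up the rx ring, at most ATH10K_HTT_MAX_NUM_REFILL
 * buffers per call; any remaining deficit is handled from the retry timer.
 */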
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

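/* Bring the rx ring up to its nominal fill level; on failure release
 * whatever was posted. HL (high latency) devices skip this entirely.
 */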
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

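/* Pop one MPDU's worth of MSDUs (including any chained buffers) off the rx
 * ring into @amsdu. Chained buffers carry payload only, no rx descriptor.
 */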
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

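/* Unmap and collect the MSDUs named by an in-order rx indication. The 32-bit
 * and 64-bit variants differ only in the width of the target physical
 * addresses carried in the event.
 */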
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

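/* Allocate the host rx ring state: the netbuf pointer array, the DMA-coherent
 * paddr ring shared with the target and the alloc index word. HL devices do
 * not use a host-filled rx ring and return early.
 */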
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

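/* Per-cipher header (IV/PN), MIC and ICV lengths. These determine how much
 * is stripped from, or preserved in, decapped frames below.
 */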
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

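/* Translate the PPDU start fields of the rx descriptor into mac80211 rate
 * info (legacy/HT/VHT, MCS, NSS, bandwidth, short GI).
 */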
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

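/* The rx descriptor does not carry the channel, so it is reconstructed from
 * the best available source: scan/rx channel, the peer's vdev, the vdev id
 * from the indication, any active channel context, or the target operating
 * channel (see ath10k_htt_rx_h_channel() below).
 */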
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

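/* Fill per-PPDU rx status (signal, channel, rates, A-MPDU reference and TSF)
 * based on the first/last MPDU attention markers of an A-MSDU chain.
 */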
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

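/* Undecap helpers: depending on the rx decap format the firmware delivers
 * raw 802.11, "native wifi", Ethernet II or 802.3/SNAP frames; each variant
 * is rebuilt into a plain 802.11 frame before being handed to mac80211.
 */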
581c25f8 MK |
1158 | static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, |
1159 | struct sk_buff *msdu, | |
1160 | struct ieee80211_rx_status *status, | |
1161 | enum htt_rx_mpdu_encrypt_type enctype, | |
1162 | bool is_decrypted) | |
5e3dd157 | 1163 | { |
581c25f8 | 1164 | struct ieee80211_hdr *hdr; |
5e3dd157 | 1165 | struct htt_rx_desc *rxd; |
581c25f8 MK |
1166 | size_t hdr_len; |
1167 | size_t crypto_len; | |
1168 | bool is_first; | |
1169 | bool is_last; | |
1170 | ||
1171 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1f5dbfbb | 1172 | is_first = !!(rxd->msdu_end.common.info0 & |
581c25f8 | 1173 | __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); |
1f5dbfbb | 1174 | is_last = !!(rxd->msdu_end.common.info0 & |
581c25f8 MK |
1175 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); |
1176 | ||
1177 | /* Delivered decapped frame: | |
1178 | * [802.11 header] | |
1179 | * [crypto param] <-- can be trimmed if !fcs_err && | |
1180 | * !decrypt_err && !peer_idx_invalid | |
1181 | * [amsdu header] <-- only if A-MSDU | |
1182 | * [rfc1042/llc] | |
1183 | * [payload] | |
1184 | * [FCS] <-- at end, needs to be trimmed | |
1185 | */ | |
1186 | ||
1187 | /* This probably shouldn't happen but warn just in case */ | |
7e41fb50 | 1188 | if (WARN_ON_ONCE(!is_first)) |
581c25f8 MK |
1189 | return; |
1190 | ||
1191 | /* This probably shouldn't happen but warn just in case */ | |
7e41fb50 | 1192 | if (WARN_ON_ONCE(!(is_first && is_last))) |
581c25f8 MK |
1193 | return; |
1194 | ||
1195 | skb_trim(msdu, msdu->len - FCS_LEN); | |
1196 | ||
1197 | /* In most cases this will be true for sniffed frames. It makes sense | |
ccec9038 DL |
1198 | * to deliver them as-is without stripping the crypto param. This is |
1199 | * necessary for software based decryption. | |
581c25f8 MK |
1200 | * |
1201 | * If there's no error then the frame is decrypted. At least that is | |
1202 | * the case for frames that come in via fragmented rx indication. | |
1203 | */ | |
1204 | if (!is_decrypted) | |
1205 | return; | |
1206 | ||
1207 | /* The payload is decrypted so strip crypto params. Start from tail | |
1208 | * since hdr is used to compute some stuff. | |
1209 | */ | |
1210 | ||
1211 | hdr = (void *)msdu->data; | |
1212 | ||
1213 | /* Tail */ | |
7eccb738 | 1214 | if (status->flag & RX_FLAG_IV_STRIPPED) { |
60549cab | 1215 | skb_trim(msdu, msdu->len - |
307aeb31 VT |
1216 | ath10k_htt_rx_crypto_mic_len(ar, enctype)); |
1217 | ||
1218 | skb_trim(msdu, msdu->len - | |
1219 | ath10k_htt_rx_crypto_icv_len(ar, enctype)); | |
7eccb738 VT |
1220 | } else { |
1221 | /* MIC */ | |
307aeb31 VT |
1222 | if (status->flag & RX_FLAG_MIC_STRIPPED) |
1223 | skb_trim(msdu, msdu->len - | |
1224 | ath10k_htt_rx_crypto_mic_len(ar, enctype)); | |
7eccb738 VT |
1225 | |
1226 | /* ICV */ | |
307aeb31 | 1227 | if (status->flag & RX_FLAG_ICV_STRIPPED) |
7eccb738 | 1228 | skb_trim(msdu, msdu->len - |
307aeb31 | 1229 | ath10k_htt_rx_crypto_icv_len(ar, enctype)); |
7eccb738 | 1230 | } |
581c25f8 MK |
1231 | |
1232 | /* MMIC */ | |
60549cab GB |
1233 | if ((status->flag & RX_FLAG_MMIC_STRIPPED) && |
1234 | !ieee80211_has_morefrags(hdr->frame_control) && | |
581c25f8 | 1235 | enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) |
307aeb31 | 1236 | skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); |
581c25f8 MK |
1237 | |
1238 | /* Head */ | |
60549cab GB |
1239 | if (status->flag & RX_FLAG_IV_STRIPPED) { |
1240 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
1241 | crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); | |
581c25f8 | 1242 | |
60549cab GB |
1243 | memmove((void *)msdu->data + crypto_len, |
1244 | (void *)msdu->data, hdr_len); | |
1245 | skb_pull(msdu, crypto_len); | |
1246 | } | |
581c25f8 MK |
1247 | } |
1248 | ||
1249 | static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, | |
1250 | struct sk_buff *msdu, | |
1251 | struct ieee80211_rx_status *status, | |
7eccb738 VT |
1252 | const u8 first_hdr[64], |
1253 | enum htt_rx_mpdu_encrypt_type enctype) | |
581c25f8 | 1254 | { |
f6dc2095 | 1255 | struct ieee80211_hdr *hdr; |
9e19e132 | 1256 | struct htt_rx_desc *rxd; |
581c25f8 MK |
1257 | size_t hdr_len; |
1258 | u8 da[ETH_ALEN]; | |
1259 | u8 sa[ETH_ALEN]; | |
9e19e132 | 1260 | int l3_pad_bytes; |
7eccb738 | 1261 | int bytes_aligned = ar->hw_params.decap_align_bytes; |
5e3dd157 | 1262 | |
581c25f8 MK |
1263 | /* Delivered decapped frame: |
1264 | * [nwifi 802.11 header] <-- replaced with 802.11 hdr | |
1265 | * [rfc1042/llc] | |
1266 | * | |
1267 | * Note: The nwifi header doesn't have QoS Control and is | |
1268 | * (always?) a 3addr frame. | |
1269 | * | |
1270 | * Note2: There's no A-MSDU subframe header. Even if it's part | |
1271 | * of an A-MSDU. | |
1272 | */ | |
9aa505d2 | 1273 | |
581c25f8 | 1274 | /* pull decapped header and copy SA & DA */ |
9e19e132 VT |
1275 | rxd = (void *)msdu->data - sizeof(*rxd); |
1276 | ||
1277 | l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); | |
1278 | skb_put(msdu, l3_pad_bytes); | |
1279 | ||
1280 | hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); | |
b8d55fca | 1281 | |
48f4ca34 | 1282 | hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); |
581c25f8 MK |
1283 | ether_addr_copy(da, ieee80211_get_DA(hdr)); |
1284 | ether_addr_copy(sa, ieee80211_get_SA(hdr)); | |
1285 | skb_pull(msdu, hdr_len); | |
5e3dd157 | 1286 | |
581c25f8 MK |
1287 | /* push original 802.11 header */ |
1288 | hdr = (struct ieee80211_hdr *)first_hdr; | |
f6dc2095 | 1289 | hdr_len = ieee80211_hdrlen(hdr->frame_control); |
7eccb738 VT |
1290 | |
1291 | if (!(status->flag & RX_FLAG_IV_STRIPPED)) { | |
1292 | memcpy(skb_push(msdu, | |
1293 | ath10k_htt_rx_crypto_param_len(ar, enctype)), | |
1294 | (void *)hdr + round_up(hdr_len, bytes_aligned), | |
1295 | ath10k_htt_rx_crypto_param_len(ar, enctype)); | |
1296 | } | |
1297 | ||
581c25f8 | 1298 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); |
5e3dd157 | 1299 | |
581c25f8 MK |
1300 | /* original 802.11 header has a different DA and in |
1301 | * case of 4addr it may also have different SA | |
1302 | */ | |
1303 | hdr = (struct ieee80211_hdr *)msdu->data; | |
1304 | ether_addr_copy(ieee80211_get_DA(hdr), da); | |
1305 | ether_addr_copy(ieee80211_get_SA(hdr), sa); | |
1306 | } | |
5e3dd157 | 1307 | |
581c25f8 MK |
1308 | static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, |
1309 | struct sk_buff *msdu, | |
1310 | enum htt_rx_mpdu_encrypt_type enctype) | |
1311 | { | |
1312 | struct ieee80211_hdr *hdr; | |
1313 | struct htt_rx_desc *rxd; | |
1314 | size_t hdr_len, crypto_len; | |
1315 | void *rfc1042; | |
1316 | bool is_first, is_last, is_amsdu; | |
2f38c3c0 | 1317 | int bytes_aligned = ar->hw_params.decap_align_bytes; |
e3fbf8d2 | 1318 | |
581c25f8 MK |
1319 | rxd = (void *)msdu->data - sizeof(*rxd); |
1320 | hdr = (void *)rxd->rx_hdr_status; | |
f6dc2095 | 1321 | |
1f5dbfbb | 1322 | is_first = !!(rxd->msdu_end.common.info0 & |
581c25f8 | 1323 | __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); |
1f5dbfbb | 1324 | is_last = !!(rxd->msdu_end.common.info0 & |
581c25f8 MK |
1325 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); |
1326 | is_amsdu = !(is_first && is_last); | |
5e3dd157 | 1327 | |
581c25f8 | 1328 | rfc1042 = hdr; |
5e3dd157 | 1329 | |
581c25f8 MK |
1330 | if (is_first) { |
1331 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
1332 | crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); | |
652de35e | 1333 | |
2f38c3c0 VT |
1334 | rfc1042 += round_up(hdr_len, bytes_aligned) + |
1335 | round_up(crypto_len, bytes_aligned); | |
f6dc2095 | 1336 | } |
5e3dd157 | 1337 | |
581c25f8 MK |
1338 | if (is_amsdu) |
1339 | rfc1042 += sizeof(struct amsdu_subframe_hdr); | |
1340 | ||
1341 | return rfc1042; | |
5e3dd157 KV |
1342 | } |
1343 | ||
581c25f8 MK |
1344 | static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, |
1345 | struct sk_buff *msdu, | |
1346 | struct ieee80211_rx_status *status, | |
1347 | const u8 first_hdr[64], | |
1348 | enum htt_rx_mpdu_encrypt_type enctype) | |
5e3dd157 | 1349 | { |
5e3dd157 | 1350 | struct ieee80211_hdr *hdr; |
581c25f8 MK |
1351 | struct ethhdr *eth; |
1352 | size_t hdr_len; | |
e3fbf8d2 | 1353 | void *rfc1042; |
581c25f8 MK |
1354 | u8 da[ETH_ALEN]; |
1355 | u8 sa[ETH_ALEN]; | |
9e19e132 VT |
1356 | int l3_pad_bytes; |
1357 | struct htt_rx_desc *rxd; | |
7eccb738 | 1358 | int bytes_aligned = ar->hw_params.decap_align_bytes; |
5e3dd157 | 1359 | |
581c25f8 MK |
1360 | /* Delivered decapped frame: |
1361 | * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc | |
1362 | * [payload] | |
1363 | */ | |
1364 | ||
1365 | rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); | |
1366 | if (WARN_ON_ONCE(!rfc1042)) | |
1367 | return; | |
1368 | ||
9e19e132 VT |
1369 | rxd = (void *)msdu->data - sizeof(*rxd); |
1370 | l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); | |
1371 | skb_put(msdu, l3_pad_bytes); | |
1372 | skb_pull(msdu, l3_pad_bytes); | |
1373 | ||
581c25f8 MK |
1374 | /* pull decapped header and copy SA & DA */ |
1375 | eth = (struct ethhdr *)msdu->data; | |
1376 | ether_addr_copy(da, eth->h_dest); | |
1377 | ether_addr_copy(sa, eth->h_source); | |
1378 | skb_pull(msdu, sizeof(struct ethhdr)); | |
1379 | ||
1380 | /* push rfc1042/llc/snap */ | |
1381 | memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, | |
1382 | sizeof(struct rfc1042_hdr)); | |
1383 | ||
1384 | /* push original 802.11 header */ | |
1385 | hdr = (struct ieee80211_hdr *)first_hdr; | |
1386 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
7eccb738 VT |
1387 | |
1388 | if (!(status->flag & RX_FLAG_IV_STRIPPED)) { | |
1389 | memcpy(skb_push(msdu, | |
1390 | ath10k_htt_rx_crypto_param_len(ar, enctype)), | |
1391 | (void *)hdr + round_up(hdr_len, bytes_aligned), | |
1392 | ath10k_htt_rx_crypto_param_len(ar, enctype)); | |
1393 | } | |
1394 | ||
581c25f8 MK |
1395 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); |
1396 | ||
1397 | /* original 802.11 header has a different DA and in | |
1398 | * case of 4addr it may also have different SA | |
1399 | */ | |
1400 | hdr = (struct ieee80211_hdr *)msdu->data; | |
1401 | ether_addr_copy(ieee80211_get_DA(hdr), da); | |
1402 | ether_addr_copy(ieee80211_get_SA(hdr), sa); | |
1403 | } | |
1404 | ||
1405 | static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, | |
1406 | struct sk_buff *msdu, | |
1407 | struct ieee80211_rx_status *status, | |
7eccb738 VT |
1408 | const u8 first_hdr[64], |
1409 | enum htt_rx_mpdu_encrypt_type enctype) | |
581c25f8 MK |
1410 | { |
1411 | struct ieee80211_hdr *hdr; | |
1412 | size_t hdr_len; | |
9e19e132 VT |
1413 | int l3_pad_bytes; |
1414 | struct htt_rx_desc *rxd; | |
7eccb738 | 1415 | int bytes_aligned = ar->hw_params.decap_align_bytes; |
581c25f8 MK |
1416 | |
1417 | /* Delivered decapped frame: | |
1418 | * [amsdu header] <-- replaced with 802.11 hdr | |
1419 | * [rfc1042/llc] | |
1420 | * [payload] | |
1421 | */ | |
1422 | ||
9e19e132 VT |
1423 | rxd = (void *)msdu->data - sizeof(*rxd); |
1424 | l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); | |
1425 | ||
1426 | skb_put(msdu, l3_pad_bytes); | |
1427 | skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); | |
581c25f8 MK |
1428 | |
1429 | hdr = (struct ieee80211_hdr *)first_hdr; | |
e3fbf8d2 | 1430 | hdr_len = ieee80211_hdrlen(hdr->frame_control); |
7eccb738 VT |
1431 | |
1432 | if (!(status->flag & RX_FLAG_IV_STRIPPED)) { | |
1433 | memcpy(skb_push(msdu, | |
1434 | ath10k_htt_rx_crypto_param_len(ar, enctype)), | |
1435 | (void *)hdr + round_up(hdr_len, bytes_aligned), | |
1436 | ath10k_htt_rx_crypto_param_len(ar, enctype)); | |
1437 | } | |
1438 | ||
581c25f8 MK |
1439 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); |
1440 | } | |
5e3dd157 | 1441 | |
581c25f8 MK |
1442 | static void ath10k_htt_rx_h_undecap(struct ath10k *ar, |
1443 | struct sk_buff *msdu, | |
1444 | struct ieee80211_rx_status *status, | |
1445 | u8 first_hdr[64], | |
1446 | enum htt_rx_mpdu_encrypt_type enctype, | |
1447 | bool is_decrypted) | |
1448 | { | |
1449 | struct htt_rx_desc *rxd; | |
1450 | enum rx_msdu_decap_format decap; | |
f6dc2095 | 1451 | |
581c25f8 MK |
1452 | /* First msdu's decapped header: |
1453 | * [802.11 header] <-- padded to 4 bytes long | |
1454 | * [crypto param] <-- padded to 4 bytes long | |
1455 | * [amsdu header] <-- only if A-MSDU | |
1456 | * [rfc1042/llc] | |
1457 | * | |
1458 | * Other (2nd, 3rd, ..) msdu's decapped header: | |
1459 | * [amsdu header] <-- only if A-MSDU | |
1460 | * [rfc1042/llc] | |
1461 | */ | |
1462 | ||
1463 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1f5dbfbb | 1464 | decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), |
581c25f8 MK |
1465 | RX_MSDU_START_INFO1_DECAP_FORMAT); |
1466 | ||
1467 | switch (decap) { | |
5e3dd157 | 1468 | case RX_MSDU_DECAP_RAW: |
581c25f8 MK |
1469 | ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, |
1470 | is_decrypted); | |
5e3dd157 KV |
1471 | break; |
1472 | case RX_MSDU_DECAP_NATIVE_WIFI: | |
7eccb738 VT |
1473 | ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, |
1474 | enctype); | |
5e3dd157 KV |
1475 | break; |
1476 | case RX_MSDU_DECAP_ETHERNET2_DIX: | |
581c25f8 | 1477 | ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); |
e3fbf8d2 MK |
1478 | break; |
1479 | case RX_MSDU_DECAP_8023_SNAP_LLC: | |
7eccb738 VT |
1480 | ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, |
1481 | enctype); | |
e3fbf8d2 | 1482 | break; |
5e3dd157 | 1483 | } |
5e3dd157 KV |
1484 | } |
1485 | ||
605f81aa MK |
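| /* Translate the hardware rx checksum verification bits into an skb |
|  * checksum state: CHECKSUM_UNNECESSARY is reported only for IPv4/IPv6 |
|  * TCP/UDP frames where both the IP and TCP/UDP checksums passed in |
|  * hardware; anything else is CHECKSUM_NONE so the stack verifies it. |
|  * For example, an IPv4 UDP frame with ip_csum_ok and tcpudp_csum_ok set |
|  * yields CHECKSUM_UNNECESSARY. |
|  */ |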
1486 | static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) |
1487 | { | |
1488 | struct htt_rx_desc *rxd; | |
1489 | u32 flags, info; | |
1490 | bool is_ip4, is_ip6; | |
1491 | bool is_tcp, is_udp; | |
1492 | bool ip_csum_ok, tcpudp_csum_ok; | |
1493 | ||
1494 | rxd = (void *)skb->data - sizeof(*rxd); | |
1495 | flags = __le32_to_cpu(rxd->attention.flags); | |
1f5dbfbb | 1496 | info = __le32_to_cpu(rxd->msdu_start.common.info1); |
605f81aa MK |
1497 | |
1498 | is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); | |
1499 | is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); | |
1500 | is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); | |
1501 | is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); | |
1502 | ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); | |
1503 | tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); | |
1504 | ||
1505 | if (!is_ip4 && !is_ip6) | |
1506 | return CHECKSUM_NONE; | |
1507 | if (!is_tcp && !is_udp) | |
1508 | return CHECKSUM_NONE; | |
1509 | if (!ip_csum_ok) | |
1510 | return CHECKSUM_NONE; | |
1511 | if (!tcpudp_csum_ok) | |
1512 | return CHECKSUM_NONE; | |
1513 | ||
1514 | return CHECKSUM_UNNECESSARY; | |
1515 | } | |
1516 | ||
581c25f8 MK |
1517 | static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) |
1518 | { | |
1519 | msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); | |
1520 | } | |
1521 | ||
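| /* Process one MPDU (possibly an A-MSDU) that has already been popped from |
|  * the rx ring: derive the encryption type and error bits from the first |
|  * and last MSDU rx descriptors, update the per-MPDU status flags |
|  * accordingly and undecap every MSDU in the queue back into an 802.11 |
|  * frame. |
|  */ |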
1522 | static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, | |
1523 | struct sk_buff_head *amsdu, | |
7eccb738 | 1524 | struct ieee80211_rx_status *status, |
caee728a VT |
1525 | bool fill_crypt_header, |
1526 | u8 *rx_hdr, | |
1527 | enum ath10k_pkt_rx_err *err) | |
581c25f8 MK |
1528 | { |
1529 | struct sk_buff *first; | |
1530 | struct sk_buff *last; | |
1531 | struct sk_buff *msdu; | |
1532 | struct htt_rx_desc *rxd; | |
1533 | struct ieee80211_hdr *hdr; | |
1534 | enum htt_rx_mpdu_encrypt_type enctype; | |
1535 | u8 first_hdr[64]; | |
1536 | u8 *qos; | |
581c25f8 MK |
1537 | bool has_fcs_err; |
1538 | bool has_crypto_err; | |
1539 | bool has_tkip_err; | |
1540 | bool has_peer_idx_invalid; | |
1541 | bool is_decrypted; | |
60549cab | 1542 | bool is_mgmt; |
581c25f8 MK |
1543 | u32 attention; |
1544 | ||
1545 | if (skb_queue_empty(amsdu)) | |
1546 | return; | |
1547 | ||
1548 | first = skb_peek(amsdu); | |
1549 | rxd = (void *)first->data - sizeof(*rxd); | |
1550 | ||
60549cab GB |
1551 | is_mgmt = !!(rxd->attention.flags & |
1552 | __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); | |
1553 | ||
581c25f8 MK |
1554 | enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), |
1555 | RX_MPDU_START_INFO0_ENCRYPT_TYPE); | |
1556 | ||
1557 | 	/* The first MSDU's Rx descriptor in an A-MSDU contains the full 802.11 |
1558 | 	 * decapped header. It'll be used for undecapping each MSDU. |
1559 | */ | |
1560 | hdr = (void *)rxd->rx_hdr_status; | |
7eccb738 | 1561 | memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); |
581c25f8 | 1562 | |
caee728a VT |
1563 | if (rx_hdr) |
1564 | memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); | |
1565 | ||
581c25f8 MK |
1566 | /* Each A-MSDU subframe will use the original header as the base and be |
1567 | * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. | |
1568 | */ | |
1569 | hdr = (void *)first_hdr; | |
7eccb738 VT |
1570 | |
1571 | if (ieee80211_is_data_qos(hdr->frame_control)) { | |
1572 | qos = ieee80211_get_qos_ctl(hdr); | |
1573 | qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; | |
1574 | } | |
581c25f8 MK |
1575 | |
1576 | /* Some attention flags are valid only in the last MSDU. */ | |
1577 | last = skb_peek_tail(amsdu); | |
1578 | rxd = (void *)last->data - sizeof(*rxd); | |
1579 | attention = __le32_to_cpu(rxd->attention.flags); | |
1580 | ||
1581 | has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); | |
1582 | has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); | |
1583 | has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); | |
1584 | has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); | |
1585 | ||
1586 | /* Note: If hardware captures an encrypted frame that it can't decrypt, | |
1587 | 	 * e.g. due to fcs error, missing peer or invalid key data, it will |
1588 | * report the frame as raw. | |
1589 | */ | |
1590 | is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && | |
1591 | !has_fcs_err && | |
1592 | !has_crypto_err && | |
1593 | !has_peer_idx_invalid); | |
1594 | ||
1595 | /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ | |
1596 | status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | | |
1597 | RX_FLAG_MMIC_ERROR | | |
1598 | RX_FLAG_DECRYPTED | | |
1599 | RX_FLAG_IV_STRIPPED | | |
60549cab | 1600 | RX_FLAG_ONLY_MONITOR | |
581c25f8 MK |
1601 | RX_FLAG_MMIC_STRIPPED); |
1602 | ||
1603 | if (has_fcs_err) | |
1604 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | |
1605 | ||
1606 | if (has_tkip_err) | |
1607 | status->flag |= RX_FLAG_MMIC_ERROR; | |
1608 | ||
caee728a VT |
1609 | if (err) { |
1610 | if (has_fcs_err) | |
1611 | *err = ATH10K_PKT_RX_ERR_FCS; | |
1612 | else if (has_tkip_err) | |
1613 | *err = ATH10K_PKT_RX_ERR_TKIP; | |
1614 | else if (has_crypto_err) | |
1615 | *err = ATH10K_PKT_RX_ERR_CRYPT; | |
1616 | else if (has_peer_idx_invalid) | |
1617 | *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; | |
1618 | } | |
1619 | ||
60549cab GB |
1620 | /* Firmware reports all necessary management frames via WMI already. |
1621 | * They are not reported to monitor interfaces at all so pass the ones | |
1622 | * coming via HTT to monitor interfaces instead. This simplifies | |
1623 | * matters a lot. | |
1624 | */ | |
1625 | if (is_mgmt) | |
1626 | status->flag |= RX_FLAG_ONLY_MONITOR; | |
1627 | ||
1628 | if (is_decrypted) { | |
1629 | status->flag |= RX_FLAG_DECRYPTED; | |
1630 | ||
1631 | if (likely(!is_mgmt)) | |
7eccb738 VT |
1632 | status->flag |= RX_FLAG_MMIC_STRIPPED; |
1633 | ||
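| 		/* When the caller asks for the crypto params (IV) to be kept |
| 		 * in the frame only the MIC/ICV are reported as stripped; |
| 		 * otherwise the IV is reported as stripped as well. |
| 		 */ |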
1634 | if (fill_crypt_header) | |
1635 | status->flag |= RX_FLAG_MIC_STRIPPED | | |
1636 | RX_FLAG_ICV_STRIPPED; | |
1637 | else | |
1638 | status->flag |= RX_FLAG_IV_STRIPPED; | |
1639 | } | |
581c25f8 MK |
1640 | |
1641 | skb_queue_walk(amsdu, msdu) { | |
1642 | ath10k_htt_rx_h_csum_offload(msdu); | |
1643 | ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, | |
1644 | is_decrypted); | |
1645 | ||
1646 | /* Undecapping involves copying the original 802.11 header back | |
1647 | * to sk_buff. If frame is protected and hardware has decrypted | |
1648 | * it then remove the protected bit. | |
1649 | */ | |
1650 | if (!is_decrypted) | |
1651 | continue; | |
60549cab GB |
1652 | if (is_mgmt) |
1653 | continue; | |
581c25f8 | 1654 | |
7eccb738 VT |
1655 | if (fill_crypt_header) |
1656 | continue; | |
1657 | ||
581c25f8 MK |
1658 | hdr = (void *)msdu->data; |
1659 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | |
1660 | } | |
1661 | } | |
1662 | ||
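| /* Hand each MSDU of the A-MSDU to mac80211. RX_FLAG_AMSDU_MORE is set on |
|  * all but the last subframe and RX_FLAG_ALLOW_SAME_PN is set on every |
|  * subframe after the first so that subframes sharing the first one's PN |
|  * don't trip replay detection. |
|  */ |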
deba1b9e | 1663 | static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, |
581c25f8 MK |
1664 | struct sk_buff_head *amsdu, |
1665 | struct ieee80211_rx_status *status) | |
1666 | { | |
1667 | struct sk_buff *msdu; | |
7eccb738 VT |
1668 | struct sk_buff *first_subframe; |
1669 | ||
1670 | first_subframe = skb_peek(amsdu); | |
581c25f8 MK |
1671 | |
1672 | while ((msdu = __skb_dequeue(amsdu))) { | |
1673 | /* Setup per-MSDU flags */ | |
1674 | if (skb_queue_empty(amsdu)) | |
1675 | status->flag &= ~RX_FLAG_AMSDU_MORE; | |
1676 | else | |
1677 | status->flag |= RX_FLAG_AMSDU_MORE; | |
1678 | ||
7eccb738 VT |
1679 | if (msdu == first_subframe) { |
1680 | first_subframe = NULL; | |
1681 | status->flag &= ~RX_FLAG_ALLOW_SAME_PN; | |
1682 | } else { | |
1683 | status->flag |= RX_FLAG_ALLOW_SAME_PN; | |
1684 | } | |
1685 | ||
deba1b9e | 1686 | ath10k_htt_rx_h_queue_msdu(ar, status, msdu); |
581c25f8 MK |
1687 | } |
1688 | } | |
1689 | ||
caee728a VT |
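| /* Coalesce a chained MSDU (one frame spread over several rx buffers) back |
|  * into the first skb so the rest of the rx path can treat it as a single |
|  * buffer. The head skb is expanded if its tailroom is too small. |
|  */ |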
1690 | static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, |
1691 | unsigned long int *unchain_cnt) | |
bfa35368 | 1692 | { |
9aa505d2 | 1693 | struct sk_buff *skb, *first; |
bfa35368 BG |
1694 | int space; |
1695 | int total_len = 0; | |
caee728a | 1696 | int amsdu_len = skb_queue_len(amsdu); |
bfa35368 BG |
1697 | |
1698 | 	/* TODO: We might be able to optimize this by using |
1699 | 	 * skb_try_coalesce() or a similar method to |
1700 | 	 * decrease copying, or maybe get mac80211 to |
1701 | 	 * provide a way to just receive a list of |
1702 | 	 * skbs? |
1703 | */ | |
1704 | ||
9aa505d2 | 1705 | first = __skb_dequeue(amsdu); |
bfa35368 BG |
1706 | |
1707 | /* Allocate total length all at once. */ | |
9aa505d2 MK |
1708 | skb_queue_walk(amsdu, skb) |
1709 | total_len += skb->len; | |
bfa35368 | 1710 | |
9aa505d2 | 1711 | space = total_len - skb_tailroom(first); |
bfa35368 | 1712 | if ((space > 0) && |
9aa505d2 | 1713 | (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { |
bfa35368 BG |
1714 | /* TODO: bump some rx-oom error stat */ |
1715 | /* put it back together so we can free the | |
1716 | * whole list at once. | |
1717 | */ | |
9aa505d2 | 1718 | __skb_queue_head(amsdu, first); |
bfa35368 BG |
1719 | return -1; |
1720 | } | |
1721 | ||
1722 | /* Walk list again, copying contents into | |
1723 | * msdu_head | |
1724 | */ | |
9aa505d2 MK |
1725 | while ((skb = __skb_dequeue(amsdu))) { |
1726 | skb_copy_from_linear_data(skb, skb_put(first, skb->len), | |
1727 | skb->len); | |
1728 | dev_kfree_skb_any(skb); | |
bfa35368 BG |
1729 | } |
1730 | ||
9aa505d2 | 1731 | __skb_queue_head(amsdu, first); |
caee728a VT |
1732 | |
1733 | *unchain_cnt += amsdu_len - 1; | |
1734 | ||
bfa35368 BG |
1735 | return 0; |
1736 | } | |
1737 | ||
581c25f8 | 1738 | static void ath10k_htt_rx_h_unchain(struct ath10k *ar, |
caee728a VT |
1739 | struct sk_buff_head *amsdu, |
1740 | unsigned long int *drop_cnt, | |
1741 | unsigned long int *unchain_cnt) | |
2acc4eb2 | 1742 | { |
581c25f8 MK |
1743 | struct sk_buff *first; |
1744 | struct htt_rx_desc *rxd; | |
1745 | enum rx_msdu_decap_format decap; | |
7aa7a72a | 1746 | |
581c25f8 MK |
1747 | first = skb_peek(amsdu); |
1748 | rxd = (void *)first->data - sizeof(*rxd); | |
1f5dbfbb | 1749 | decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), |
581c25f8 | 1750 | RX_MSDU_START_INFO1_DECAP_FORMAT); |
2acc4eb2 | 1751 | |
581c25f8 MK |
1752 | /* FIXME: Current unchaining logic can only handle simple case of raw |
1753 | * msdu chaining. If decapping is other than raw the chaining may be | |
1754 | * more complex and this isn't handled by the current code. Don't even | |
1755 | * try re-constructing such frames - it'll be pretty much garbage. | |
1756 | */ | |
1757 | if (decap != RX_MSDU_DECAP_RAW || | |
1758 | skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { | |
caee728a | 1759 | *drop_cnt += skb_queue_len(amsdu); |
581c25f8 MK |
1760 | __skb_queue_purge(amsdu); |
1761 | return; | |
2acc4eb2 JD |
1762 | } |
1763 | ||
caee728a | 1764 | ath10k_unchain_msdu(amsdu, unchain_cnt); |
581c25f8 MK |
1765 | } |
1766 | ||
1767 | static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, | |
1768 | struct sk_buff_head *amsdu, | |
1769 | struct ieee80211_rx_status *rx_status) | |
1770 | { | |
581c25f8 MK |
1771 | /* FIXME: It might be a good idea to do some fuzzy-testing to drop |
1772 | * invalid/dangerous frames. | |
1773 | */ | |
1774 | ||
1775 | if (!rx_status->freq) { | |
984eb905 | 1776 | ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); |
36653f05 JD |
1777 | return false; |
1778 | } | |
1779 | ||
581c25f8 MK |
1780 | if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { |
1781 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); | |
2acc4eb2 JD |
1782 | return false; |
1783 | } | |
1784 | ||
1785 | return true; | |
1786 | } | |
1787 | ||
581c25f8 MK |
1788 | static void ath10k_htt_rx_h_filter(struct ath10k *ar, |
1789 | struct sk_buff_head *amsdu, | |
caee728a VT |
1790 | struct ieee80211_rx_status *rx_status, |
1791 | unsigned long int *drop_cnt) | |
581c25f8 MK |
1792 | { |
1793 | if (skb_queue_empty(amsdu)) | |
1794 | return; | |
1795 | ||
1796 | if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) | |
1797 | return; | |
1798 | ||
caee728a VT |
1799 | if (drop_cnt) |
1800 | *drop_cnt += skb_queue_len(amsdu); | |
1801 | ||
581c25f8 MK |
1802 | __skb_queue_purge(amsdu); |
1803 | } | |
1804 | ||
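| /* Pop one (A-)MSDU from the rx ring and run it through the full rx path: |
|  * PPDU status, optional unchaining of chained rx buffers, filtering, MPDU |
|  * processing and delivery to mac80211, updating per-TID rx statistics on |
|  * the way out. |
|  */ |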
18235664 | 1805 | static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) |
5e3dd157 | 1806 | { |
7aa7a72a | 1807 | struct ath10k *ar = htt->ar; |
237e15df | 1808 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
9aa505d2 | 1809 | struct sk_buff_head amsdu; |
deba1b9e | 1810 | int ret; |
caee728a VT |
1811 | unsigned long int drop_cnt = 0; |
1812 | unsigned long int unchain_cnt = 0; | |
1813 | unsigned long int drop_cnt_filter = 0; | |
1814 | unsigned long int msdus_to_queue, num_msdus; | |
1815 | enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; | |
1816 | u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; | |
5e3dd157 | 1817 | |
18235664 | 1818 | __skb_queue_head_init(&amsdu); |
45967089 | 1819 | |
18235664 RM |
1820 | spin_lock_bh(&htt->rx_ring.lock); |
1821 | if (htt->rx_confused) { | |
1822 | spin_unlock_bh(&htt->rx_ring.lock); | |
1823 | return -EIO; | |
1824 | } | |
1825 | ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); | |
1826 | spin_unlock_bh(&htt->rx_ring.lock); | |
e0bd7513 | 1827 | |
18235664 RM |
1828 | if (ret < 0) { |
1829 | ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); | |
1830 | __skb_queue_purge(&amsdu); | |
1831 | /* FIXME: It's probably a good idea to reboot the | |
1832 | * device instead of leaving it inoperable. | |
1833 | */ | |
1834 | htt->rx_confused = true; | |
1835 | return ret; | |
1836 | } | |
1837 | ||
caee728a VT |
1838 | num_msdus = skb_queue_len(&amsdu); |
1839 | ||
237e15df | 1840 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); |
7543d116 MSS |
1841 | |
1842 | /* only for ret = 1 indicates chained msdus */ | |
1843 | if (ret > 0) | |
caee728a | 1844 | ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); |
7543d116 | 1845 | |
caee728a VT |
1846 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); |
1847 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); | |
1848 | msdus_to_queue = skb_queue_len(&amsdu); | |
deba1b9e | 1849 | ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); |
18235664 | 1850 | |
caee728a VT |
1851 | ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, |
1852 | unchain_cnt, drop_cnt, drop_cnt_filter, | |
1853 | msdus_to_queue); | |
1854 | ||
deba1b9e | 1855 | return 0; |
18235664 RM |
1856 | } |
1857 | ||
f88d4934 ES |
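| /* High-latency (HL) rx path: the indication carries the frame itself, so |
|  * strip the HTT headers and deliver the 802.11 frame straight to mac80211. |
|  * Returns false when the skb has been consumed, true when the caller must |
|  * free it. |
|  */ |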
1858 | static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, |
1859 | struct htt_rx_indication_hl *rx, | |
1860 | struct sk_buff *skb) | |
1861 | { | |
1862 | struct ath10k *ar = htt->ar; | |
1863 | struct ath10k_peer *peer; | |
1864 | struct htt_rx_indication_mpdu_range *mpdu_ranges; | |
1865 | struct fw_rx_desc_hl *fw_desc; | |
1866 | struct ieee80211_hdr *hdr; | |
1867 | struct ieee80211_rx_status *rx_status; | |
1868 | u16 peer_id; | |
1869 | u8 rx_desc_len; | |
1870 | int num_mpdu_ranges; | |
1871 | size_t tot_hdr_len; | |
1872 | struct ieee80211_channel *ch; | |
1873 | ||
1874 | peer_id = __le16_to_cpu(rx->hdr.peer_id); | |
1875 | ||
1876 | spin_lock_bh(&ar->data_lock); | |
1877 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
1878 | spin_unlock_bh(&ar->data_lock); | |
1879 | if (!peer) | |
1880 | ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); | |
1881 | ||
1882 | num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), | |
1883 | HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); | |
1884 | mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); | |
1885 | fw_desc = &rx->fw_desc; | |
1886 | rx_desc_len = fw_desc->len; | |
1887 | ||
1888 | /* I have not yet seen any case where num_mpdu_ranges > 1. | |
1889 | 	 * qcacld does not seem to handle that case either, so we introduce the |
1890 | 	 * same limitation here as well. |
1891 | */ | |
1892 | if (num_mpdu_ranges > 1) | |
1893 | ath10k_warn(ar, | |
1894 | "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", | |
1895 | num_mpdu_ranges); | |
1896 | ||
1897 | if (mpdu_ranges->mpdu_range_status != | |
1898 | HTT_RX_IND_MPDU_STATUS_OK) { | |
1899 | ath10k_warn(ar, "MPDU range status: %d\n", | |
1900 | mpdu_ranges->mpdu_range_status); | |
1901 | goto err; | |
1902 | } | |
1903 | ||
1904 | /* Strip off all headers before the MAC header before delivery to | |
1905 | * mac80211 | |
1906 | */ | |
1907 | tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + | |
1908 | sizeof(rx->ppdu) + sizeof(rx->prefix) + | |
1909 | sizeof(rx->fw_desc) + | |
1910 | sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; | |
1911 | skb_pull(skb, tot_hdr_len); | |
1912 | ||
1913 | hdr = (struct ieee80211_hdr *)skb->data; | |
1914 | rx_status = IEEE80211_SKB_RXCB(skb); | |
1915 | rx_status->chains |= BIT(0); | |
1916 | rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + | |
1917 | rx->ppdu.combined_rssi; | |
1918 | rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; | |
1919 | ||
1920 | spin_lock_bh(&ar->data_lock); | |
1921 | ch = ar->scan_channel; | |
1922 | if (!ch) | |
1923 | ch = ar->rx_channel; | |
1924 | if (!ch) | |
1925 | ch = ath10k_htt_rx_h_any_channel(ar); | |
1926 | if (!ch) | |
1927 | ch = ar->tgt_oper_chan; | |
1928 | spin_unlock_bh(&ar->data_lock); | |
1929 | ||
1930 | if (ch) { | |
1931 | rx_status->band = ch->band; | |
1932 | rx_status->freq = ch->center_freq; | |
1933 | } | |
1934 | if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) | |
1935 | rx_status->flag &= ~RX_FLAG_AMSDU_MORE; | |
1936 | else | |
1937 | rx_status->flag |= RX_FLAG_AMSDU_MORE; | |
1938 | ||
1939 | 	/* Not entirely sure about this, but all frames from the chipset have |
1940 | * the protected flag set even though they have already been decrypted. | |
1941 | * Unmasking this flag is necessary in order for mac80211 not to drop | |
1942 | * the frame. | |
1943 | * TODO: Verify this is always the case or find out a way to check | |
1944 | * if there has been hw decryption. | |
1945 | */ | |
1946 | if (ieee80211_has_protected(hdr->frame_control)) { | |
1947 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | |
1948 | rx_status->flag |= RX_FLAG_DECRYPTED | | |
1949 | RX_FLAG_IV_STRIPPED | | |
1950 | RX_FLAG_MMIC_STRIPPED; | |
1951 | } | |
1952 | ||
1953 | ieee80211_rx_ni(ar->hw, skb); | |
1954 | ||
1955 | /* We have delivered the skb to the upper layers (mac80211) so we | |
1956 | * must not free it. | |
1957 | */ | |
1958 | return false; | |
1959 | err: | |
1960 | /* Tell the caller that it must free the skb since we have not | |
1961 | * consumed it | |
1962 | */ | |
1963 | return true; | |
1964 | } | |
1965 | ||
1966 | static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, | |
1967 | struct htt_rx_indication *rx) | |
5e3dd157 | 1968 | { |
7aa7a72a | 1969 | struct ath10k *ar = htt->ar; |
5e3dd157 | 1970 | struct htt_rx_indication_mpdu_range *mpdu_ranges; |
5e3dd157 | 1971 | int num_mpdu_ranges; |
18235664 | 1972 | int i, mpdu_count = 0; |
caee728a VT |
1973 | u16 peer_id; |
1974 | u8 tid; | |
5e3dd157 KV |
1975 | |
1976 | num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), | |
1977 | HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); | |
caee728a VT |
1978 | peer_id = __le16_to_cpu(rx->hdr.peer_id); |
1979 | tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); | |
1980 | ||
5e3dd157 KV |
1981 | mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); |
1982 | ||
7aa7a72a | 1983 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", |
5e3dd157 KV |
1984 | rx, sizeof(*rx) + |
1985 | (sizeof(struct htt_rx_indication_mpdu_range) * | |
1986 | num_mpdu_ranges)); | |
1987 | ||
d540690d MK |
1988 | for (i = 0; i < num_mpdu_ranges; i++) |
1989 | mpdu_count += mpdu_ranges[i].mpdu_count; | |
1990 | ||
3128b3d8 | 1991 | atomic_add(mpdu_count, &htt->num_mpdus_ready); |
caee728a VT |
1992 | |
1993 | ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, | |
1994 | num_mpdu_ranges); | |
5e3dd157 KV |
1995 | } |
1996 | ||
59465fe4 | 1997 | static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, |
6c5151a9 MK |
1998 | struct sk_buff *skb) |
1999 | { | |
2000 | struct ath10k_htt *htt = &ar->htt; | |
2001 | struct htt_resp *resp = (struct htt_resp *)skb->data; | |
2002 | struct htt_tx_done tx_done = {}; | |
2003 | int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); | |
c7fd8d23 BP |
2004 | __le16 msdu_id, *msdus; |
2005 | bool rssi_enabled = false; | |
2006 | u8 msdu_count = 0; | |
6c5151a9 MK |
2007 | int i; |
2008 | ||
2009 | switch (status) { | |
2010 | case HTT_DATA_TX_STATUS_NO_ACK: | |
59465fe4 | 2011 | tx_done.status = HTT_TX_COMPL_STATE_NOACK; |
6c5151a9 MK |
2012 | break; |
2013 | case HTT_DATA_TX_STATUS_OK: | |
59465fe4 | 2014 | tx_done.status = HTT_TX_COMPL_STATE_ACK; |
6c5151a9 MK |
2015 | break; |
2016 | case HTT_DATA_TX_STATUS_DISCARD: | |
2017 | case HTT_DATA_TX_STATUS_POSTPONE: | |
2018 | case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: | |
59465fe4 | 2019 | tx_done.status = HTT_TX_COMPL_STATE_DISCARD; |
6c5151a9 MK |
2020 | break; |
2021 | default: | |
7aa7a72a | 2022 | ath10k_warn(ar, "unhandled tx completion status %d\n", status); |
59465fe4 | 2023 | tx_done.status = HTT_TX_COMPL_STATE_DISCARD; |
6c5151a9 MK |
2024 | break; |
2025 | } | |
2026 | ||
7aa7a72a | 2027 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", |
6c5151a9 MK |
2028 | resp->data_tx_completion.num_msdus); |
2029 | ||
c7fd8d23 BP |
2030 | msdu_count = resp->data_tx_completion.num_msdus; |
2031 | ||
2032 | if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) | |
2033 | rssi_enabled = true; | |
2034 | ||
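| 	/* Layout (inferred from the indexing below): msdus[] first carries |
| 	 * msdu_count MSDU ids, padded to an even count, and when ack RSSI |
| 	 * reporting is enabled an array of per-MSDU RSSI values follows. |
| 	 */ |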
2035 | for (i = 0; i < msdu_count; i++) { | |
2036 | msdus = resp->data_tx_completion.msdus; | |
2037 | msdu_id = msdus[i]; | |
6c5151a9 | 2038 | tx_done.msdu_id = __le16_to_cpu(msdu_id); |
59465fe4 | 2039 | |
c7fd8d23 BP |
2040 | if (rssi_enabled) { |
2041 | 			/* The total number of MSDUs should be even; |
2042 | 			 * if an odd number is sent, firmware fills the |
2043 | 			 * last msdu_id slot with 0xffff. |
2044 | */ | |
2045 | if (msdu_count & 0x01) { | |
2046 | msdu_id = msdus[msdu_count + i + 1]; | |
2047 | tx_done.ack_rssi = __le16_to_cpu(msdu_id); | |
2048 | } else { | |
2049 | msdu_id = msdus[msdu_count + i]; | |
2050 | tx_done.ack_rssi = __le16_to_cpu(msdu_id); | |
2051 | } | |
2052 | } | |
2053 | ||
59465fe4 RM |
2054 | /* kfifo_put: In practice firmware shouldn't fire off per-CE |
2055 | * interrupt and main interrupt (MSI/-X range case) for the same | |
2056 | * HTC service so it should be safe to use kfifo_put w/o lock. | |
2057 | * | |
2058 | * From kfifo_put() documentation: | |
2059 | * Note that with only one concurrent reader and one concurrent | |
2060 | * writer, you don't need extra locking to use these macro. | |
2061 | */ | |
2062 | if (!kfifo_put(&htt->txdone_fifo, tx_done)) { | |
2063 | ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", | |
2064 | tx_done.msdu_id, tx_done.status); | |
2065 | ath10k_txrx_tx_unref(htt, &tx_done); | |
2066 | } | |
6c5151a9 MK |
2067 | } |
2068 | } | |
2069 | ||
aa5b4fbc MK |
2070 | static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) |
2071 | { | |
2072 | struct htt_rx_addba *ev = &resp->rx_addba; | |
2073 | struct ath10k_peer *peer; | |
2074 | struct ath10k_vif *arvif; | |
2075 | u16 info0, tid, peer_id; | |
2076 | ||
2077 | info0 = __le16_to_cpu(ev->info0); | |
2078 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
2079 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
2080 | ||
7aa7a72a | 2081 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
2082 | "htt rx addba tid %hu peer_id %hu size %hhu\n", |
2083 | tid, peer_id, ev->window_size); | |
2084 | ||
2085 | spin_lock_bh(&ar->data_lock); | |
2086 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
2087 | if (!peer) { | |
7aa7a72a | 2088 | ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", |
aa5b4fbc MK |
2089 | peer_id); |
2090 | spin_unlock_bh(&ar->data_lock); | |
2091 | return; | |
2092 | } | |
2093 | ||
2094 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
2095 | if (!arvif) { | |
7aa7a72a | 2096 | ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", |
aa5b4fbc MK |
2097 | peer->vdev_id); |
2098 | spin_unlock_bh(&ar->data_lock); | |
2099 | return; | |
2100 | } | |
2101 | ||
7aa7a72a | 2102 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
2103 | "htt rx start rx ba session sta %pM tid %hu size %hhu\n", |
2104 | peer->addr, tid, ev->window_size); | |
2105 | ||
2106 | ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
2107 | spin_unlock_bh(&ar->data_lock); | |
2108 | } | |
2109 | ||
2110 | static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) | |
2111 | { | |
2112 | struct htt_rx_delba *ev = &resp->rx_delba; | |
2113 | struct ath10k_peer *peer; | |
2114 | struct ath10k_vif *arvif; | |
2115 | u16 info0, tid, peer_id; | |
2116 | ||
2117 | info0 = __le16_to_cpu(ev->info0); | |
2118 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
2119 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
2120 | ||
7aa7a72a | 2121 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
2122 | "htt rx delba tid %hu peer_id %hu\n", |
2123 | tid, peer_id); | |
2124 | ||
2125 | spin_lock_bh(&ar->data_lock); | |
2126 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
2127 | if (!peer) { | |
7aa7a72a | 2128 | 		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n", |
aa5b4fbc MK |
2129 | peer_id); |
2130 | spin_unlock_bh(&ar->data_lock); | |
2131 | return; | |
2132 | } | |
2133 | ||
2134 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
2135 | if (!arvif) { | |
7aa7a72a | 2136 | 		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n", |
aa5b4fbc MK |
2137 | peer->vdev_id); |
2138 | spin_unlock_bh(&ar->data_lock); | |
2139 | return; | |
2140 | } | |
2141 | ||
7aa7a72a | 2142 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
2143 | "htt rx stop rx ba session sta %pM tid %hu\n", |
2144 | peer->addr, tid); | |
2145 | ||
2146 | ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
2147 | spin_unlock_bh(&ar->data_lock); | |
2148 | } | |
2149 | ||
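| /* Move MSDUs from 'list' to 'amsdu' until one with the LAST_MSDU bit set |
|  * in its rx descriptor is found, i.e. extract exactly one A-MSDU. Returns |
|  * -EAGAIN (and puts the MSDUs back) if the list ends before the A-MSDU is |
|  * complete. |
|  */ |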
c545070e | 2150 | static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, |
e48e9c42 | 2151 | struct sk_buff_head *amsdu) |
c545070e MK |
2152 | { |
2153 | struct sk_buff *msdu; | |
2154 | struct htt_rx_desc *rxd; | |
2155 | ||
2156 | if (skb_queue_empty(list)) | |
2157 | return -ENOBUFS; | |
2158 | ||
2159 | if (WARN_ON(!skb_queue_empty(amsdu))) | |
2160 | return -EINVAL; | |
2161 | ||
e48e9c42 | 2162 | while ((msdu = __skb_dequeue(list))) { |
c545070e MK |
2163 | __skb_queue_tail(amsdu, msdu); |
2164 | ||
2165 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1f5dbfbb | 2166 | if (rxd->msdu_end.common.info0 & |
c545070e MK |
2167 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) |
2168 | break; | |
2169 | } | |
2170 | ||
2171 | msdu = skb_peek_tail(amsdu); | |
2172 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1f5dbfbb | 2173 | if (!(rxd->msdu_end.common.info0 & |
c545070e MK |
2174 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { |
2175 | skb_queue_splice_init(amsdu, list); | |
2176 | return -EAGAIN; | |
2177 | } | |
2178 | ||
2179 | return 0; | |
2180 | } | |
2181 | ||
2182 | static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, | |
2183 | struct sk_buff *skb) | |
2184 | { | |
2185 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | |
2186 | ||
2187 | if (!ieee80211_has_protected(hdr->frame_control)) | |
2188 | return; | |
2189 | ||
2190 | /* Offloaded frames are already decrypted but firmware insists they are | |
2191 | * protected in the 802.11 header. Strip the flag. Otherwise mac80211 | |
2192 | * will drop the frame. | |
2193 | */ | |
2194 | ||
2195 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | |
2196 | status->flag |= RX_FLAG_DECRYPTED | | |
2197 | RX_FLAG_IV_STRIPPED | | |
2198 | RX_FLAG_MMIC_STRIPPED; | |
2199 | } | |
2200 | ||
deba1b9e RM |
2201 | static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, |
2202 | struct sk_buff_head *list) | |
c545070e MK |
2203 | { |
2204 | struct ath10k_htt *htt = &ar->htt; | |
2205 | struct ieee80211_rx_status *status = &htt->rx_status; | |
2206 | struct htt_rx_offload_msdu *rx; | |
2207 | struct sk_buff *msdu; | |
2208 | size_t offset; | |
2209 | ||
2210 | while ((msdu = __skb_dequeue(list))) { | |
2211 | /* Offloaded frames don't have Rx descriptor. Instead they have | |
2212 | * a short meta information header. | |
2213 | */ | |
2214 | ||
2215 | rx = (void *)msdu->data; | |
2216 | ||
2217 | skb_put(msdu, sizeof(*rx)); | |
2218 | skb_pull(msdu, sizeof(*rx)); | |
2219 | ||
2220 | if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { | |
2221 | ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); | |
2222 | dev_kfree_skb_any(msdu); | |
2223 | continue; | |
2224 | } | |
2225 | ||
2226 | skb_put(msdu, __le16_to_cpu(rx->msdu_len)); | |
2227 | ||
2228 | 		/* Offloaded rx header length isn't a multiple of 2 or 4, so the |
2229 | * actual payload is unaligned. Align the frame. Otherwise | |
2230 | * mac80211 complains. This shouldn't reduce performance much | |
2231 | * because these offloaded frames are rare. | |
2232 | */ | |
2233 | offset = 4 - ((unsigned long)msdu->data & 3); | |
2234 | skb_put(msdu, offset); | |
2235 | memmove(msdu->data + offset, msdu->data, msdu->len); | |
2236 | skb_pull(msdu, offset); | |
2237 | ||
2238 | /* FIXME: The frame is NWifi. Re-construct QoS Control | |
2239 | * if possible later. | |
2240 | */ | |
2241 | ||
2242 | memset(status, 0, sizeof(*status)); | |
2243 | status->flag |= RX_FLAG_NO_SIGNAL_VAL; | |
2244 | ||
2245 | ath10k_htt_rx_h_rx_offload_prot(status, msdu); | |
500ff9f9 | 2246 | ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); |
deba1b9e | 2247 | ath10k_htt_rx_h_queue_msdu(ar, status, msdu); |
c545070e MK |
2248 | } |
2249 | } | |
2250 | ||
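| /* Handle an in-order rx indication: the target hands back a list of rx |
|  * buffer physical addresses already in delivery order. Each referenced |
|  * buffer is popped from the rx ring, grouped into A-MSDUs and processed |
|  * through the regular rx path (offloaded frames take a separate path). |
|  */ |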
e48e9c42 | 2251 | static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) |
c545070e MK |
2252 | { |
2253 | struct ath10k_htt *htt = &ar->htt; | |
2254 | struct htt_resp *resp = (void *)skb->data; | |
2255 | struct ieee80211_rx_status *status = &htt->rx_status; | |
2256 | struct sk_buff_head list; | |
2257 | struct sk_buff_head amsdu; | |
2258 | u16 peer_id; | |
2259 | u16 msdu_count; | |
2260 | u8 vdev_id; | |
2261 | u8 tid; | |
2262 | bool offload; | |
2263 | bool frag; | |
deba1b9e | 2264 | int ret; |
c545070e MK |
2265 | |
2266 | lockdep_assert_held(&htt->rx_ring.lock); | |
2267 | ||
2268 | if (htt->rx_confused) | |
3c97f5de | 2269 | return -EIO; |
c545070e MK |
2270 | |
2271 | skb_pull(skb, sizeof(resp->hdr)); | |
2272 | skb_pull(skb, sizeof(resp->rx_in_ord_ind)); | |
2273 | ||
2274 | peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); | |
2275 | msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); | |
2276 | vdev_id = resp->rx_in_ord_ind.vdev_id; | |
2277 | tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); | |
2278 | offload = !!(resp->rx_in_ord_ind.info & | |
2279 | HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); | |
2280 | frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); | |
2281 | ||
2282 | ath10k_dbg(ar, ATH10K_DBG_HTT, | |
2283 | "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", | |
2284 | vdev_id, peer_id, tid, offload, frag, msdu_count); | |
2285 | ||
3b0b55b1 | 2286 | if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { |
c545070e | 2287 | ath10k_warn(ar, "dropping invalid in order rx indication\n"); |
3c97f5de | 2288 | return -EINVAL; |
c545070e MK |
2289 | } |
2290 | ||
2291 | /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later | |
2292 | * extracted and processed. | |
2293 | */ | |
2294 | __skb_queue_head_init(&list); | |
3b0b55b1 GS |
2295 | if (ar->hw_params.target_64bit) |
2296 | ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, | |
2297 | &list); | |
2298 | else | |
2299 | ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, | |
2300 | &list); | |
2301 | ||
c545070e MK |
2302 | if (ret < 0) { |
2303 | ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); | |
2304 | htt->rx_confused = true; | |
3c97f5de | 2305 | return -EIO; |
c545070e MK |
2306 | } |
2307 | ||
2308 | /* Offloaded frames are very different and need to be handled | |
2309 | * separately. | |
2310 | */ | |
2311 | if (offload) | |
deba1b9e | 2312 | ath10k_htt_rx_h_rx_offload(ar, &list); |
c545070e | 2313 | |
e48e9c42 | 2314 | while (!skb_queue_empty(&list)) { |
c545070e | 2315 | __skb_queue_head_init(&amsdu); |
e48e9c42 | 2316 | ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); |
c545070e MK |
2317 | switch (ret) { |
2318 | case 0: | |
2319 | /* Note: The in-order indication may report interleaved | |
2320 | * frames from different PPDUs meaning reported rx rate | |
2321 | * to mac80211 isn't accurate/reliable. It's still | |
2322 | * better to report something than nothing though. This | |
2323 | * should still give an idea about rx rate to the user. | |
2324 | */ | |
500ff9f9 | 2325 | ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); |
caee728a VT |
2326 | ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); |
2327 | ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, | |
2328 | NULL); | |
deba1b9e | 2329 | ath10k_htt_rx_h_enqueue(ar, &amsdu, status); |
c545070e MK |
2330 | break; |
2331 | case -EAGAIN: | |
2332 | /* fall through */ | |
2333 | default: | |
2334 | /* Should not happen. */ | |
2335 | ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); | |
2336 | htt->rx_confused = true; | |
2337 | __skb_queue_purge(&list); | |
3c97f5de | 2338 | return -EIO; |
c545070e MK |
2339 | } |
2340 | } | |
deba1b9e | 2341 | return ret; |
c545070e MK |
2342 | } |
2343 | ||
839ae637 MK |
2344 | static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, |
2345 | const __le32 *resp_ids, | |
2346 | int num_resp_ids) | |
2347 | { | |
2348 | int i; | |
2349 | u32 resp_id; | |
2350 | ||
2351 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", | |
2352 | num_resp_ids); | |
2353 | ||
2354 | for (i = 0; i < num_resp_ids; i++) { | |
2355 | resp_id = le32_to_cpu(resp_ids[i]); | |
2356 | ||
2357 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", | |
2358 | resp_id); | |
2359 | ||
2360 | /* TODO: free resp_id */ | |
2361 | } | |
2362 | } | |
2363 | ||
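| /* Pull-mode tx: firmware asks the host to push up to num_msdus/num_bytes |
|  * worth of frames from specific peer/TID queues. The records are updated |
|  * with what was actually pushed and sent back in a tx_fetch_resp. |
|  */ |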
2364 | static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) | |
2365 | { | |
426e10ea MK |
2366 | struct ieee80211_hw *hw = ar->hw; |
2367 | struct ieee80211_txq *txq; | |
839ae637 MK |
2368 | struct htt_resp *resp = (struct htt_resp *)skb->data; |
2369 | struct htt_tx_fetch_record *record; | |
2370 | size_t len; | |
2371 | size_t max_num_bytes; | |
2372 | size_t max_num_msdus; | |
426e10ea MK |
2373 | size_t num_bytes; |
2374 | size_t num_msdus; | |
839ae637 MK |
2375 | const __le32 *resp_ids; |
2376 | u16 num_records; | |
2377 | u16 num_resp_ids; | |
2378 | u16 peer_id; | |
2379 | u8 tid; | |
426e10ea | 2380 | int ret; |
839ae637 MK |
2381 | int i; |
2382 | ||
2383 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); | |
2384 | ||
2385 | len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); | |
2386 | if (unlikely(skb->len < len)) { | |
2387 | ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); | |
2388 | return; | |
2389 | } | |
2390 | ||
2391 | num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); | |
2392 | num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); | |
2393 | ||
2394 | len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; | |
2395 | len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; | |
2396 | ||
2397 | if (unlikely(skb->len < len)) { | |
2398 | ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); | |
2399 | return; | |
2400 | } | |
2401 | ||
2402 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", | |
2403 | num_records, num_resp_ids, | |
2404 | le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); | |
2405 | ||
426e10ea MK |
2406 | if (!ar->htt.tx_q_state.enabled) { |
2407 | ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); | |
2408 | return; | |
2409 | } | |
2410 | ||
2411 | if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { | |
2412 | ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); | |
2413 | return; | |
2414 | } | |
2415 | ||
2416 | rcu_read_lock(); | |
839ae637 MK |
2417 | |
2418 | for (i = 0; i < num_records; i++) { | |
2419 | record = &resp->tx_fetch_ind.records[i]; | |
2420 | peer_id = MS(le16_to_cpu(record->info), | |
2421 | HTT_TX_FETCH_RECORD_INFO_PEER_ID); | |
2422 | tid = MS(le16_to_cpu(record->info), | |
2423 | HTT_TX_FETCH_RECORD_INFO_TID); | |
2424 | max_num_msdus = le16_to_cpu(record->num_msdus); | |
2425 | max_num_bytes = le32_to_cpu(record->num_bytes); | |
2426 | ||
2427 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", | |
2428 | i, peer_id, tid, max_num_msdus, max_num_bytes); | |
2429 | ||
2430 | if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || | |
2431 | unlikely(tid >= ar->htt.tx_q_state.num_tids)) { | |
2432 | ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", | |
2433 | peer_id, tid); | |
2434 | continue; | |
2435 | } | |
2436 | ||
426e10ea MK |
2437 | spin_lock_bh(&ar->data_lock); |
2438 | txq = ath10k_mac_txq_lookup(ar, peer_id, tid); | |
2439 | spin_unlock_bh(&ar->data_lock); | |
2440 | ||
2441 | /* It is okay to release the lock and use txq because RCU read | |
2442 | * lock is held. | |
2443 | */ | |
2444 | ||
2445 | if (unlikely(!txq)) { | |
2446 | ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", | |
2447 | peer_id, tid); | |
2448 | continue; | |
2449 | } | |
2450 | ||
2451 | num_msdus = 0; | |
2452 | num_bytes = 0; | |
2453 | ||
2454 | while (num_msdus < max_num_msdus && | |
2455 | num_bytes < max_num_bytes) { | |
2456 | ret = ath10k_mac_tx_push_txq(hw, txq); | |
2457 | if (ret < 0) | |
2458 | break; | |
2459 | ||
2460 | num_msdus++; | |
2461 | num_bytes += ret; | |
2462 | } | |
2463 | ||
2464 | record->num_msdus = cpu_to_le16(num_msdus); | |
2465 | record->num_bytes = cpu_to_le32(num_bytes); | |
2466 | ||
2467 | ath10k_htt_tx_txq_recalc(hw, txq); | |
839ae637 MK |
2468 | } |
2469 | ||
426e10ea MK |
2470 | rcu_read_unlock(); |
2471 | ||
839ae637 MK |
2472 | resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); |
2473 | ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); | |
2474 | ||
426e10ea MK |
2475 | ret = ath10k_htt_tx_fetch_resp(ar, |
2476 | resp->tx_fetch_ind.token, | |
2477 | resp->tx_fetch_ind.fetch_seq_num, | |
2478 | resp->tx_fetch_ind.records, | |
2479 | num_records); | |
2480 | if (unlikely(ret)) { | |
2481 | ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", | |
2482 | le32_to_cpu(resp->tx_fetch_ind.token), ret); | |
2483 | /* FIXME: request fw restart */ | |
2484 | } | |
c545070e | 2485 | |
426e10ea | 2486 | ath10k_htt_tx_txq_sync(ar); |
c545070e MK |
2487 | } |
2488 | ||
839ae637 MK |
2489 | static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, |
2490 | struct sk_buff *skb) | |
2491 | { | |
2492 | const struct htt_resp *resp = (void *)skb->data; | |
2493 | size_t len; | |
2494 | int num_resp_ids; | |
2495 | ||
2496 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); | |
2497 | ||
2498 | len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); | |
2499 | if (unlikely(skb->len < len)) { | |
2500 | ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); | |
2501 | return; | |
2502 | } | |
2503 | ||
2504 | num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); | |
2505 | len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; | |
2506 | ||
2507 | if (unlikely(skb->len < len)) { | |
2508 | ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); | |
2509 | return; | |
2510 | } | |
2511 | ||
2512 | ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, | |
2513 | resp->tx_fetch_confirm.resp_ids, | |
2514 | num_resp_ids); | |
2515 | } | |
2516 | ||
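| /* Firmware request to switch between push and push/pull tx modes. Updates |
|  * the global tx queue state and, for each record, the per-txq number of |
|  * frames the host is allowed to push. |
|  */ |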
2517 | static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, | |
2518 | struct sk_buff *skb) | |
2519 | { | |
2520 | const struct htt_resp *resp = (void *)skb->data; | |
2521 | const struct htt_tx_mode_switch_record *record; | |
426e10ea MK |
2522 | struct ieee80211_txq *txq; |
2523 | struct ath10k_txq *artxq; | |
839ae637 MK |
2524 | size_t len; |
2525 | size_t num_records; | |
2526 | enum htt_tx_mode_switch_mode mode; | |
2527 | bool enable; | |
2528 | u16 info0; | |
2529 | u16 info1; | |
2530 | u16 threshold; | |
2531 | u16 peer_id; | |
2532 | u8 tid; | |
2533 | int i; | |
2534 | ||
2535 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); | |
2536 | ||
2537 | len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); | |
2538 | if (unlikely(skb->len < len)) { | |
2539 | ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); | |
2540 | return; | |
2541 | } | |
2542 | ||
2543 | info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); | |
2544 | info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); | |
2545 | ||
2546 | enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); | |
2547 | 	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS); |
2548 | mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); | |
2549 | threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); | |
2550 | ||
2551 | ath10k_dbg(ar, ATH10K_DBG_HTT, | |
2552 | "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", | |
2553 | info0, info1, enable, num_records, mode, threshold); | |
2554 | ||
2555 | len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; | |
2556 | ||
2557 | if (unlikely(skb->len < len)) { | |
2558 | ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); | |
2559 | return; | |
2560 | } | |
2561 | ||
2562 | switch (mode) { | |
2563 | case HTT_TX_MODE_SWITCH_PUSH: | |
2564 | case HTT_TX_MODE_SWITCH_PUSH_PULL: | |
2565 | break; | |
2566 | default: | |
2567 | ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", | |
2568 | mode); | |
2569 | return; | |
2570 | } | |
2571 | ||
2572 | if (!enable) | |
2573 | return; | |
2574 | ||
426e10ea MK |
2575 | ar->htt.tx_q_state.enabled = enable; |
2576 | ar->htt.tx_q_state.mode = mode; | |
2577 | ar->htt.tx_q_state.num_push_allowed = threshold; | |
2578 | ||
2579 | rcu_read_lock(); | |
839ae637 MK |
2580 | |
2581 | for (i = 0; i < num_records; i++) { | |
2582 | record = &resp->tx_mode_switch_ind.records[i]; | |
2583 | info0 = le16_to_cpu(record->info0); | |
2584 | peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); | |
2585 | tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); | |
2586 | ||
2587 | if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || | |
2588 | unlikely(tid >= ar->htt.tx_q_state.num_tids)) { | |
2589 | ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", | |
2590 | peer_id, tid); | |
2591 | continue; | |
2592 | } | |
2593 | ||
426e10ea MK |
2594 | spin_lock_bh(&ar->data_lock); |
2595 | txq = ath10k_mac_txq_lookup(ar, peer_id, tid); | |
2596 | spin_unlock_bh(&ar->data_lock); | |
2597 | ||
2598 | /* It is okay to release the lock and use txq because RCU read | |
2599 | * lock is held. | |
2600 | */ | |
2601 | ||
2602 | if (unlikely(!txq)) { | |
2603 | ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", | |
2604 | peer_id, tid); | |
2605 | continue; | |
2606 | } | |
2607 | ||
2608 | spin_lock_bh(&ar->htt.tx_lock); | |
2609 | artxq = (void *)txq->drv_priv; | |
2610 | artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); | |
2611 | spin_unlock_bh(&ar->htt.tx_lock); | |
839ae637 MK |
2612 | } |
2613 | ||
426e10ea MK |
2614 | rcu_read_unlock(); |
2615 | ||
2616 | ath10k_mac_tx_push_pending(ar); | |
839ae637 MK |
2617 | } |
2618 | ||
e3a91f87 RM |
2619 | void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) |
2620 | { | |
2621 | bool release; | |
2622 | ||
2623 | release = ath10k_htt_t2h_msg_handler(ar, skb); | |
2624 | ||
2625 | /* Free the indication buffer */ | |
2626 | if (release) | |
2627 | dev_kfree_skb_any(skb); | |
2628 | } | |
2629 | ||
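| /* Map a legacy (CCK/OFDM) rate in Mbps to its index in legacy_rates[], |
|  * e.g. 11 Mbps -> index 3. Note that 5.5 Mbps is represented as 5. |
|  */ |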
0189dbd7 | 2630 | static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) |
cec17c38 AK |
2631 | { |
2632 | static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, | |
2633 | 18, 24, 36, 48, 54}; | |
2634 | int i; | |
2635 | ||
2636 | for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { | |
2637 | if (rate == legacy_rates[i]) | |
0189dbd7 | 2638 | return i; |
cec17c38 AK |
2639 | } |
2640 | ||
0189dbd7 AK |
2641 | 	ath10k_warn(ar, "Invalid legacy rate %hhd in peer stats", rate); |
2642 | return -EINVAL; | |
cec17c38 AK |
2643 | } |
2644 | ||
a904417f AK |
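| /* Accumulate extended per-peer tx statistics (bytes/packets per MCS, |
|  * bandwidth, NSS, guard interval and rate-table entry) for the success, |
|  * fail, retry and A-MPDU counters kept in arsta->tx_stats. |
|  */ |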
2645 | static void |
2646 | ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, | |
2647 | struct ath10k_sta *arsta, | |
2648 | struct ath10k_per_peer_tx_stats *pstats, | |
2649 | u8 legacy_rate_idx) | |
2650 | { | |
2651 | struct rate_info *txrate = &arsta->txrate; | |
2652 | struct ath10k_htt_tx_stats *tx_stats; | |
e88975ca | 2653 | int idx, ht_idx, gi, mcs, bw, nss; |
a904417f AK |
2654 | |
2655 | if (!arsta->tx_stats) | |
2656 | return; | |
2657 | ||
2658 | tx_stats = arsta->tx_stats; | |
2659 | gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); | |
2660 | ht_idx = txrate->mcs + txrate->nss * 8; | |
2661 | mcs = txrate->mcs; | |
2662 | bw = txrate->bw; | |
2663 | nss = txrate->nss; | |
e88975ca AK |
2664 | idx = mcs * 8 + 8 * 10 * nss; |
2665 | idx += bw * 2 + gi; | |
a904417f AK |
2666 | |
2667 | #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] | |
2668 | ||
2669 | if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) { | |
2670 | STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; | |
2671 | STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; | |
2672 | STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; | |
2673 | STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; | |
2674 | STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; | |
2675 | STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; | |
2676 | } else if (txrate->flags == RATE_INFO_FLAGS_MCS) { | |
2677 | STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; | |
2678 | STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; | |
2679 | STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; | |
2680 | STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; | |
2681 | STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; | |
2682 | STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; | |
2683 | } else { | |
2684 | mcs = legacy_rate_idx; | |
a904417f AK |
2685 | |
2686 | STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; | |
2687 | STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; | |
2688 | STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; | |
2689 | STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; | |
2690 | STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; | |
2691 | STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; | |
2692 | } | |
2693 | ||
2694 | if (ATH10K_HW_AMPDU(pstats->flags)) { | |
2695 | tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); | |
2696 | ||
2697 | if (txrate->flags == RATE_INFO_FLAGS_MCS) { | |
2698 | STATS_OP_FMT(AMPDU).ht[0][ht_idx] += | |
2699 | pstats->succ_bytes + pstats->retry_bytes; | |
2700 | STATS_OP_FMT(AMPDU).ht[1][ht_idx] += | |
2701 | pstats->succ_pkts + pstats->retry_pkts; | |
2702 | } else { | |
2703 | STATS_OP_FMT(AMPDU).vht[0][mcs] += | |
2704 | pstats->succ_bytes + pstats->retry_bytes; | |
2705 | STATS_OP_FMT(AMPDU).vht[1][mcs] += | |
2706 | pstats->succ_pkts + pstats->retry_pkts; | |
2707 | } | |
2708 | STATS_OP_FMT(AMPDU).bw[0][bw] += | |
2709 | pstats->succ_bytes + pstats->retry_bytes; | |
2710 | STATS_OP_FMT(AMPDU).nss[0][nss] += | |
2711 | pstats->succ_bytes + pstats->retry_bytes; | |
2712 | STATS_OP_FMT(AMPDU).gi[0][gi] += | |
2713 | pstats->succ_bytes + pstats->retry_bytes; | |
e88975ca AK |
2714 | STATS_OP_FMT(AMPDU).rate_table[0][idx] += |
2715 | pstats->succ_bytes + pstats->retry_bytes; | |
a904417f AK |
2716 | STATS_OP_FMT(AMPDU).bw[1][bw] += |
2717 | pstats->succ_pkts + pstats->retry_pkts; | |
2718 | STATS_OP_FMT(AMPDU).nss[1][nss] += | |
2719 | pstats->succ_pkts + pstats->retry_pkts; | |
2720 | STATS_OP_FMT(AMPDU).gi[1][gi] += | |
2721 | pstats->succ_pkts + pstats->retry_pkts; | |
e88975ca AK |
2722 | STATS_OP_FMT(AMPDU).rate_table[1][idx] += |
2723 | pstats->succ_pkts + pstats->retry_pkts; | |
a904417f AK |
2724 | } else { |
2725 | tx_stats->ack_fails += | |
2726 | ATH10K_HW_BA_FAIL(pstats->flags); | |
2727 | } | |
2728 | ||
2729 | STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; | |
2730 | STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes; | |
2731 | STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; | |
2732 | ||
2733 | STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; | |
2734 | STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; | |
2735 | STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; | |
2736 | ||
2737 | STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; | |
2738 | STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; | |
2739 | STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; | |
2740 | ||
2741 | STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; | |
2742 | STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; | |
2743 | STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; | |
2744 | ||
2745 | STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; | |
2746 | STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; | |
2747 | STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; | |
2748 | ||
2749 | STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; | |
2750 | STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; | |
2751 | STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; | |
e88975ca AK |
2752 | |
2753 | if (txrate->flags >= RATE_INFO_FLAGS_MCS) { | |
2754 | STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; | |
2755 | STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; | |
2756 | STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; | |
2757 | STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; | |
2758 | STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; | |
2759 | STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; | |
2760 | } | |
cec17c38 AK |
2761 | } |
2762 | ||
2763 | static void | |
2764 | ath10k_update_per_peer_tx_stats(struct ath10k *ar, | |
2765 | struct ieee80211_sta *sta, | |
2766 | struct ath10k_per_peer_tx_stats *peer_stats) | |
2767 | { | |
2768 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | |
9d9cdbf3 GS |
2769 | u8 rate = 0, sgi; |
2770 | s8 rate_idx = 0; | |
cec17c38 AK |
2771 | struct rate_info txrate; |
2772 | ||
2773 | lockdep_assert_held(&ar->data_lock); | |
2774 | ||
2775 | txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); | |
2776 | txrate.bw = ATH10K_HW_BW(peer_stats->flags); | |
2777 | txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); | |
2778 | txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); | |
2779 | sgi = ATH10K_HW_GI(peer_stats->flags); | |
2780 | ||
c1dd8016 SE |
2781 | if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { |
2782 | ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); | |
2783 | return; | |
2784 | } | |
2785 | ||
2786 | if (txrate.flags == WMI_RATE_PREAMBLE_HT && | |
2787 | (txrate.mcs > 7 || txrate.nss < 1)) { | |
2788 | ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats", | |
2789 | txrate.mcs, txrate.nss); | |
cec17c38 AK |
2790 | return; |
2791 | } | |
2792 | ||
0f8a2b77 MSS |
2793 | memset(&arsta->txrate, 0, sizeof(arsta->txrate)); |
2794 | ||
cec17c38 AK |
2795 | if (txrate.flags == WMI_RATE_PREAMBLE_CCK || |
2796 | txrate.flags == WMI_RATE_PREAMBLE_OFDM) { | |
2797 | rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); | |
cec17c38 | 2798 | /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ |
0189dbd7 AK |
2799 | if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) |
2800 | rate = 5; | |
2801 | rate_idx = ath10k_get_legacy_rate_idx(ar, rate); | |
2802 | if (rate_idx < 0) | |
2803 | return; | |
cd591027 | 2804 | arsta->txrate.legacy = rate; |
cec17c38 AK |
2805 | } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { |
2806 | arsta->txrate.flags = RATE_INFO_FLAGS_MCS; | |
c1dd8016 | 2807 | arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); |
cec17c38 AK |
2808 | } else { |
2809 | arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; | |
2810 | arsta->txrate.mcs = txrate.mcs; | |
2811 | } | |
2812 | ||
2813 | if (sgi) | |
2814 | arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; | |
2815 | ||
2816 | arsta->txrate.nss = txrate.nss; | |
91493e8e | 2817 | arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); |
a904417f AK |
2818 | |
2819 | if (ath10k_debug_is_extd_tx_stats_enabled(ar)) | |
2820 | ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, | |
2821 | rate_idx); | |
cec17c38 AK |
2822 | } |
2823 | ||
2824 | static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, | |
2825 | struct sk_buff *skb) | |
2826 | { | |
2827 | struct htt_resp *resp = (struct htt_resp *)skb->data; | |
2828 | struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; | |
2829 | struct htt_per_peer_tx_stats_ind *tx_stats; | |
2830 | struct ieee80211_sta *sta; | |
2831 | struct ath10k_peer *peer; | |
2832 | int peer_id, i; | |
2833 | u8 ppdu_len, num_ppdu; | |
2834 | ||
2835 | num_ppdu = resp->peer_tx_stats.num_ppdu; | |
2836 | ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); | |
2837 | ||
2838 | if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { | |
2839 | ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); | |
2840 | return; | |
2841 | } | |
2842 | ||
2843 | tx_stats = (struct htt_per_peer_tx_stats_ind *) | |
2844 | (resp->peer_tx_stats.payload); | |
2845 | peer_id = __le16_to_cpu(tx_stats->peer_id); | |
2846 | ||
2847 | rcu_read_lock(); | |
2848 | spin_lock_bh(&ar->data_lock); | |
2849 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
2850 | if (!peer) { | |
2851 | 		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", |
2852 | peer_id); | |
2853 | goto out; | |
2854 | } | |
2855 | ||
2856 | sta = peer->sta; | |
2857 | for (i = 0; i < num_ppdu; i++) { | |
2858 | tx_stats = (struct htt_per_peer_tx_stats_ind *) | |
2859 | (resp->peer_tx_stats.payload + i * ppdu_len); | |
2860 | ||
2861 | p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); | |
2862 | p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); | |
2863 | p_tx_stats->failed_bytes = | |
2864 | __le32_to_cpu(tx_stats->failed_bytes); | |
2865 | p_tx_stats->ratecode = tx_stats->ratecode; | |
2866 | p_tx_stats->flags = tx_stats->flags; | |
2867 | p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); | |
2868 | p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); | |
2869 | p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); | |
2870 | ||
2871 | ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); | |
2872 | } | |
2873 | ||
2874 | out: | |
2875 | spin_unlock_bh(&ar->data_lock); | |
2876 | rcu_read_unlock(); | |
2877 | } | |
2878 | ||
e8123bb7 AK |
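| /* 10.2 firmware variant: per-peer tx stats arrive embedded in pktlog |
|  * TX_STAT records rather than in a dedicated HTT peer stats message, so |
|  * parse them out of the pktlog payload and feed them to the same per-peer |
|  * update path. |
|  */ |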
2879 | static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) |
2880 | { | |
2881 | struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; | |
2882 | struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; | |
2883 | struct ath10k_10_2_peer_tx_stats *tx_stats; | |
2884 | struct ieee80211_sta *sta; | |
2885 | struct ath10k_peer *peer; | |
2886 | u16 log_type = __le16_to_cpu(hdr->log_type); | |
2887 | u32 peer_id = 0, i; | |
2888 | ||
2889 | if (log_type != ATH_PKTLOG_TYPE_TX_STAT) | |
2890 | return; | |
2891 | ||
2892 | tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + | |
2893 | ATH10K_10_2_TX_STATS_OFFSET); | |
2894 | ||
2895 | if (!tx_stats->tx_ppdu_cnt) | |
2896 | return; | |
2897 | ||
2898 | peer_id = tx_stats->peer_id; | |
2899 | ||
2900 | rcu_read_lock(); | |
2901 | spin_lock_bh(&ar->data_lock); | |
2902 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
2903 | if (!peer) { | |
2904 | ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", | |
2905 | peer_id); | |
2906 | goto out; | |
2907 | } | |
2908 | ||
2909 | sta = peer->sta; | |
2910 | for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { | |
2911 | p_tx_stats->succ_bytes = | |
2912 | __le16_to_cpu(tx_stats->success_bytes[i]); | |
2913 | p_tx_stats->retry_bytes = | |
2914 | __le16_to_cpu(tx_stats->retry_bytes[i]); | |
2915 | p_tx_stats->failed_bytes = | |
2916 | __le16_to_cpu(tx_stats->failed_bytes[i]); | |
2917 | p_tx_stats->ratecode = tx_stats->ratecode[i]; | |
2918 | p_tx_stats->flags = tx_stats->flags[i]; | |
2919 | p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; | |
2920 | p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; | |
2921 | p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; | |
2922 | ||
2923 | ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); | |
2924 | } | |
2925 | spin_unlock_bh(&ar->data_lock); | |
2926 | rcu_read_unlock(); | |
2927 | ||
2928 | return; | |
2929 | ||
2930 | out: | |
2931 | spin_unlock_bh(&ar->data_lock); | |
2932 | rcu_read_unlock(); | |
2933 | } | |
2934 | ||
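/* Top-level dispatcher for target-to-host (T2H) HTT messages. The return
 * value tells the caller whether it still owns the skb: true means the
 * message was consumed in place and the buffer may be freed, false means
 * ownership was handed off (e.g. an in-order rx indication queued on
 * rx_in_ord_compl_q for the NAPI poll to process later).
 */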
e3a91f87 | 2935 | bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) |
5e3dd157 | 2936 | { |
edb8236d | 2937 | struct ath10k_htt *htt = &ar->htt; |
5e3dd157 | 2938 | struct htt_resp *resp = (struct htt_resp *)skb->data; |
8348db29 | 2939 | enum htt_t2h_msg_type type; |
5e3dd157 KV |
2940 | |
2941 | /* confirm alignment */ | |
2942 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | |
7aa7a72a | 2943 | ath10k_warn(ar, "unaligned htt message, expect trouble\n"); |
5e3dd157 | 2944 | |
7aa7a72a | 2945 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", |
5e3dd157 | 2946 | resp->hdr.msg_type); |
8348db29 RM |
2947 | |
2948 | if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { | |
2949 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n", | |
2950 | resp->hdr.msg_type, ar->htt.t2h_msg_types_max); | |
e3a91f87 | 2951 | return true; |
8348db29 RM |
2952 | } |
2953 | type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; | |
2954 | ||
2955 | switch (type) { | |
5e3dd157 KV |
2956 | case HTT_T2H_MSG_TYPE_VERSION_CONF: { |
2957 | htt->target_version_major = resp->ver_resp.major; | |
2958 | htt->target_version_minor = resp->ver_resp.minor; | |
2959 | complete(&htt->target_version_received); | |
2960 | break; | |
2961 | } | |
6c5151a9 | 2962 | case HTT_T2H_MSG_TYPE_RX_IND: |
f88d4934 ES |
2963 | if (ar->dev_type == ATH10K_DEV_TYPE_HL) |
2964 | return ath10k_htt_rx_proc_rx_ind_hl(htt, | |
2965 | &resp->rx_ind_hl, | |
2966 | skb); | |
2967 | else | |
2968 | ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); | |
3128b3d8 | 2969 | break; |
5e3dd157 KV |
2970 | case HTT_T2H_MSG_TYPE_PEER_MAP: { |
2971 | struct htt_peer_map_event ev = { | |
2972 | .vdev_id = resp->peer_map.vdev_id, | |
2973 | .peer_id = __le16_to_cpu(resp->peer_map.peer_id), | |
2974 | }; | |
2975 | memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); | |
2976 | ath10k_peer_map_event(htt, &ev); | |
2977 | break; | |
2978 | } | |
2979 | case HTT_T2H_MSG_TYPE_PEER_UNMAP: { | |
2980 | struct htt_peer_unmap_event ev = { | |
2981 | .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), | |
2982 | }; | |
2983 | ath10k_peer_unmap_event(htt, &ev); | |
2984 | break; | |
2985 | } | |
2986 | case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { | |
2987 | struct htt_tx_done tx_done = {}; | |
2988 | int status = __le32_to_cpu(resp->mgmt_tx_completion.status); | |
235b9c42 | 2989 | int info = __le32_to_cpu(resp->mgmt_tx_completion.info); |
5e3dd157 | 2990 | |
59465fe4 | 2991 | tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); |
5e3dd157 KV |
2992 | |
2993 | switch (status) { | |
2994 | case HTT_MGMT_TX_STATUS_OK: | |
59465fe4 | 2995 | tx_done.status = HTT_TX_COMPL_STATE_ACK; |
235b9c42 VN |
2996 | if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, |
2997 | ar->wmi.svc_map) && | |
2998 | (resp->mgmt_tx_completion.flags & | |
2999 | HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { | |
3000 | tx_done.ack_rssi = | |
3001 | FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, | |
3002 | info); | |
3003 | } | |
5e3dd157 KV |
3004 | break; |
3005 | case HTT_MGMT_TX_STATUS_RETRY: | |
59465fe4 | 3006 | tx_done.status = HTT_TX_COMPL_STATE_NOACK; |
5e3dd157 KV |
3007 | break; |
3008 | case HTT_MGMT_TX_STATUS_DROP: | |
59465fe4 | 3009 | tx_done.status = HTT_TX_COMPL_STATE_DISCARD; |
5e3dd157 KV |
3010 | break; |
3011 | } | |
3012 | ||
cac08552 RM |
3013 | status = ath10k_txrx_tx_unref(htt, &tx_done); |
3014 | if (!status) { | |
3015 | spin_lock_bh(&htt->tx_lock); | |
3016 | ath10k_htt_tx_mgmt_dec_pending(htt); | |
3017 | spin_unlock_bh(&htt->tx_lock); | |
3018 | } | |
5e3dd157 KV |
3019 | break; |
3020 | } | |
6c5151a9 | 3021 | case HTT_T2H_MSG_TYPE_TX_COMPL_IND: |
59465fe4 | 3022 | ath10k_htt_rx_tx_compl_ind(htt->ar, skb); |
59465fe4 | 3023 | break; |
5e3dd157 KV |
3024 | case HTT_T2H_MSG_TYPE_SEC_IND: { |
3025 | struct ath10k *ar = htt->ar; | |
3026 | struct htt_security_indication *ev = &resp->security_indication; | |
3027 | ||
7aa7a72a | 3028 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
5e3dd157 KV |
3029 | "sec ind peer_id %d unicast %d type %d\n", |
3030 | __le16_to_cpu(ev->peer_id), | |
3031 | !!(ev->flags & HTT_SECURITY_IS_UNICAST), | |
3032 | MS(ev->flags, HTT_SECURITY_TYPE)); | |
3033 | complete(&ar->install_key_done); | |
3034 | break; | |
3035 | } | |
3036 | case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { | |
7aa7a72a | 3037 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 | 3038 | skb->data, skb->len); |
3c97f5de | 3039 | atomic_inc(&htt->num_mpdus_ready); |
5e3dd157 KV |
3040 | break; |
3041 | } | |
3042 | case HTT_T2H_MSG_TYPE_TEST: | |
5e3dd157 | 3043 | break; |
5e3dd157 | 3044 | case HTT_T2H_MSG_TYPE_STATS_CONF: |
d35a6c18 | 3045 | trace_ath10k_htt_stats(ar, skb->data, skb->len); |
a9bf0506 KV |
3046 | break; |
3047 | case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: | |
708b9bde MK |
3048 | /* Firmware can return tx frames if it's unable to fully |
3049 | * process them and suspects the host may be able to fix them. ath10k | |
3050 | * sends all tx frames as already inspected, so this shouldn't | |
3051 | * happen unless fw has a bug. | |
3052 | */ | |
7aa7a72a | 3053 | ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); |
708b9bde | 3054 | break; |
5e3dd157 | 3055 | case HTT_T2H_MSG_TYPE_RX_ADDBA: |
aa5b4fbc MK |
3056 | ath10k_htt_rx_addba(ar, resp); |
3057 | break; | |
5e3dd157 | 3058 | case HTT_T2H_MSG_TYPE_RX_DELBA: |
aa5b4fbc MK |
3059 | ath10k_htt_rx_delba(ar, resp); |
3060 | break; | |
bfdd7937 | 3061 | case HTT_T2H_MSG_TYPE_PKTLOG: { |
bfdd7937 | 3062 | trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, |
34293f75 ARN |
3063 | skb->len - |
3064 | offsetof(struct htt_resp, | |
3065 | pktlog_msg.payload)); | |
e8123bb7 AK |
3066 | |
3067 | if (ath10k_peer_stats_enabled(ar)) | |
3068 | ath10k_fetch_10_2_tx_stats(ar, | |
3069 | resp->pktlog_msg.payload); | |
bfdd7937 RM |
3070 | break; |
3071 | } | |
aa5b4fbc MK |
3072 | case HTT_T2H_MSG_TYPE_RX_FLUSH: { |
3073 | /* Ignore this event because mac80211 takes care of Rx | |
3074 | * aggregation reordering. | |
3075 | */ | |
3076 | break; | |
3077 | } | |
c545070e | 3078 | case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { |
62652555 | 3079 | skb_queue_tail(&htt->rx_in_ord_compl_q, skb); |
e3a91f87 | 3080 | return false; |
c545070e MK |
3081 | } |
3082 | case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: | |
8348db29 | 3083 | break; |
2ce9b25c RM |
3084 | case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { |
3085 | u32 phymode = __le32_to_cpu(resp->chan_change.phymode); | |
3086 | u32 freq = __le32_to_cpu(resp->chan_change.freq); | |
3087 | ||
543b921b | 3088 | ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); |
2ce9b25c RM |
3089 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
3090 | "htt chan change freq %u phymode %s\n", | |
3091 | freq, ath10k_wmi_phymode_str(phymode)); | |
c545070e | 3092 | break; |
2ce9b25c | 3093 | } |
ccec9038 DL |
3094 | case HTT_T2H_MSG_TYPE_AGGR_CONF: |
3095 | break; | |
b2fdbccd RM |
3096 | case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { |
3097 | struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); | |
3098 | ||
3099 | if (!tx_fetch_ind) { | |
3100 | ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); | |
3101 | break; | |
3102 | } | |
3103 | skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); | |
b2fdbccd RM |
3104 | break; |
3105 | } | |
df94e702 | 3106 | case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: |
839ae637 MK |
3107 | ath10k_htt_rx_tx_fetch_confirm(ar, skb); |
3108 | break; | |
df94e702 | 3109 | case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: |
839ae637 | 3110 | ath10k_htt_rx_tx_mode_switch_ind(ar, skb); |
9b158736 | 3111 | break; |
cec17c38 AK |
3112 | case HTT_T2H_MSG_TYPE_PEER_STATS: |
3113 | ath10k_htt_fetch_peer_stats(ar, skb); | |
3114 | break; | |
9b158736 | 3115 | case HTT_T2H_MSG_TYPE_EN_STATS: |
5e3dd157 | 3116 | default: |
2358a544 MK |
3117 | ath10k_warn(ar, "htt event (%d) not handled\n", |
3118 | resp->hdr.msg_type); | |
7aa7a72a | 3119 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 KV |
3120 | skb->data, skb->len); |
3121 | break; | |
dab55d10 | 3122 | } |
e3a91f87 | 3123 | return true; |
5e3dd157 | 3124 | } |
3f0f7ed4 | 3125 | EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); |
6c5151a9 | 3126 | |
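/* Pktlog buffers completed outside the regular HTT t2h path are only traced
 * (for offline pktlog analysis) and then released.
 */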
afb0bf7f VN |
3127 | void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, |
3128 | struct sk_buff *skb) | |
3129 | { | |
53a5c9bc | 3130 | trace_ath10k_htt_pktlog(ar, skb->data, skb->len); |
afb0bf7f VN |
3131 | dev_kfree_skb_any(skb); |
3132 | } | |
3133 | EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); | |
3134 | ||
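/* Drain up to (budget - quota) MSDUs from rx_msdus_q into mac80211 via
 * ath10k_process_rx() and return the updated quota for NAPI accounting.
 */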
deba1b9e RM |
3135 | static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) |
3136 | { | |
3137 | struct sk_buff *skb; | |
3138 | ||
3139 | while (quota < budget) { | |
3140 | if (skb_queue_empty(&ar->htt.rx_msdus_q)) | |
3141 | break; | |
3142 | ||
62652555 | 3143 | skb = skb_dequeue(&ar->htt.rx_msdus_q); |
deba1b9e RM |
3144 | if (!skb) |
3145 | break; | |
3146 | ath10k_process_rx(ar, skb); | |
3147 | quota++; | |
3148 | } | |
3149 | ||
3150 | return quota; | |
3151 | } | |
3152 | ||
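/* NAPI poll body: deliver already-decoded MSDUs first, then process new data
 * from hardware (in-order rx indications and ready A-MSDUs), reap tx
 * completions from txdone_fifo, push pending tx and tx fetch indications,
 * and finally replenish rx buffers. Returns the amount of work done, or the
 * full budget when rx failed or more data is pending so that NAPI
 * reschedules the poll.
 */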
3c97f5de | 3153 | int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) |
6c5151a9 | 3154 | { |
3c97f5de | 3155 | struct ath10k_htt *htt = &ar->htt; |
59465fe4 | 3156 | struct htt_tx_done tx_done = {}; |
426e10ea | 3157 | struct sk_buff_head tx_ind_q; |
6c5151a9 | 3158 | struct sk_buff *skb; |
d742c969 | 3159 | unsigned long flags; |
deba1b9e | 3160 | int quota = 0, done, ret; |
3c97f5de | 3161 | bool resched_napi = false; |
6c5151a9 | 3162 | |
426e10ea | 3163 | __skb_queue_head_init(&tx_ind_q); |
da6416ca | 3164 | |
deba1b9e RM |
3165 | /* Process pending frames before dequeuing more data |
3166 | * from hardware. | |
3c97f5de | 3167 | */ |
deba1b9e RM |
3168 | quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); |
3169 | if (quota == budget) { | |
3170 | resched_napi = true; | |
3171 | goto exit; | |
3172 | } | |
3c97f5de | 3173 | |
62652555 | 3174 | while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { |
3c97f5de | 3175 | spin_lock_bh(&htt->rx_ring.lock); |
deba1b9e | 3176 | ret = ath10k_htt_rx_in_ord_ind(ar, skb); |
3c97f5de | 3177 | spin_unlock_bh(&htt->rx_ring.lock); |
3c97f5de RM |
3178 | |
3179 | dev_kfree_skb_any(skb); | |
deba1b9e | 3180 | if (ret == -EIO) { |
3c97f5de RM |
3181 | resched_napi = true; |
3182 | goto exit; | |
3183 | } | |
3184 | } | |
3185 | ||
deba1b9e RM |
3186 | while (atomic_read(&htt->num_mpdus_ready)) { |
3187 | ret = ath10k_htt_rx_handle_amsdu(htt); | |
3188 | if (ret == -EIO) { | |
3c97f5de RM |
3189 | resched_napi = true; |
3190 | goto exit; | |
3191 | } | |
3c97f5de | 3192 | atomic_dec(&htt->num_mpdus_ready); |
3c97f5de RM |
3193 | } |
3194 | ||
deba1b9e RM |
3195 | /* Deliver received data after processing data from hardware */ |
3196 | quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); | |
3197 | ||
3c97f5de RM |
3198 | /* From NAPI documentation: |
3199 | * The napi poll() function may also process TX completions, in which | |
3200 | * case if it processes the entire TX ring then it should count that | |
3201 | * work as the rest of the budget. | |
3202 | */ | |
3203 | if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) | |
3204 | quota = budget; | |
426e10ea | 3205 | |
59465fe4 RM |
3206 | /* kfifo_get: called only from this NAPI poll function so it's neatly serialized. | |
3207 | * From the kfifo_get() documentation: | |
3208 | * Note that with only one concurrent reader and one concurrent writer, | |
3209 | * you don't need extra locking to use these macros. | |
3210 | */ | |
3211 | while (kfifo_get(&htt->txdone_fifo, &tx_done)) | |
3212 | ath10k_txrx_tx_unref(htt, &tx_done); | |
6c5151a9 | 3213 | |
18f53fe0 RM |
3214 | ath10k_mac_tx_push_pending(ar); |
3215 | ||
3c97f5de RM |
3216 | spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); |
3217 | skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); | |
3218 | spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); | |
3219 | ||
426e10ea MK |
3220 | while ((skb = __skb_dequeue(&tx_ind_q))) { |
3221 | ath10k_htt_rx_tx_fetch_ind(ar, skb); | |
6c5151a9 MK |
3222 | dev_kfree_skb_any(skb); |
3223 | } | |
3224 | ||
3c97f5de | 3225 | exit: |
5c86d97b | 3226 | ath10k_htt_rx_msdu_buff_replenish(htt); |
3c97f5de RM |
3227 | /* In case of rx failure or more data to read, report budget |
3228 | * to reschedule NAPI poll | |
3229 | */ | |
3230 | done = resched_napi ? budget : quota; | |
3231 | ||
3232 | return done; | |
6c5151a9 | 3233 | } |
3c97f5de | 3234 | EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); |
a91a626b GS |
3235 | |
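/* The rx ring accessors come in 32-bit and 64-bit flavours; the two tables
 * differ only in the width of the DMA addresses kept in the rx paddrs ring.
 */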
3236 | static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { | |
3237 | .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, | |
3238 | .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, | |
3239 | .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, | |
3240 | .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, | |
3241 | .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, | |
3242 | }; | |
3243 | ||
3244 | static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { | |
3245 | .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, | |
3246 | .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, | |
3247 | .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, | |
3248 | .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, | |
3249 | .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, | |
3250 | }; | |
3251 | ||
d4e7f553 ES |
3252 | static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { |
3253 | }; | |
3254 | ||
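/* Bind the rx ops for this target: high-latency (HL) devices get the empty
 * table (none of the paddrs rx ring helpers are needed there), 64-bit
 * targets the 64-bit helpers, everything else the 32-bit helpers.
 */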
a91a626b GS |
3255 | void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) |
3256 | { | |
3257 | struct ath10k *ar = htt->ar; | |
3258 | ||
d4e7f553 ES |
3259 | if (ar->dev_type == ATH10K_DEV_TYPE_HL) |
3260 | htt->rx_ops = &htt_rx_ops_hl; | |
3261 | else if (ar->hw_params.target_64bit) | |
a91a626b GS |
3262 | htt->rx_ops = &htt_rx_ops_64; |
3263 | else | |
3264 | htt->rx_ops = &htt_rx_ops_32; | |
3265 | } |