/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"

static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
        struct iwl_mvm_key_pn *ptk_pn;
        int res;
        u8 tid, keyidx;
        u8 pn[IEEE80211_CCMP_PN_LEN];
        u8 *extiv;

        /* do PN checking */

        /* multicast and non-data frames arrive only on the default queue */
        if (!ieee80211_is_data(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return 0;

        /* do not check PN for open AP */
        if (!(stats->flag & RX_FLAG_DECRYPTED))
                return 0;

        /*
         * avoid checking for default queue - we don't want to replicate
         * all the logic that's necessary for checking the PN on fragmented
         * frames, leave that to mac80211
         */
        if (queue == 0)
                return 0;

        /* if we are here - this for sure is either CCMP or GCMP */
        if (IS_ERR_OR_NULL(sta)) {
                IWL_ERR(mvm,
                        "expected hw-decrypted unicast frame for station\n");
                return -1;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
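        /* the key ID lives in bits 6-7 of byte 3 of the extended IV */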
        keyidx = extiv[3] >> 6;

        ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
        if (!ptk_pn)
                return -1;

        if (ieee80211_is_data_qos(hdr->frame_control))
                tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = 0;

        /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
        if (tid >= IWL_MAX_TID_COUNT)
                return -1;

        /*
         * load the PN: the extended IV carries PN0..PN5 in bytes
         * 0, 1, 4, 5, 6 and 7; build it MSB-first for memcmp()
         */
        pn[0] = extiv[7];
        pn[1] = extiv[6];
        pn[2] = extiv[5];
        pn[3] = extiv[4];
        pn[4] = extiv[1];
        pn[5] = extiv[0];

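        /*
         * Replay check: the received PN must be strictly larger than the
         * last PN validated on this queue/TID; an equal PN is accepted
         * only for further subframes of the same A-MSDU, which duplicate
         * detection marks with RX_FLAG_ALLOW_SAME_PN.
         */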
        res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
        if (res < 0)
                return -1;
        if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
                return -1;

        memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
        stats->flag |= RX_FLAG_PN_VALIDATED;

        return 0;
}

/* iwl_mvm_create_skb - add the rxb to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
                               u16 len, u8 crypt_len,
                               struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        unsigned int headlen, fraglen, pad_len = 0;
        unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
                pad_len = 2;

                /*
                 * If the device inserted padding it means that (it thought)
                 * the 802.11 header wasn't a multiple of 4 bytes long. In
                 * this case, reserve two bytes at the start of the SKB to
                 * align the payload properly in case we end up copying it.
                 */
                skb_reserve(skb, pad_len);
        }
        len -= pad_len;

        /* If frame is small enough to fit in skb->head, pull it completely.
         * If not, only pull ieee80211_hdr (including crypto if present, and
         * an additional 8 bytes for SNAP/ethertype, see below) so that
         * splice() or TCP coalesce are more efficient.
         *
         * Since, in addition, ieee80211_data_to_8023() always pulls in at
         * least 8 bytes (possibly more for mesh) we can do the same here
         * to save the cost of doing it later. That still doesn't pull in
         * the actual IP header since the typical case has a SNAP header.
         * If the latter changes (there are efforts in the standards group
         * to do so) we should revisit this and ieee80211_data_to_8023().
         */
        headlen = (len <= skb_tailroom(skb)) ? len :
                                               hdrlen + crypt_len + 8;

        /* The firmware may align the packet to DWORD.
         * The padding is inserted after the IV.
         * After copying the header + IV skip the padding if
         * present before copying packet data.
         */
        hdrlen += crypt_len;
        skb_put_data(skb, hdr, hdrlen);
        skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

        fraglen = len - headlen;

        if (fraglen) {
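                /*
                 * The rest of the payload stays in the rx buffer page;
                 * attach it to the skb as a page fragment rather than
                 * copying it.
                 */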
                int offset = (void *)hdr + headlen + pad_len -
                             rxb_addr(rxb) + rxb_offset(rxb);

                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
        }
}

/* iwl_mvm_pass_packet_to_mac80211 - pass the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                            struct napi_struct *napi,
                                            struct sk_buff *skb, int queue,
                                            struct ieee80211_sta *sta)
{
        if (iwl_mvm_check_pn(mvm, skb, queue, sta))
                kfree_skb(skb);
        else
                ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}

static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct iwl_rx_mpdu_desc *desc,
                                        struct ieee80211_rx_status *rx_status)
{
        int energy_a, energy_b, max_energy;
        u32 rate_flags = le32_to_cpu(desc->rate_n_flags);

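        /*
         * The firmware reports each antenna's energy as the absolute
         * value of the signal in dBm; zero means the antenna was not
         * used, which is mapped to S8_MIN so it never wins the max().
         */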
        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = desc->energy_b;
        energy_b = energy_b ? -energy_b : S8_MIN;
        max_energy = max(energy_a, energy_b);

        IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
                        energy_a, energy_b, max_energy);

        rx_status->signal = max_energy;
        rx_status->chains =
                (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
        rx_status->chain_signal[2] = S8_MIN;
}

static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats,
                             struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
                             int queue, u8 *crypt_len)
{
        u16 status = le16_to_cpu(desc->status);

        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
                return 0;

        /* TODO: handle packets encrypted with unknown alg */

        switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
        case IWL_RX_MPDU_STATUS_SEC_CCM:
        case IWL_RX_MPDU_STATUS_SEC_GCM:
                BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
                /* alg is CCM or GCM: check MIC only */
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                if (pkt_flags & FH_RSCSR_RADA_EN)
                        stats->flag |= RX_FLAG_MIC_STRIPPED;
                *crypt_len = IEEE80211_CCMP_HDR_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_TKIP:
                /* Don't drop the frame and decrypt it in SW */
                if (!fw_has_api(&mvm->fw->ucode_capa,
                                IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
                    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
                        return 0;

                *crypt_len = IEEE80211_TKIP_IV_LEN;
                /* fall through if TTAK OK */
        case IWL_RX_MPDU_STATUS_SEC_WEP:
                if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
                                IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;

                if (pkt_flags & FH_RSCSR_RADA_EN)
                        stats->flag |= RX_FLAG_ICV_STRIPPED;

                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        default:
                /* Expected in monitor (not having the keys) */
                if (!mvm->monitor_on)
                        IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
        }

        return 0;
}

static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
        u16 flags = le16_to_cpu(desc->l3l4_flags);
        u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
                          IWL_RX_L3_PROTO_POS);

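        /*
         * Trust the hardware TCP/UDP checksum only if the L3 checksum
         * was verified too; IPv6 carries no header checksum, so the
         * IPv6 types are accepted without that check.
         */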
        if (mvmvif->features & NETIF_F_RXCSUM &&
            flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
            (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
             l3_prot == IWL_RX_L3_TYPE_IPV6 ||
             l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * returns true if a packet is a duplicate and should be dropped.
 * Updates AMSDU PN tracking info
 */
static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
                           struct ieee80211_rx_status *rx_status,
                           struct ieee80211_hdr *hdr,
                           struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_rxq_dup_data *dup_data;
        u8 tid, sub_frame_idx;

        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        dup_data = &mvm_sta->dup_data[queue];

        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
         */
        if (ieee80211_is_ctl(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1)) {
                rx_status->flag |= RX_FLAG_DUP_VALIDATED;
                return false;
        }

        if (ieee80211_is_data_qos(hdr->frame_control))
                /* frame has qos control */
                tid = *ieee80211_get_qos_ctl(hdr) &
                        IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = IWL_MAX_TID_COUNT;

        /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
        sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

        if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
                     dup_data->last_seq[tid] == hdr->seq_ctrl &&
                     dup_data->last_sub_frame[tid] >= sub_frame_idx))
                return true;

        /* Allow the same PN as the first subframe for the following subframes */
        if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
            sub_frame_idx > dup_data->last_sub_frame[tid] &&
            desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
                rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;

        dup_data->last_seq[tid] = hdr->seq_ctrl;
        dup_data->last_sub_frame[tid] = sub_frame_idx;

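        /* tell mac80211 that we already did the duplicate detection */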
        rx_status->flag |= RX_FLAG_DUP_VALIDATED;

        return false;
}

int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
                            const u8 *data, u32 count)
{
        struct iwl_rxq_sync_cmd *cmd;
        u32 data_size = sizeof(*cmd) + count;
        int ret;

        /* should be DWORD aligned */
        if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
                return -EINVAL;

        cmd = kzalloc(data_size, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->rxq_mask = cpu_to_le32(rxq_mask);
        cmd->count = cpu_to_le32(count);
        cmd->flags = 0;
        memcpy(cmd->payload, data, count);

        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(DATA_PATH_GROUP,
                                           TRIGGER_RX_QUEUES_NOTIF_CMD),
                                   0, data_size, cmd);

        kfree(cmd);
        return ret;
}

/*
 * Returns true if sn2 - buffer_size < sn1 < sn2.
 * To be used only in order to compare reorder buffer head with NSSN.
 * We fully trust NSSN unless it is behind us due to reorder timeout.
 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
 */
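/*
 * For example, with buffer_size == 64 and sn2 == 10, sn1 == 5 or
 * sn1 == 4090 is "less", while sn1 == 100 or sn1 == 3000 is not
 * (sequence numbers wrap modulo 4096).
 */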
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
        return ieee80211_sn_less(sn1, sn2) &&
               !ieee80211_sn_less(sn1, sn2 - buffer_size);
}

#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)

static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta,
                                   struct napi_struct *napi,
                                   struct iwl_mvm_baid_data *baid_data,
                                   struct iwl_mvm_reorder_buffer *reorder_buf,
                                   u16 nssn)
{
        struct iwl_mvm_reorder_buf_entry *entries =
                &baid_data->entries[reorder_buf->queue *
                                    baid_data->entries_per_queue];
        u16 ssn = reorder_buf->head_sn;

        lockdep_assert_held(&reorder_buf->lock);

        /* ignore nssn smaller than head sn - this can happen due to timeout */
        if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
                goto set_timer;

        while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
                int index = ssn % reorder_buf->buf_size;
                struct sk_buff_head *skb_list = &entries[index].e.frames;
                struct sk_buff *skb;

                ssn = ieee80211_sn_inc(ssn);

                /*
                 * Empty the list. Will have more than one frame for A-MSDU.
                 * Empty list is valid as well since nssn indicates frames were
                 * received.
                 */
                while ((skb = __skb_dequeue(skb_list))) {
                        iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
                                                        reorder_buf->queue,
                                                        sta);
                        reorder_buf->num_stored--;
                }
        }
        reorder_buf->head_sn = nssn;

set_timer:
        if (reorder_buf->num_stored && !reorder_buf->removed) {
                u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

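                /*
                 * num_stored is non-zero here, so the scan below is
                 * guaranteed to find a non-empty slot and terminate.
                 */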
                while (skb_queue_empty(&entries[index].e.frames))
                        index = (index + 1) % reorder_buf->buf_size;
                /* modify timer to match next frame's expiration time */
                mod_timer(&reorder_buf->reorder_timer,
                          entries[index].e.reorder_time + 1 +
                          RX_REORDER_BUF_TIMEOUT_MQ);
        } else {
                del_timer(&reorder_buf->reorder_timer);
        }
}

void iwl_mvm_reorder_timer_expired(struct timer_list *t)
{
        struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
        struct iwl_mvm_baid_data *baid_data =
                iwl_mvm_baid_data_from_reorder_buf(buf);
        struct iwl_mvm_reorder_buf_entry *entries =
                &baid_data->entries[buf->queue * baid_data->entries_per_queue];
        int i;
        u16 sn = 0, index = 0;
        bool expired = false;
        bool cont = false;

        spin_lock(&buf->lock);

        if (!buf->num_stored || buf->removed) {
                spin_unlock(&buf->lock);
                return;
        }

        for (i = 0; i < buf->buf_size; i++) {
                index = (buf->head_sn + i) % buf->buf_size;

                if (skb_queue_empty(&entries[index].e.frames)) {
                        /*
                         * If there is a hole and the next frame didn't expire
                         * we want to break and not advance SN
                         */
                        cont = false;
                        continue;
                }
                if (!cont &&
                    !time_after(jiffies, entries[index].e.reorder_time +
                                         RX_REORDER_BUF_TIMEOUT_MQ))
                        break;

                expired = true;
                /* continue until the next hole after these expired frames */
                cont = true;
                sn = ieee80211_sn_add(buf->head_sn, i + 1);
        }

        if (expired) {
                struct ieee80211_sta *sta;
                struct iwl_mvm_sta *mvmsta;
                u8 sta_id = baid_data->sta_id;

                rcu_read_lock();
                sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
                mvmsta = iwl_mvm_sta_from_mac80211(sta);

                /* SN is set to the last expired frame + 1 */
                IWL_DEBUG_HT(buf->mvm,
                             "Releasing expired frames for sta %u, sn %d\n",
                             sta_id, sn);
                iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
                                                     sta, baid_data->tid);
                iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
                rcu_read_unlock();
        } else {
                /*
                 * If no frame expired and there are stored frames, index is now
                 * pointing to the first unexpired frame - modify the timer
                 * according to this frame.
                 */
                mod_timer(&buf->reorder_timer,
                          entries[index].e.reorder_time +
                          1 + RX_REORDER_BUF_TIMEOUT_MQ);
        }
        spin_unlock(&buf->lock);
}

static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
                           struct iwl_mvm_delba_data *data)
{
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_reorder_buffer *reorder_buf;
        u8 baid = data->baid;

        if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
                return;

        rcu_read_lock();

        ba_data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN_ON_ONCE(!ba_data))
                goto out;

        sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                goto out;

        reorder_buf = &ba_data->reorder_buf[queue];

        /* release all frames that are in the reorder buffer to the stack */
        spin_lock_bh(&reorder_buf->lock);
        iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
                               ieee80211_sn_add(reorder_buf->head_sn,
                                                reorder_buf->buf_size));
        spin_unlock_bh(&reorder_buf->lock);
        del_timer_sync(&reorder_buf->reorder_timer);

out:
        rcu_read_unlock();
}

void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            int queue)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rxq_sync_notification *notif;
        struct iwl_mvm_internal_rxq_notif *internal_notif;

        notif = (void *)pkt->data;
        internal_notif = (void *)notif->payload;

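        /*
         * A sync notification carries the cookie of the originating
         * request; a mismatch means this message belongs to a sync that
         * already timed out, so ignore it.
         */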
        if (internal_notif->sync) {
                if (mvm->queue_sync_cookie != internal_notif->cookie) {
                        WARN_ONCE(1,
                                  "Received expired RX queue sync message\n");
                        return;
                }
                if (!atomic_dec_return(&mvm->queue_sync_counter))
                        wake_up(&mvm->rx_sync_waitq);
        }

        switch (internal_notif->type) {
        case IWL_MVM_RXQ_EMPTY:
                break;
        case IWL_MVM_RXQ_NOTIF_DEL_BA:
                iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
                break;
        default:
                WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
        }
}

/*
 * Returns true if the MPDU was buffered/dropped, false if it should be passed
 * to upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
                            struct napi_struct *napi,
                            int queue,
                            struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            struct iwl_rx_mpdu_desc *desc)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_baid_data *baid_data;
        struct iwl_mvm_reorder_buffer *buffer;
        struct sk_buff *tail;
        u32 reorder = le32_to_cpu(desc->reorder_data);
        bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
        bool last_subframe =
                desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
        u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        u8 sub_frame_idx = desc->amsdu_info &
                           IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
        struct iwl_mvm_reorder_buf_entry *entries;
        int index;
        u16 nssn, sn;
        u8 baid;

        baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;

        /*
         * This also covers the case of receiving a Block Ack Request
         * outside a BA session; we'll pass it to mac80211 and that
         * then sends a delBA action frame.
         */
        if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
                return false;

        /* no sta yet */
        if (WARN_ONCE(IS_ERR_OR_NULL(sta),
                      "Got valid BAID without a valid station assigned\n"))
                return false;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);

        /* not a data packet or a BAR */
        if (!ieee80211_is_back_req(hdr->frame_control) &&
            (!ieee80211_is_data_qos(hdr->frame_control) ||
             is_multicast_ether_addr(hdr->addr1)))
                return false;

        if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
                return false;

        baid_data = rcu_dereference(mvm->baid_map[baid]);
        if (!baid_data) {
                IWL_DEBUG_RX(mvm,
                             "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
                             baid, reorder);
                return false;
        }

        if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
                 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
                 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
                 tid))
                return false;

        nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
        sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
                IWL_RX_MPDU_REORDER_SN_SHIFT;

        buffer = &baid_data->reorder_buf[queue];
        entries = &baid_data->entries[queue * baid_data->entries_per_queue];

        spin_lock_bh(&buffer->lock);

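        /*
         * The first frame received after the reorder buffer was
         * (re)initialized marks it valid; frames the firmware flagged
         * with BA_OLD_SN (an old SN, presumably from before the BA
         * session) bypass the buffer instead.
         */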
        if (!buffer->valid) {
                if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
                        spin_unlock_bh(&buffer->lock);
                        return false;
                }
                buffer->valid = true;
        }

        if (ieee80211_is_back_req(hdr->frame_control)) {
                iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
                goto drop;
        }

        /*
         * If there was a significant jump in the nssn - adjust.
         * If the SN is smaller than the NSSN it might need to first go into
         * the reorder buffer, in which case we just release up to it and the
         * rest of the function will take care of storing it and releasing up to
         * the nssn
         */
        if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
                                buffer->buf_size) ||
            !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
                u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

                iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
                                       min_sn);
        }

        /* drop any outdated packets */
        if (ieee80211_sn_less(sn, buffer->head_sn))
                goto drop;

        /* release immediately if allowed by nssn and no stored frames */
        if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
                if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
                                       buffer->buf_size) &&
                   (!amsdu || last_subframe))
                        buffer->head_sn = nssn;
                /* No need to update AMSDU last SN - we are moving the head */
                spin_unlock_bh(&buffer->lock);
                return false;
        }

        /*
         * release immediately if there are no stored frames, and the sn is
         * equal to the head.
         * This can happen due to reorder timer, where NSSN is behind head_sn.
         * When we released everything, and we got the next frame in the
         * sequence, according to the NSSN we can't release immediately,
         * while technically there is no hole and we can move forward.
         */
        if (!buffer->num_stored && sn == buffer->head_sn) {
                if (!amsdu || last_subframe)
                        buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
                /* No need to update AMSDU last SN - we are moving the head */
                spin_unlock_bh(&buffer->lock);
                return false;
        }

        index = sn % buffer->buf_size;

        /*
         * Check if we already stored this frame.
         * Since an A-MSDU is either received in full or not at all, the
         * logic is simple: if there are frames in this slot and the last
         * stored A-MSDU frame had a different SN, this is a retransmission.
         * If the SN is the same, it is the same A-MSDU only if the
         * subframe index is incrementing - otherwise it is a
         * retransmission.
         */
        tail = skb_peek_tail(&entries[index].e.frames);
        if (tail && !amsdu)
                goto drop;
        else if (tail && (sn != buffer->last_amsdu ||
                          buffer->last_sub_index >= sub_frame_idx))
                goto drop;

        /* put in reorder buffer */
        __skb_queue_tail(&entries[index].e.frames, skb);
        buffer->num_stored++;
        entries[index].e.reorder_time = jiffies;

        if (amsdu) {
                buffer->last_amsdu = sn;
                buffer->last_sub_index = sub_frame_idx;
        }

        /*
         * We cannot trust NSSN for AMSDU sub-frames that are not the last.
         * The reason is that NSSN advances on the first sub-frame, and may
         * cause the reorder buffer to advance before all the sub-frames arrive.
         * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
         * SN 1. NSSN for first sub frame will be 3 with the result of driver
         * releasing SN 0, 1, 2. When sub-frame 1 arrives - reorder buffer is
         * already ahead and it will be dropped.
         * If the last sub-frame is not on this queue - we will get frame
         * release notification with up to date NSSN.
         */
        if (!amsdu || last_subframe)
                iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);

        spin_unlock_bh(&buffer->lock);
        return true;

drop:
        kfree_skb(skb);
        spin_unlock_bh(&buffer->lock);
        return true;
}

static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
                                    u32 reorder_data, u8 baid)
{
        unsigned long now = jiffies;
        unsigned long timeout;
        struct iwl_mvm_baid_data *data;

        rcu_read_lock();

        data = rcu_dereference(mvm->baid_map[baid]);
        if (!data) {
                IWL_DEBUG_RX(mvm,
                             "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
                             baid, reorder_data);
                goto out;
        }

        if (!data->timeout)
                goto out;

        timeout = data->timeout;
        /*
         * Do not update last rx all the time to avoid cache bouncing
         * between the rx queues.
         * Update it every timeout. Worst case is the session will
         * expire after ~ 2 * timeout, which doesn't matter that much.
         */
        if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
                /* Update is atomic */
                data->last_rx = now;

out:
        rcu_read_unlock();
}

void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
{
        struct ieee80211_rx_status *rx_status;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
        u16 phy_info = le16_to_cpu(desc->phy_info);
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0;

        if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
                return;

        /* Don't use dev_alloc_skb(); we'll have enough headroom once the
         * ieee80211_hdr is pulled.
         */
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
                return;
        }

        rx_status = IEEE80211_SKB_RXCB(skb);

        if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
                              le32_to_cpu(pkt->len_n_flags), queue,
                              &crypt_len)) {
                kfree_skb(skb);
                return;
        }

        /*
         * Keep packets with CRC errors (and with overrun) for monitor mode
         * (otherwise the firmware discards them) but mark them as bad.
         */
        if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
            !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
                IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
                             le16_to_cpu(desc->status));
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
        }
        /* set the preamble flag if appropriate */
        if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
                rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;

        if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
                rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
                /* TSF as indicated by the firmware is at INA time */
                rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
        }
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
        rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
                                               NL80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);

        /* update aggregation data on the default queue for monitor's sake */
        if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
                bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;

                rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
                rx_status->ampdu_reference = mvm->ampdu_ref;
                /* toggle is switched whenever new aggregation starts */
                if (toggle_bit != mvm->ampdu_toggle) {
                        mvm->ampdu_ref++;
                        mvm->ampdu_toggle = toggle_bit;
                }
        }

        rcu_read_lock();

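        /*
         * Prefer the station the firmware already matched to this frame;
         * otherwise fall back to an address lookup in mac80211.
         */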
        if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

                if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
                        sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
                        if (IS_ERR(sta))
                                sta = NULL;
                }
        } else if (!is_multicast_ether_addr(hdr->addr2)) {
                /*
                 * This is fine since we prevent two stations with the same
                 * address from being added.
                 */
                sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
        }

        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                struct ieee80211_vif *tx_blocked_vif =
                        rcu_dereference(mvm->csa_tx_blocked_vif);
                u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
                               IWL_RX_MPDU_REORDER_BAID_MASK) >>
                               IWL_RX_MPDU_REORDER_BAID_SHIFT);

                /*
                 * We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
                if (unlikely(tx_blocked_vif) &&
                    tx_blocked_vif == mvmsta->vif) {
                        struct iwl_mvm_vif *mvmvif =
                                iwl_mvm_vif_from_mac80211(tx_blocked_vif);

                        if (mvmvif->csa_target_freq == rx_status->freq)
                                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
                                                                 false);
                }

                rs_update_last_rssi(mvm, mvmsta, rx_status);

                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
                        struct iwl_fw_dbg_trigger_tlv *trig;
                        struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
                        bool trig_check;
                        s32 rssi;

                        trig = iwl_fw_dbg_get_trigger(mvm->fw,
                                                      FW_DBG_TRIGGER_RSSI);
                        rssi_trig = (void *)trig->data;
                        rssi = le32_to_cpu(rssi_trig->rssi);

                        trig_check =
                                iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
                                                              ieee80211_vif_to_wdev(mvmsta->vif),
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
                                iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                                        NULL);
                }

                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, desc);

                if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
                        kfree_skb(skb);
                        goto out;
                }

                /*
                 * Our hardware de-aggregates A-MSDUs but copies the mac
                 * header as is to the de-aggregated MPDUs. We need to turn
                 * off the A-MSDU bit in the QoS control ourselves.
                 * In addition, HW reverses addr3 and addr4 - reverse them
                 * back.
                 */
                if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
                    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
                        int i;
                        u8 *qc = ieee80211_get_qos_ctl(hdr);
                        u8 mac_addr[ETH_ALEN];

                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

                        for (i = 0; i < ETH_ALEN; i++)
                                mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
                        ether_addr_copy(hdr->addr3, mac_addr);

                        if (ieee80211_has_a4(hdr->frame_control)) {
                                for (i = 0; i < ETH_ALEN; i++)
                                        mac_addr[i] =
                                                hdr->addr4[ETH_ALEN - i - 1];
                                ether_addr_copy(hdr->addr4, mac_addr);
                        }
                }
                if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
                        u32 reorder_data = le32_to_cpu(desc->reorder_data);

                        iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
                }
        }

        /* Set up the HT phy flags */
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
        case RATE_MCS_CHAN_WIDTH_20:
                break;
        case RATE_MCS_CHAN_WIDTH_40:
                rx_status->bw = RATE_INFO_BW_40;
                break;
        case RATE_MCS_CHAN_WIDTH_80:
                rx_status->bw = RATE_INFO_BW_80;
                break;
        case RATE_MCS_CHAN_WIDTH_160:
                rx_status->bw = RATE_INFO_BW_160;
                break;
        }

        if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
            rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
        if (rate_n_flags & RATE_MCS_LDPC_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
        if (rate_n_flags & RATE_MCS_HT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
                                RATE_MCS_STBC_POS;
                rx_status->encoding = RX_ENC_HT;
                rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
                rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
        } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
                                RATE_MCS_STBC_POS;
                rx_status->nss =
                        ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
                                                RATE_VHT_MCS_NSS_POS) + 1;
                rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status->encoding = RX_ENC_VHT;
                rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->enc_flags |= RX_ENC_FLAG_BF;
        } else {
                int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
                                                               rx_status->band);

                if (WARN(rate < 0 || rate > 0xFF,
                         "Invalid rate flags 0x%x, band %d,\n",
                         rate_n_flags, rx_status->band)) {
                        kfree_skb(skb);
                        goto out;
                }
                rx_status->rate_idx = rate;
        }

        /* management stuff on default queue */
        if (!queue) {
                if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
                              ieee80211_is_probe_resp(hdr->frame_control)) &&
                             mvm->sched_scan_pass_all ==
                             SCHED_SCAN_PASS_ALL_ENABLED))
                        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

                if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
                             ieee80211_is_probe_resp(hdr->frame_control)))
                        rx_status->boottime_ns = ktime_get_boot_ns();
        }

        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
        if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
                iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
out:
        rcu_read_unlock();
}

void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
                              struct iwl_rx_cmd_buffer *rxb, int queue)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_frame_release *release = (void *)pkt->data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_reorder_buffer *reorder_buf;
        struct iwl_mvm_baid_data *ba_data;

        int baid = release->baid;

        IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
                     release->baid, le16_to_cpu(release->nssn));

        if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
                return;

        rcu_read_lock();

        ba_data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN_ON_ONCE(!ba_data))
                goto out;

        sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                goto out;

        reorder_buf = &ba_data->reorder_buf[queue];

        spin_lock_bh(&reorder_buf->lock);
        iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
                               le16_to_cpu(release->nssn));
        spin_unlock_bh(&reorder_buf->lock);

out:
        rcu_read_unlock();
}