/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
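
/* Flow control for the HTT tx path: the firmware can only have a limited
 * number of frames in flight (max_num_pending_tx). The helpers below
 * maintain the pending counter under tx_lock and pause/unpause the mac80211
 * queues with the ATH10K_TX_PAUSE_Q_FULL reason exactly at the
 * full/not-full transitions.
 */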
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
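
/* Firmware tx completions identify frames only by msdu_id, so every
 * outgoing frame is registered in the pending_tx IDR here and looked up
 * again (and removed) when its completion arrives.
 */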
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
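
/* Allocates per-device HTT tx state: the pending_tx IDR, a DMA pool of
 * ath10k_htt_txbuf entries (HTC header + HTT tx descriptor + fragment
 * list) and, on hardware using continuous fragment descriptors (e.g.
 * QCA99X0), one coherent htt_msdu_ext_desc array indexed by msdu_id.
 */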
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
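
/* IDR iterator used at teardown: any msdu_id still registered belongs to a
 * frame the firmware never reported on, so complete it locally with
 * discard status to release the skb and its DMA mapping.
 */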
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	spin_lock_bh(&htt->tx_lock);
	ath10k_txrx_tx_unref(htt, &tx_done);
	spin_unlock_bh(&htt->tx_lock);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}
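
/* HTC-level tx completion for HTT command skbs. Nothing to do here beyond
 * freeing the skb: actual MSDU completion is reported asynchronously via
 * HTT events.
 */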
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
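
/* Requests the firmware's HTT protocol version; the reply is processed in
 * the HTT rx path during device setup.
 */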
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
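
/* Requests firmware statistics identified by mask. The 64-bit cookie is
 * split into two little-endian halves and echoed back in the stats
 * indication so replies can be matched with requests.
 */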
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
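
/* Points the firmware at the host's fragment descriptor bank so it can
 * fetch the extended MSDU descriptor for a given msdu_id by itself. Only
 * used on hardware with continuous_frag_desc set.
 */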
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
				__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
				__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
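
/* Describes the host rx ring to the firmware: ring base/length, buffer
 * size, which rx descriptor sections to deliver (flags) and the offset of
 * each section within struct htt_rx_desc, expressed in 4-byte words.
 */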
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
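
/* Overrides the firmware's default aggregation limits with the given
 * maximum number of subframes per A-MPDU and A-MSDU.
 */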
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
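
/* Management frame tx. Unlike the data path below this goes through HTC as
 * a regular HTT_H2T_MSG_TYPE_MGMT_TX command carrying the frame's DMA
 * address; the first bytes of the 802.11 header are also copied inline for
 * the firmware to parse.
 */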
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
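
/* Data frame tx. The frame is described by an ath10k_htt_txbuf taken from
 * the DMA pool and handed to HIF as a two-element scatter-gather list
 * (descriptor + frame prefix), bypassing the HTC tx queue entirely; see
 * the comment in the body for the rationale.
 */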
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control))
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txbuf;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * The HTT endpoint is creditless so there's no need to care about
	 * HTC flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event
	 * is received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so
	 * force it to simply rely on a regular tx completion with discard
	 * status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}