/*
 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

static bool rx_align_2;
module_param(rx_align_2, bool, S_IRUGO);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}
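
/* When rx_align_2 is set, the 6-byte snap length above accounts for a SNAP
 * prefix that arrives between the MAC addresses and the Ethertype;
 * wil_vring_reap_rx() strips it before the frame reaches the stack.
 */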
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}
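
/* Worked example (illustrative): for a ring of size 8 with swhead = 2 and
 * swtail = 6, used = (8 + 2 - 6) % 8 = 4 and avail = 8 - 4 - 1 = 3.
 * One slot is always kept unused so a full ring (next_tail == swhead)
 * can be told apart from an empty one (swhead == swtail).
 */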
/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size / 8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size / 4;
}
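
/* These watermarks drive queue flow control: wil_start_xmit() stops the
 * netif queues when available space drops below the low mark, and
 * wil_tx_complete() wakes them once space rises above the high mark.
 */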
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}
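
/* Example: the performance-monitoring code below uses
 * wil_val_in_range(vring_idle_trsh, used, used + n) to detect the moment
 * the ring's occupancy crosses the idle threshold.
 */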
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx)
		return -ENOMEM;

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, the ownership bit is at the same location,
	 * thus we can use either layout
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length, b11, error - don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - PHY info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/* PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
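
/* Usage note (illustrative): PHY info capture is off by default and is
 * enabled at module load time, e.g. "modprobe wil6210
 * rtap_include_phy_info=1"; sniffers then see the raw Rx descriptor and
 * PHY data in the radiotap vendor namespace built above.
 */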
/* similar to the ieee80211_ version, but FC contains only the first byte */
static inline int wil_is_back_req(u8 fc)
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
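
/* Example: the first FC byte of a BlockAckReq is 0x84 - type CTL (0x04)
 * plus subtype BACK_REQ (0x80) - so wil_is_back_req(0x84) is true.
 */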
/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

again:
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not an error, we just reached the end of the Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2; /* shift 2-bit type to its FC position */
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int mid = wil_rxdesc_mid(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand the Microsoft IP stack,
		 * which mis-calculates the TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
	     (next_tail != v->swhead) && (count-- > 0);
	     v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}
/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from the original memcmp (see lib/string.c)
 * with minimal modifications
 */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}
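
/* The reverse walk exists because the packet number (PN) is stored
 * least-significant-byte first in the Rx descriptor, while replay checking
 * must compare the most significant bytes first; comparing from the end
 * of the buffers gives the correct ordering.
 */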
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (unlikely(!cc->key_set)) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
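
/* Replay state is kept per (CID, TID, key index) for unicast and in a
 * separate group context for multicast, matching how GCMP PNs advance
 * independently per key.
 */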
/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	int security = wil_rxdesc_security(d);
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);

	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		goto stats;
	}

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}
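
/* Note: when the AP path above hands xmit_skb to dev_queue_xmit(), the
 * frame re-enters this driver through wil_start_xmit(), i.e. intra-BSS
 * traffic is bridged entirely inside the driver.
 */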
/**
 * Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}
static inline void wil_tx_data_init(struct vring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	spin_unlock_bh(&txdata->lock);
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;
	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
	wil->vring2cid_tid[id][1] = 0;

 out:
	return rc;
}
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);

 out:
	return rc;
}
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	lockdep_assert_held(&wil->mutex);

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			struct vring_tx_data *txdata = &wil->vring_tx_data[i];

			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);
static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;
	struct vring_tx_data *txdata;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	struct vring_tx_data *txdata;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	txdata = &wil->vring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}
static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct vring_tx_data *txdata;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;

	if (wdev->iftype != NL80211_IFTYPE_AP)
		return wil_find_tx_bcast_2(wil, skb);

	return wil_find_tx_bcast_1(wil, skb);
}
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	/* L4 type: TCP */
	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		     (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
/**
 * Sets the descriptor @d up for csum offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 on success, -EINVAL if the protocol is neither TCP nor UDP.
 *
 * It is very similar to the previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
			      struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss*/

	u32 swhead = vring->swhead;
	int used, avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical page 4K is 3-4 payloads, we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In reality we might need more or fewer descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len*/
	hdrlen = ETH_HLEN +
		(int)skb_network_header_len(skb) +
		tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++; /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag*/
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
				*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		/* unmap in reverse, starting from the last mapped descriptor */
		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}
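
/* Illustrative walk-through (not from the original source): for a TSO skb
 * whose payload is one MSS split across two page fragments, three
 * descriptors are used - a header descriptor (wil_tso_type_hdr) and two
 * data descriptors, the first typed wil_tso_type_first and the final one
 * re-marked by wil_set_tx_desc_last_tso(); wil_tx_desc_set_nr_frags()
 * then writes the total, 3, into the header descriptor.
 */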
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_err_ratelimited(wil, "FW not connected\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
		/* in STA mode (ESS), all to same VRING */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else { /* direct communication, find matching VRING */
		vring = bcast ? wil_find_tx_bcast(wil, skb) :
			wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
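
/* Sockets that requested wifi ACK status (SKBTX_WIFI_STATUS) get it via
 * skb_complete_wifi_ack(); for everything else the consume/kfree split
 * only affects drop accounting in tools such as dropwatch.
 */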
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
		wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
	}

	return done;
}