/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8
static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add = chcr_xfrm_add_state,
	.xdo_dev_state_delete = chcr_xfrm_del_state,
	.xdo_dev_state_free = chcr_xfrm_free_state,
	.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		rtnl_lock();
		netdev_change_features(netdev);
		rtnl_unlock();
	}
}
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}
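/* Build the SA's key context image: the AES key is laid out first (rounded
 * up to a 16-byte boundary), followed by the GHASH subkey H = AES_K(0^128)
 * computed below with a software cipher. The 4-byte nonce salt carried at
 * the tail of the xfrm AEAD key is stashed separately in sa_entry->salt.
 */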
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	struct crypto_cipher *cipher;
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		sa_entry->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out1;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload supported\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	chcr_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	return true;
}
static void chcr_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
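/* Returns the length of the WR/CPL headers (including the key context and,
 * for ESN states, the AAD/IV block) if the packet is small enough to be
 * inlined in the work request as immediate data, or 0 if it must be sent
 * with a scatter-gather list.
 */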
static inline int is_eth_imm(const struct sk_buff *skb,
			     struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data. In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments. We also include the flits necessary
	 * for the TX Packet Work Request and CPL. We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}
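/* For ESN states, build the chcr_ipsec_aadiv block ahead of the packet: it
 * carries the SPI, the full 64-bit extended sequence number (xo->seq.hi
 * combined with the 32 low bits from the ESP header) and the 8-byte IV, so
 * the hardware can authenticate the high sequence bits that never appear
 * on the wire.
 */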
inline void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = esphdr->spi;
	seqlo = htonl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			  (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
					sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
		sc_imm->len = cpu_to_be32(sa_entry->imm);
	}
	pos += len;
	return pos;
}
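/* Emit the CPL_TX_PKT_XT command for the packet. IP/L4 checksum offload
 * is disabled (the payload leaves the crypto engine encrypted) and a VLAN
 * tag is inserted when one is present on the skb.
 */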
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}
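/* Copy the key context (header, salt and key material) into the Tx
 * descriptor ring, wrapping to the start of the ring when the context
 * straddles the end-of-queue marker, then emit the CPL_TX_PKT_XT.
 */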
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
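/* Assemble the full FW_ULPTX_WR crypto work request: WR header, ULP_TXPKT,
 * immediate-data sub-command, a CPL_TX_SEC_PDU describing the AAD, IV and
 * cipher offsets within the ESP packet, and finally the key context and
 * CPL_TX_PKT_XT written by copy_key_cpltx_pktxt().
 */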
inline void *chcr_crypto_wreq(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      int credits,
			      struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry);
	if (sa_entry->esn)
		ivdrop = 1;

	if (is_eth_imm(skb, sa_entry)) {
		immdatalen = skb->len;
		sa_entry->imm = immdatalen;
	}

	if (sa_entry->esn)
		esnlen = sizeof(struct chcr_ipsec_aadiv);

	/* WR header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
/*
 *	chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	int qidx, left, credits;
	unsigned int flits = 0, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	struct port_info *pi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	struct sec_path *sp;
	bool immediate = false;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, sa_entry))
		immediate = true;

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}