1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2018 Chelsio Communications, Inc.
5 * Written by: Atul Gupta (atul.gupta@chelsio.com)
8 #include <linux/module.h>
9 #include <linux/list.h>
10 #include <linux/workqueue.h>
11 #include <linux/skbuff.h>
12 #include <linux/timer.h>
13 #include <linux/notifier.h>
14 #include <linux/inetdevice.h>
16 #include <linux/tcp.h>
17 #include <linux/sched/signal.h>
18 #include <linux/kallsyms.h>
19 #include <linux/kprobes.h>
20 #include <linux/if_vlan.h>
21 #include <net/inet_common.h>
30 * State transitions and actions for close. Note that if we are in SYN_SENT
31 * we remain in that state as we cannot control a connection while it's in
32 * SYN_SENT; such connections are allowed to establish and are then aborted.
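 *
 * Each entry packs the next state in its low bits (TCP_STATE_MASK) and
 * may set TCP_ACTION_FIN in the high bits; make_close_transition()
 * indexes this table by sk->sk_state and returns the action bit.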
34 static unsigned char new_state[16] = {
35 /* current state: new state: action: */
36 /* (Invalid) */ TCP_CLOSE,
37 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
38 /* TCP_SYN_SENT */ TCP_SYN_SENT,
39 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
40 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
41 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
42 /* TCP_TIME_WAIT */ TCP_CLOSE,
43 /* TCP_CLOSE */ TCP_CLOSE,
44 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
45 /* TCP_LAST_ACK */ TCP_LAST_ACK,
46 /* TCP_LISTEN */ TCP_CLOSE,
47 /* TCP_CLOSING */ TCP_CLOSING,
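/*
 * Allocate the per-connection offload state: the chtls_sock itself, a
 * preallocated control skb (txdata_skb_cache), the tx and WR queues,
 * and the TLS hardware state with both key indices marked invalid.
 */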
50 static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
52 struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
57 csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
58 if (!csk->txdata_skb_cache) {
63 kref_init(&csk->kref);
65 skb_queue_head_init(&csk->txq);
66 csk->wr_skb_head = NULL;
67 csk->wr_skb_tail = NULL;
70 csk->tlshws.txkey = -1;
71 csk->tlshws.rxkey = -1;
72 csk->tlshws.mfs = TLS_MFS;
73 skb_queue_head_init(&csk->tlshws.sk_recv_queue);
77 static void chtls_sock_release(struct kref *ref)
79 struct chtls_sock *csk =
80 container_of(ref, struct chtls_sock, kref);
85 static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
88 struct net_device *ndev = cdev->ports[0];
90 if (likely(!inet_sk(sk)->inet_rcv_saddr))
93 ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
97 if (is_vlan_dev(ndev))
98 return vlan_dev_real_dev(ndev);
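/*
 * Record the TCP options negotiated for this connection (MSS, timestamps,
 * SACK, window scaling) in the tcp_sock, as reported by the hardware in
 * the tcp_opt word of CPL_PASS_ESTABLISH.
 */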
102 static void assign_rxopt(struct sock *sk, unsigned int opt)
104 const struct chtls_dev *cdev;
105 struct chtls_sock *csk;
108 csk = rcu_dereference_sk_user_data(sk);
112 tp->tcp_header_len = sizeof(struct tcphdr);
113 tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
114 tp->mss_cache = tp->rx_opt.mss_clamp;
115 tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
116 tp->rx_opt.sack_ok = TCPOPT_SACK_G(opt);
117 tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
118 SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
119 if (!tp->rx_opt.wscale_ok)
120 tp->rx_opt.rcv_wscale = 0;
121 if (tp->rx_opt.tstamp_ok) {
122 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
123 tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
124 } else if (csk->opt2 & TSTAMPS_EN_F) {
125 csk->opt2 &= ~TSTAMPS_EN_F;
126 csk->mtu_idx = TCPOPT_MSS_G(opt);
130 static void chtls_purge_receive_queue(struct sock *sk)
134 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
135 skb_dst_set(skb, (void *)NULL);
140 static void chtls_purge_write_queue(struct sock *sk)
142 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
145 while ((skb = __skb_dequeue(&csk->txq))) {
146 sk->sk_wmem_queued -= skb->truesize;
151 static void chtls_purge_recv_queue(struct sock *sk)
153 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
154 struct chtls_hws *tlsk = &csk->tlshws;
157 while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
158 skb_dst_set(skb, NULL);
163 static void abort_arp_failure(void *handle, struct sk_buff *skb)
165 struct cpl_abort_req *req = cplhdr(skb);
166 struct chtls_dev *cdev;
168 cdev = (struct chtls_dev *)handle;
169 req->cmd = CPL_ABORT_NO_RST;
170 cxgb4_ofld_send(cdev->lldi->ports[0], skb);
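/*
 * Return an skb for a critical control message: reuse the preallocated
 * txdata_skb_cache when it is neither shared nor cloned (taking extra
 * references so the cached copy survives transmission), otherwise fall
 * back to a GFP_KERNEL | __GFP_NOFAIL allocation so the caller is never
 * left without a buffer.
 */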
173 static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
175 if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
177 refcount_add(2, &skb->users);
179 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
184 static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
186 struct cpl_abort_req *req;
187 struct chtls_sock *csk;
190 csk = rcu_dereference_sk_user_data(sk);
194 skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
196 req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
197 INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
198 skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
199 req->rsvd0 = htonl(tp->snd_nxt);
200 req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
202 t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
203 send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
206 static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
208 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
210 if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
212 if (sk->sk_state == TCP_SYN_RECV)
213 csk_set_flag(csk, CSK_RST_ABORTED);
217 if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
218 struct tcp_sock *tp = tcp_sk(sk);
220 if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
221 WARN_ONCE(1, "send tx flowc error");
222 csk_set_flag(csk, CSK_TX_DATA_SENT);
225 csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
226 chtls_purge_write_queue(sk);
228 csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
229 if (sk->sk_state != TCP_SYN_RECV)
230 chtls_send_abort(sk, mode, skb);
239 static void release_tcp_port(struct sock *sk)
241 if (inet_csk(sk)->icsk_bind_hash)
245 static void tcp_uncork(struct sock *sk)
247 struct tcp_sock *tp = tcp_sk(sk);
249 if (tp->nonagle & TCP_NAGLE_CORK) {
250 tp->nonagle &= ~TCP_NAGLE_CORK;
251 chtls_tcp_push(sk, 0);
255 static void chtls_close_conn(struct sock *sk)
257 struct cpl_close_con_req *req;
258 struct chtls_sock *csk;
263 len = roundup(sizeof(struct cpl_close_con_req), 16);
264 csk = rcu_dereference_sk_user_data(sk);
267 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
268 req = (struct cpl_close_con_req *)__skb_put(skb, len);
270 req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
271 FW_WR_IMMDLEN_V(sizeof(*req) -
273 req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
274 FW_WR_FLOWID_V(tid));
276 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
279 skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
280 if (sk->sk_state != TCP_SYN_SENT)
281 chtls_push_frames(csk, 1);
285 * Perform a state transition during close and return the actions indicated
286 * for the transition. Do not make this function inline; the main reason
287 * it exists at all is to avoid multiple inlining of tcp_set_state.
289 static int make_close_transition(struct sock *sk)
291 int next = (int)new_state[sk->sk_state];
293 tcp_set_state(sk, next & TCP_STATE_MASK);
294 return next & TCP_ACTION_FIN;
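/*
 * Offload analogue of tcp_close(): shut down both directions, purge the
 * receive queues, then either abort the connection (unread data or
 * SYN_SENT), disconnect immediately for zero-linger sockets, or perform
 * the normal FIN transition via make_close_transition() and wait for the
 * close to complete.
 */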
297 void chtls_close(struct sock *sk, long timeout)
299 int data_lost, prev_state;
300 struct chtls_sock *csk;
302 csk = rcu_dereference_sk_user_data(sk);
305 sk->sk_shutdown |= SHUTDOWN_MASK;
307 data_lost = skb_queue_len(&sk->sk_receive_queue);
308 data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
309 chtls_purge_recv_queue(sk);
310 chtls_purge_receive_queue(sk);
312 if (sk->sk_state == TCP_CLOSE) {
314 } else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
315 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
316 release_tcp_port(sk);
318 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
319 sk->sk_prot->disconnect(sk, 0);
320 } else if (make_close_transition(sk)) {
321 chtls_close_conn(sk);
325 sk_stream_wait_close(sk, timeout);
328 prev_state = sk->sk_state;
337 if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
340 if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
341 !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
344 skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
346 chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
349 if (sk->sk_state == TCP_CLOSE)
350 inet_csk_destroy_sock(sk);
359 * Wait until a socket enters one of the given states.
361 static int wait_for_states(struct sock *sk, unsigned int states)
363 DECLARE_WAITQUEUE(wait, current);
364 struct socket_wq _sk_wq;
371 * We want this to work even when there's no associated struct socket.
372 * In that case we provide a temporary wait_queue_head_t.
375 init_waitqueue_head(&_sk_wq.wait);
376 _sk_wq.fasync_list = NULL;
377 init_rcu_head_on_stack(&_sk_wq.rcu);
378 RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
381 add_wait_queue(sk_sleep(sk), &wait);
382 while (!sk_in_state(sk, states)) {
383 if (!current_timeo) {
387 if (signal_pending(current)) {
388 err = sock_intr_errno(current_timeo);
391 set_current_state(TASK_UNINTERRUPTIBLE);
393 if (!sk_in_state(sk, states))
394 current_timeo = schedule_timeout(current_timeo);
395 __set_current_state(TASK_RUNNING);
398 remove_wait_queue(sk_sleep(sk), &wait);
400 if (rcu_dereference(sk->sk_wq) == &_sk_wq)
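/*
 * Offload analogue of tcp_disconnect(): purge all queues and, if the
 * connection is still open, signal ECONNRESET, send an RST to the peer
 * and wait for the socket to reach TCP_CLOSE before handing the rest of
 * the work to tcp_disconnect().
 */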
405 int chtls_disconnect(struct sock *sk, int flags)
411 chtls_purge_recv_queue(sk);
412 chtls_purge_receive_queue(sk);
413 chtls_purge_write_queue(sk);
415 if (sk->sk_state != TCP_CLOSE) {
416 sk->sk_err = ECONNRESET;
417 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
418 err = wait_for_states(sk, TCPF_CLOSE);
422 chtls_purge_recv_queue(sk);
423 chtls_purge_receive_queue(sk);
424 tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
425 return tcp_disconnect(sk, flags);
428 #define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
429 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
430 void chtls_shutdown(struct sock *sk, int how)
432 if ((how & SEND_SHUTDOWN) &&
433 sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
434 make_close_transition(sk))
435 chtls_close_conn(sk);
438 void chtls_destroy_sock(struct sock *sk)
440 struct chtls_sock *csk;
442 csk = rcu_dereference_sk_user_data(sk);
443 chtls_purge_recv_queue(sk);
444 csk->ulp_mode = ULP_MODE_NONE;
445 chtls_purge_write_queue(sk);
447 kref_put(&csk->kref, chtls_sock_release);
448 sk->sk_prot = &tcp_prot;
449 sk->sk_prot->destroy(sk);
452 static void reset_listen_child(struct sock *child)
454 struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
457 skb = alloc_ctrl_skb(csk->txdata_skb_cache,
458 sizeof(struct cpl_abort_req));
460 chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
462 INC_ORPHAN_COUNT(child);
463 if (child->sk_state == TCP_CLOSE)
464 inet_csk_destroy_sock(child);
467 static void chtls_disconnect_acceptq(struct sock *listen_sk)
469 struct request_sock **pprev;
471 pprev = ACCEPT_QUEUE(listen_sk);
473 struct request_sock *req = *pprev;
475 if (req->rsk_ops == &chtls_rsk_ops) {
476 struct sock *child = req->sk;
478 *pprev = req->dl_next;
479 sk_acceptq_removed(listen_sk);
484 release_tcp_port(child);
485 reset_listen_child(child);
486 bh_unlock_sock(child);
490 pprev = &req->dl_next;
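/*
 * Listening sockets are tracked in a small hash table keyed on the
 * struct sock pointer so that the server TID (stid) allocated for a
 * listener can be recovered when the listen is stopped.
 */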
495 static int listen_hashfn(const struct sock *sk)
497 return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
500 static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
504 struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
507 int key = listen_hashfn(sk);
511 spin_lock(&cdev->listen_lock);
512 p->next = cdev->listen_hash_tab[key];
513 cdev->listen_hash_tab[key] = p;
514 spin_unlock(&cdev->listen_lock);
519 static int listen_hash_find(struct chtls_dev *cdev,
522 struct listen_info *p;
526 key = listen_hashfn(sk);
528 spin_lock(&cdev->listen_lock);
529 for (p = cdev->listen_hash_tab[key]; p; p = p->next)
534 spin_unlock(&cdev->listen_lock);
538 static int listen_hash_del(struct chtls_dev *cdev,
541 struct listen_info *p, **prev;
545 key = listen_hashfn(sk);
546 prev = &cdev->listen_hash_tab[key];
548 spin_lock(&cdev->listen_lock);
549 for (p = *prev; p; prev = &p->next, p = p->next)
556 spin_unlock(&cdev->listen_lock);
560 static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
562 struct request_sock *req;
563 struct chtls_sock *csk;
565 csk = rcu_dereference_sk_user_data(child);
566 req = csk->passive_reap_next;
568 reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
569 __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
570 chtls_reqsk_free(req);
571 csk->passive_reap_next = NULL;
574 static void chtls_reset_synq(struct listen_ctx *listen_ctx)
576 struct sock *listen_sk = listen_ctx->lsk;
578 while (!skb_queue_empty(&listen_ctx->synq)) {
579 struct chtls_sock *csk =
580 container_of((struct synq *)__skb_dequeue
581 (&listen_ctx->synq), struct chtls_sock, synq);
582 struct sock *child = csk->sk;
584 cleanup_syn_rcv_conn(child, listen_sk);
588 release_tcp_port(child);
589 reset_listen_child(child);
590 bh_unlock_sock(child);
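/*
 * Offload a listening socket: only PF_INET listeners on a fully
 * initialised adapter are handled. Allocate a server TID (stid), record
 * the listener in the listen hash table and ask the hardware to start
 * the server via cxgb4_create_server(); unwind everything on failure.
 */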
596 int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
598 struct net_device *ndev;
599 struct listen_ctx *ctx;
600 struct adapter *adap;
601 struct port_info *pi;
605 if (sk->sk_family != PF_INET)
609 ndev = chtls_ipv4_netdev(cdev, sk);
614 pi = netdev_priv(ndev);
616 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
619 if (listen_hash_find(cdev, sk) >= 0) /* already have it */
622 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
626 __module_get(THIS_MODULE);
629 ctx->state = T4_LISTEN_START_PENDING;
630 skb_queue_head_init(&ctx->synq);
632 stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
637 if (!listen_hash_add(cdev, sk, stid))
640 ret = cxgb4_create_server(ndev, stid,
641 inet_sk(sk)->inet_rcv_saddr,
642 inet_sk(sk)->inet_sport, 0,
643 cdev->lldi->rxq_ids[0]);
645 ret = net_xmit_errno(ret);
650 listen_hash_del(cdev, sk);
652 cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
656 module_put(THIS_MODULE);
660 void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
662 struct listen_ctx *listen_ctx;
665 stid = listen_hash_del(cdev, sk);
669 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
670 chtls_reset_synq(listen_ctx);
672 cxgb4_remove_server(cdev->lldi->ports[0], stid,
673 cdev->lldi->rxq_ids[0], 0);
674 chtls_disconnect_acceptq(sk);
677 static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
679 struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
680 unsigned int stid = GET_TID(rpl);
681 struct listen_ctx *listen_ctx;
683 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
685 return CPL_RET_BUF_DONE;
687 if (listen_ctx->state == T4_LISTEN_START_PENDING) {
688 listen_ctx->state = T4_LISTEN_STARTED;
689 return CPL_RET_BUF_DONE;
692 if (rpl->status != CPL_ERR_NONE) {
693 pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
695 return CPL_RET_BUF_DONE;
697 cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
698 sock_put(listen_ctx->lsk);
700 module_put(THIS_MODULE);
705 static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
707 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
708 struct listen_ctx *listen_ctx;
713 data = lookup_stid(cdev->tids, stid);
714 listen_ctx = (struct listen_ctx *)data;
716 if (rpl->status != CPL_ERR_NONE) {
717 pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
719 return CPL_RET_BUF_DONE;
722 cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
723 sock_put(listen_ctx->lsk);
725 module_put(THIS_MODULE);
730 static void chtls_release_resources(struct sock *sk)
732 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
733 struct chtls_dev *cdev = csk->cdev;
734 unsigned int tid = csk->tid;
735 struct tid_info *tids;
741 kfree_skb(csk->txdata_skb_cache);
742 csk->txdata_skb_cache = NULL;
744 if (csk->l2t_entry) {
745 cxgb4_l2t_release(csk->l2t_entry);
746 csk->l2t_entry = NULL;
749 cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
753 static void chtls_conn_done(struct sock *sk)
755 if (sock_flag(sk, SOCK_DEAD))
756 chtls_purge_receive_queue(sk);
757 sk_wakeup_sleepers(sk, 0);
761 static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
764 * If the server is still open we clean up the child connection,
765 * otherwise the server already did the clean up as it was purging
766 * its SYN queue and the skb was just sitting in its backlog.
768 if (likely(parent->sk_state == TCP_LISTEN)) {
769 cleanup_syn_rcv_conn(child, parent);
770 /* Without the call to sock_orphan() below,
771 * we would leak the socket in the syn_flood test,
772 * because inet_csk_destroy_sock() is not called from
773 * tcp_done() when the SOCK_DEAD flag is not set.
774 * The core kernel handles this differently: the new
775 * socket is created only after the 3-way handshake completes.
778 percpu_counter_inc((child)->sk_prot->orphan_count);
779 chtls_release_resources(child);
780 chtls_conn_done(child);
782 if (csk_flag(child, CSK_RST_ABORTED)) {
783 chtls_release_resources(child);
784 chtls_conn_done(child);
789 static void pass_open_abort(struct sock *child, struct sock *parent,
792 do_abort_syn_rcv(child, parent);
796 static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
798 pass_open_abort(skb->sk, lsk, skb);
801 static void chtls_pass_open_arp_failure(struct sock *sk,
804 const struct request_sock *oreq;
805 struct chtls_sock *csk;
806 struct chtls_dev *cdev;
810 csk = rcu_dereference_sk_user_data(sk);
814 * If the connection is being aborted because the parent listening
815 * socket is going away, there is nothing to do; the ABORT_REQ will close
818 if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
823 oreq = csk->passive_reap_next;
824 data = lookup_stid(cdev->tids, oreq->ts_recent);
825 parent = ((struct listen_ctx *)data)->lsk;
827 bh_lock_sock(parent);
828 if (!sock_owned_by_user(parent)) {
829 pass_open_abort(sk, parent, skb);
831 BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
832 __sk_add_backlog(parent, skb);
834 bh_unlock_sock(parent);
837 static void chtls_accept_rpl_arp_failure(void *handle,
840 struct sock *sk = (struct sock *)handle;
843 process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
847 static unsigned int chtls_select_mss(const struct chtls_sock *csk,
849 struct cpl_pass_accept_req *req)
851 struct chtls_dev *cdev;
852 struct dst_entry *dst;
853 unsigned int tcpoptsz;
854 unsigned int iphdrsz;
855 unsigned int mtu_idx;
860 mss = ntohs(req->tcpopt.mss);
862 dst = __sk_dst_get(sk);
867 iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
868 if (req->tcpopt.tstamp)
869 tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
871 tp->advmss = dst_metric_advmss(dst);
872 if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
873 tp->advmss = USER_MSS(tp);
874 if (tp->advmss > pmtu - iphdrsz)
875 tp->advmss = pmtu - iphdrsz;
876 if (mss && tp->advmss > mss)
879 tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
881 tp->advmss - tcpoptsz,
883 tp->advmss -= iphdrsz;
885 inet_csk(sk)->icsk_pmtu_cookie = pmtu;
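/*
 * Choose the smallest receive window scale (capped at 14) whose scaled
 * 64KB base window covers the available receive space; e.g. roughly
 * 200KB of space yields a wscale of 2 since 65535 << 2 already covers it.
 */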
889 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
893 if (space > MAX_RCV_WND)
895 if (win_clamp && win_clamp < space)
899 while (wscale < 14 && (65535 << wscale) < space)
905 static void chtls_pass_accept_rpl(struct sk_buff *skb,
906 struct cpl_pass_accept_req *req,
910 struct cpl_t5_pass_accept_rpl *rpl5;
911 struct cxgb4_lld_info *lldi;
912 const struct tcphdr *tcph;
913 const struct tcp_sock *tp;
914 struct chtls_sock *csk;
922 csk = sk->sk_user_data;
924 lldi = csk->cdev->lldi;
925 len = roundup(sizeof(*rpl5), 16);
927 rpl5 = __skb_put_zero(skb, len);
928 INIT_TP_WR(rpl5, tid);
930 OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
932 csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
934 opt0 = TCAM_BYPASS_F |
935 WND_SCALE_V(RCV_WSCALE(tp)) |
936 MSS_IDX_V(csk->mtu_idx) |
937 L2T_IDX_V(csk->l2t_entry->idx) |
938 NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
939 TX_CHAN_V(csk->tx_chan) |
940 SMAC_SEL_V(csk->smac_idx) |
941 DSCP_V(csk->tos >> 2) |
942 ULP_MODE_V(ULP_MODE_TLS) |
943 RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));
945 opt2 = RX_CHANNEL_V(0) |
946 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
948 if (!is_t5(lldi->adapter_type))
949 opt2 |= RX_FC_DISABLE_F;
950 if (req->tcpopt.tstamp)
951 opt2 |= TSTAMPS_EN_F;
952 if (req->tcpopt.sack)
954 hlen = ntohl(req->hdr_len);
956 tcph = (struct tcphdr *)((u8 *)(req + 1) +
957 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
958 if (tcph->ece && tcph->cwr)
959 opt2 |= CCTRL_ECN_V(1);
960 opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
962 opt2 |= T5_OPT_2_VALID_F;
963 rpl5->opt0 = cpu_to_be64(opt0);
964 rpl5->opt2 = cpu_to_be32(opt2);
965 rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
966 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
967 t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
968 cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
971 static void inet_inherit_port(struct inet_hashinfo *hash_info,
972 struct sock *lsk, struct sock *newsk)
975 __inet_inherit_port(lsk, newsk);
979 static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
985 BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
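/*
 * Size the connection's send and receive windows from the egress port's
 * link speed: both start at the 256KB 10Gb/s default and are scaled up
 * by the link-speed multiple of 10Gb/s on faster ports.
 */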
989 static void chtls_set_tcp_window(struct chtls_sock *csk)
991 struct net_device *ndev = csk->egress_dev;
992 struct port_info *pi = netdev_priv(ndev);
993 unsigned int linkspeed;
996 linkspeed = pi->link_cfg.speed;
997 scale = linkspeed / SPEED_10000;
998 #define CHTLS_10G_RCVWIN (256 * 1024)
999 csk->rcv_win = CHTLS_10G_RCVWIN;
1001 csk->rcv_win *= scale;
1002 #define CHTLS_10G_SNDWIN (256 * 1024)
1003 csk->snd_win = CHTLS_10G_SNDWIN;
1005 csk->snd_win *= scale;
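/*
 * Create the child socket for an offloaded passive open: clone the
 * listener with tcp_create_openreq_child(), resolve the route and
 * neighbour to the peer, allocate the chtls_sock and L2T entry, and fill
 * in addresses, queue indices, windows and ULP_MODE_TLS before the
 * accept reply is sent.
 */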
1008 static struct sock *chtls_recv_sock(struct sock *lsk,
1009 struct request_sock *oreq,
1011 const struct cpl_pass_accept_req *req,
1012 struct chtls_dev *cdev)
1014 struct inet_sock *newinet;
1015 const struct iphdr *iph;
1016 struct tls_context *ctx;
1017 struct net_device *ndev;
1018 struct chtls_sock *csk;
1019 struct dst_entry *dst;
1020 struct neighbour *n;
1021 struct tcp_sock *tp;
1027 iph = (const struct iphdr *)network_hdr;
1028 newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
1032 dst = inet_csk_route_child_sock(lsk, newsk, oreq);
1036 n = dst_neigh_lookup(dst, &iph->saddr);
1043 port_id = cxgb4_port_idx(ndev);
1045 csk = chtls_sock_create(cdev);
1049 csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
1050 if (!csk->l2t_entry)
1053 newsk->sk_user_data = csk;
1054 newsk->sk_backlog_rcv = chtls_backlog_rcv;
1057 newinet = inet_sk(newsk);
1059 newinet->inet_daddr = iph->saddr;
1060 newinet->inet_rcv_saddr = iph->daddr;
1061 newinet->inet_saddr = iph->daddr;
1063 oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1064 sk_setup_caps(newsk, dst);
1065 ctx = tls_get_ctx(lsk);
1066 newsk->sk_destruct = ctx->sk_destruct;
1068 csk->passive_reap_next = oreq;
1069 csk->tx_chan = cxgb4_port_chan(ndev);
1070 csk->port_id = port_id;
1071 csk->egress_dev = ndev;
1072 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1073 chtls_set_tcp_window(csk);
1074 tp->rcv_wnd = csk->rcv_win;
1075 csk->sndbuf = csk->snd_win;
1076 csk->ulp_mode = ULP_MODE_TLS;
1077 step = cdev->lldi->nrxq / cdev->lldi->nchan;
1078 csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
1079 rxq_idx = port_id * step;
1080 csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
1082 csk->sndbuf = newsk->sk_sndbuf;
1083 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1084 RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
1086 ipv4.sysctl_tcp_window_scaling,
1089 inet_inherit_port(&tcp_hashinfo, lsk, newsk);
1090 csk_set_flag(csk, CSK_CONN_INLINE);
1091 bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
1095 chtls_sock_release(&csk->kref);
1099 inet_csk_prepare_forced_close(newsk);
1102 chtls_reqsk_free(oreq);
1107 * Populate a TID_RELEASE WR. The skb must already be properly sized.
1109 static void mk_tid_release(struct sk_buff *skb,
1110 unsigned int chan, unsigned int tid)
1112 struct cpl_tid_release *req;
1115 len = roundup(sizeof(struct cpl_tid_release), 16);
1116 req = (struct cpl_tid_release *)__skb_put(skb, len);
1117 memset(req, 0, len);
1118 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1119 INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
1122 static int chtls_get_module(struct sock *sk)
1124 struct inet_connection_sock *icsk = inet_csk(sk);
1126 if (!try_module_get(icsk->icsk_ulp_ops->owner))
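/*
 * Process an incoming SYN for an offloaded listener: validate the
 * listener state and queue limits, build a request_sock from the
 * Ethernet/IP/TCP headers carried after the CPL, create the offloaded
 * child with chtls_recv_sock(), link it onto the listener's SYN queue
 * and send the CPL_PASS_ACCEPT_RPL; on failure release the TID instead.
 */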
1132 static void chtls_pass_accept_request(struct sock *sk,
1133 struct sk_buff *skb)
1135 struct cpl_t5_pass_accept_rpl *rpl;
1136 struct cpl_pass_accept_req *req;
1137 struct listen_ctx *listen_ctx;
1138 struct vlan_ethhdr *vlan_eh;
1139 struct request_sock *oreq;
1140 struct sk_buff *reply_skb;
1141 struct chtls_sock *csk;
1142 struct chtls_dev *cdev;
1143 struct tcphdr *tcph;
1152 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
1156 req = cplhdr(skb) + RSS_HDR;
1158 cdev = BLOG_SKB_CB(skb)->cdev;
1159 newsk = lookup_tid(cdev->tids, tid);
1160 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1162 pr_info("tid (%d) already in use\n", tid);
1166 len = roundup(sizeof(*rpl), 16);
1167 reply_skb = alloc_skb(len, GFP_ATOMIC);
1169 cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
1174 if (sk->sk_state != TCP_LISTEN)
1177 if (inet_csk_reqsk_queue_is_full(sk))
1180 if (sk_acceptq_is_full(sk))
1183 oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
1187 oreq->rsk_rcv_wnd = 0;
1188 oreq->rsk_window_clamp = 0;
1189 oreq->cookie_ts = 0;
1191 oreq->ts_recent = 0;
1193 eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
1194 if (eth_hdr_len == ETH_HLEN) {
1195 eh = (struct ethhdr *)(req + 1);
1196 iph = (struct iphdr *)(eh + 1);
1197 network_hdr = (void *)(eh + 1);
1199 vlan_eh = (struct vlan_ethhdr *)(req + 1);
1200 iph = (struct iphdr *)(vlan_eh + 1);
1201 network_hdr = (void *)(vlan_eh + 1);
1203 if (iph->version != 0x4)
1206 tcph = (struct tcphdr *)(iph + 1);
1207 skb_set_network_header(skb, (void *)iph - (void *)req);
1209 tcp_rsk(oreq)->tfo_listener = false;
1210 tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
1211 chtls_set_req_port(oreq, tcph->source, tcph->dest);
1212 chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
1213 ip_dsfield = ipv4_get_dsfield(iph);
1214 if (req->tcpopt.wsf <= 14 &&
1215 sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
1216 inet_rsk(oreq)->wscale_ok = 1;
1217 inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
1219 inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
1220 th_ecn = tcph->ece && tcph->cwr;
1222 ect = !INET_ECN_is_not_ect(ip_dsfield);
1223 ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
1224 if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
1225 inet_rsk(oreq)->ecn_ok = 1;
1228 newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
1232 if (chtls_get_module(newsk))
1234 inet_csk_reqsk_queue_added(sk);
1235 reply_skb->sk = newsk;
1236 chtls_install_cpl_ops(newsk);
1237 cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
1238 csk = rcu_dereference_sk_user_data(newsk);
1239 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
1240 csk->listen_ctx = listen_ctx;
1241 __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
1242 chtls_pass_accept_rpl(reply_skb, req, tid);
1247 chtls_reqsk_free(oreq);
1249 mk_tid_release(reply_skb, 0, tid);
1250 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
1255 * Handle a CPL_PASS_ACCEPT_REQ message.
1257 static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
1259 struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
1260 struct listen_ctx *ctx;
1266 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1269 data = lookup_stid(cdev->tids, stid);
1273 ctx = (struct listen_ctx *)data;
1276 if (unlikely(tid >= cdev->tids->ntids)) {
1277 pr_info("passive open TID %u too large\n", tid);
1281 BLOG_SKB_CB(skb)->cdev = cdev;
1282 process_cpl_msg(chtls_pass_accept_request, lsk, skb);
1287 * Completes some final bits of initialization for just established connections
1288 * and changes their state to TCP_ESTABLISHED.
1290 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
1292 static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
1294 struct tcp_sock *tp = tcp_sk(sk);
1296 tp->pushed_seq = snd_isn;
1297 tp->write_seq = snd_isn;
1298 tp->snd_nxt = snd_isn;
1299 tp->snd_una = snd_isn;
1300 inet_sk(sk)->inet_id = prandom_u32();
1301 assign_rxopt(sk, opt);
1303 if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
1304 tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);
1307 tcp_set_state(sk, TCP_ESTABLISHED);
1310 static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
1312 struct sk_buff *abort_skb;
1314 abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
1316 chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
1319 static struct sock *reap_list;
1320 static DEFINE_SPINLOCK(reap_list_lock);
1323 * Process the reap list.
1325 DECLARE_TASK_FUNC(process_reap_list, task_param)
1327 spin_lock_bh(&reap_list_lock);
1329 struct sock *sk = reap_list;
1330 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
1332 reap_list = csk->passive_reap_next;
1333 csk->passive_reap_next = NULL;
1334 spin_unlock(&reap_list_lock);
1338 chtls_abort_conn(sk, NULL);
1340 if (sk->sk_state == TCP_CLOSE)
1341 inet_csk_destroy_sock(sk);
1344 spin_lock(&reap_list_lock);
1346 spin_unlock_bh(&reap_list_lock);
1349 static DECLARE_WORK(reap_task, process_reap_list);
1351 static void add_to_reap_list(struct sock *sk)
1353 struct chtls_sock *csk = sk->sk_user_data;
1357 release_tcp_port(sk); /* release the port immediately */
1359 spin_lock(&reap_list_lock);
1360 csk->passive_reap_next = reap_list;
1362 if (!csk->passive_reap_next)
1363 schedule_work(&reap_task);
1364 spin_unlock(&reap_list_lock);
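/*
 * Once the 3-way handshake completes, move the child from the listener's
 * SYN queue to its accept queue and wake the listener; if the accept
 * queue is full, free the request and hand the child to the reap list.
 */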
1369 static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
1370 struct chtls_dev *cdev)
1372 struct request_sock *oreq;
1373 struct chtls_sock *csk;
1375 if (lsk->sk_state != TCP_LISTEN)
1378 csk = child->sk_user_data;
1379 oreq = csk->passive_reap_next;
1380 csk->passive_reap_next = NULL;
1382 reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
1383 __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
1385 if (sk_acceptq_is_full(lsk)) {
1386 chtls_reqsk_free(oreq);
1387 add_to_reap_list(child);
1389 refcount_set(&oreq->rsk_refcnt, 1);
1390 inet_csk_reqsk_queue_add(lsk, oreq, child);
1391 lsk->sk_data_ready(lsk);
1395 static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
1397 struct sock *child = skb->sk;
1400 add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
1404 static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
1406 struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
1407 struct chtls_sock *csk;
1408 struct sock *lsk, *sk;
1411 hwtid = GET_TID(req);
1412 sk = lookup_tid(cdev->tids, hwtid);
1414 return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
1417 if (unlikely(sock_owned_by_user(sk))) {
1423 csk = sk->sk_user_data;
1424 csk->wr_max_credits = 64;
1425 csk->wr_credits = 64;
1426 csk->wr_unacked = 0;
1427 make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
1428 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1429 sk->sk_state_change(sk);
1430 if (unlikely(sk->sk_socket))
1431 sk_wake_async(sk, 0, POLL_OUT);
1433 data = lookup_stid(cdev->tids, stid);
1434 lsk = ((struct listen_ctx *)data)->lsk;
1437 if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
1438 /* removed from synq */
1439 bh_unlock_sock(lsk);
1444 if (likely(!sock_owned_by_user(lsk))) {
1446 add_pass_open_to_parent(sk, lsk, cdev);
1449 BLOG_SKB_CB(skb)->cdev = cdev;
1450 BLOG_SKB_CB(skb)->backlog_rcv =
1451 bl_add_pass_open_to_parent;
1452 __sk_add_backlog(lsk, skb);
1454 bh_unlock_sock(lsk);
1462 * Handle receipt of an urgent pointer.
1464 static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
1466 struct tcp_sock *tp = tcp_sk(sk);
1469 if (tp->urg_data && !after(urg_seq, tp->urg_seq))
1470 return; /* duplicate pointer */
1473 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
1474 !sock_flag(sk, SOCK_URGINLINE) &&
1475 tp->copied_seq != tp->rcv_nxt) {
1476 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1479 if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
1480 chtls_free_skb(sk, skb);
1483 tp->urg_data = TCP_URG_NOTYET;
1484 tp->urg_seq = urg_seq;
1487 static void check_sk_callbacks(struct chtls_sock *csk)
1489 struct sock *sk = csk->sk;
1491 if (unlikely(sk->sk_user_data &&
1492 !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
1493 csk_set_flag(csk, CSK_CALLBACKS_CHKD);
1497 * Handles Rx data that arrives in a state where the socket isn't accepting
1500 static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
1502 if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
1503 chtls_abort_conn(sk, skb);
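/*
 * CPL_RX_DATA carries plain (non-TLS) payload: strip the CPL header,
 * record the sequence number and PSH flag, handle any urgent data,
 * advance rcv_nxt and queue the skb on sk_receive_queue, waking the
 * receiver unless the socket is dead.
 */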
1508 static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
1510 struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
1511 struct chtls_sock *csk;
1512 struct tcp_sock *tp;
1514 csk = rcu_dereference_sk_user_data(sk);
1517 if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
1518 handle_excess_rx(sk, skb);
1522 ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
1523 ULP_SKB_CB(skb)->psh = hdr->psh;
1524 skb_ulp_mode(skb) = ULP_MODE_NONE;
1526 skb_reset_transport_header(skb);
1527 __skb_pull(skb, sizeof(*hdr) + RSS_HDR);
1529 __skb_trim(skb, ntohs(hdr->len));
1531 if (unlikely(hdr->urg))
1532 handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
1533 if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
1534 tp->urg_seq - tp->rcv_nxt < skb->len))
1535 tp->urg_data = TCP_URG_VALID |
1536 skb->data[tp->urg_seq - tp->rcv_nxt];
1538 if (unlikely(hdr->dack_mode != csk->delack_mode)) {
1539 csk->delack_mode = hdr->dack_mode;
1540 csk->delack_seq = tp->rcv_nxt;
1543 tcp_hdr(skb)->fin = 0;
1544 tp->rcv_nxt += skb->len;
1546 __skb_queue_tail(&sk->sk_receive_queue, skb);
1548 if (!sock_flag(sk, SOCK_DEAD)) {
1549 check_sk_callbacks(csk);
1550 sk->sk_data_ready(sk);
1554 static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
1556 struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
1557 unsigned int hwtid = GET_TID(req);
1560 sk = lookup_tid(cdev->tids, hwtid);
1561 if (unlikely(!sk)) {
1562 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1565 skb_dst_set(skb, NULL);
1566 process_cpl_msg(chtls_recv_data, sk, skb);
1570 static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
1572 struct cpl_tls_data *hdr = cplhdr(skb);
1573 struct chtls_sock *csk;
1574 struct chtls_hws *tlsk;
1575 struct tcp_sock *tp;
1577 csk = rcu_dereference_sk_user_data(sk);
1578 tlsk = &csk->tlshws;
1581 if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
1582 handle_excess_rx(sk, skb);
1586 ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
1587 ULP_SKB_CB(skb)->flags = 0;
1588 skb_ulp_mode(skb) = ULP_MODE_TLS;
1590 skb_reset_transport_header(skb);
1591 __skb_pull(skb, sizeof(*hdr));
1594 CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));
1596 if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
1597 tp->rcv_nxt < skb->len))
1598 tp->urg_data = TCP_URG_VALID |
1599 skb->data[tp->urg_seq - tp->rcv_nxt];
1601 tcp_hdr(skb)->fin = 0;
1602 tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
1603 __skb_queue_tail(&tlsk->sk_recv_queue, skb);
1606 static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
1608 struct cpl_tls_data *req = cplhdr(skb);
1609 unsigned int hwtid = GET_TID(req);
1612 sk = lookup_tid(cdev->tids, hwtid);
1613 if (unlikely(!sk)) {
1614 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1617 skb_dst_set(skb, NULL);
1618 process_cpl_msg(chtls_recv_pdu, sk, skb);
1622 static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
1624 struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);
1626 skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
1627 tls_cmp_hdr->length = ntohs((__force __be16)nlen);
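/*
 * CPL_RX_TLS_CMP signals completion of a received TLS record: pair the
 * completion header with the payload previously queued by
 * chtls_recv_pdu() on tlshws.sk_recv_queue, mark error records as
 * CONTENT_TYPE_ERROR, and move both skbs to sk_receive_queue for the
 * receive path to reassemble.
 */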
1630 static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
1632 struct tlsrx_cmp_hdr *tls_hdr_pkt;
1633 struct cpl_rx_tls_cmp *cmp_cpl;
1634 struct sk_buff *skb_rec;
1635 struct chtls_sock *csk;
1636 struct chtls_hws *tlsk;
1637 struct tcp_sock *tp;
1639 cmp_cpl = cplhdr(skb);
1640 csk = rcu_dereference_sk_user_data(sk);
1641 tlsk = &csk->tlshws;
1644 ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
1645 ULP_SKB_CB(skb)->flags = 0;
1647 skb_reset_transport_header(skb);
1648 __skb_pull(skb, sizeof(*cmp_cpl));
1649 tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
1650 if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
1651 tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
1653 __skb_trim(skb, TLS_HEADER_LENGTH);
1656 CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
1658 ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
1659 skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
1661 __skb_queue_tail(&sk->sk_receive_queue, skb);
1663 chtls_set_hdrlen(skb, tlsk->pldlen);
1665 __skb_queue_tail(&sk->sk_receive_queue, skb);
1666 __skb_queue_tail(&sk->sk_receive_queue, skb_rec);
1669 if (!sock_flag(sk, SOCK_DEAD)) {
1670 check_sk_callbacks(csk);
1671 sk->sk_data_ready(sk);
1675 static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
1677 struct cpl_rx_tls_cmp *req = cplhdr(skb);
1678 unsigned int hwtid = GET_TID(req);
1681 sk = lookup_tid(cdev->tids, hwtid);
1682 if (unlikely(!sk)) {
1683 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1686 skb_dst_set(skb, NULL);
1687 process_cpl_msg(chtls_rx_hdr, sk, skb);
1692 static void chtls_timewait(struct sock *sk)
1694 struct tcp_sock *tp = tcp_sk(sk);
1697 tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
1699 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
1702 static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
1704 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
1706 sk->sk_shutdown |= RCV_SHUTDOWN;
1707 sock_set_flag(sk, SOCK_DONE);
1709 switch (sk->sk_state) {
1711 case TCP_ESTABLISHED:
1712 tcp_set_state(sk, TCP_CLOSE_WAIT);
1715 tcp_set_state(sk, TCP_CLOSING);
1718 chtls_release_resources(sk);
1719 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1720 chtls_conn_done(sk);
1725 pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
1728 if (!sock_flag(sk, SOCK_DEAD)) {
1729 sk->sk_state_change(sk);
1730 /* Do not send POLL_HUP for half duplex close. */
1732 if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1733 sk->sk_state == TCP_CLOSE)
1734 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
1736 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1740 static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
1742 struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
1743 struct chtls_sock *csk;
1744 struct tcp_sock *tp;
1746 csk = rcu_dereference_sk_user_data(sk);
1749 tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
1751 switch (sk->sk_state) {
1753 chtls_release_resources(sk);
1754 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1755 chtls_conn_done(sk);
1760 chtls_release_resources(sk);
1761 chtls_conn_done(sk);
1764 tcp_set_state(sk, TCP_FIN_WAIT2);
1765 sk->sk_shutdown |= SEND_SHUTDOWN;
1767 if (!sock_flag(sk, SOCK_DEAD))
1768 sk->sk_state_change(sk);
1769 else if (tcp_sk(sk)->linger2 < 0 &&
1770 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
1771 chtls_abort_conn(sk, skb);
1774 pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
1779 static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
1780 size_t len, gfp_t gfp)
1782 if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
1783 WARN_ONCE(skb->len < len, "skb alloc error");
1784 __skb_trim(skb, len);
1787 skb = alloc_skb(len, gfp);
1789 __skb_put(skb, len);
1794 static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
1797 struct cpl_abort_rpl *rpl = cplhdr(skb);
1799 INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
1803 static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
1805 struct cpl_abort_req_rss *req = cplhdr(skb);
1806 struct sk_buff *reply_skb;
1808 reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
1809 GFP_KERNEL | __GFP_NOFAIL);
1810 __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
1811 set_abort_rpl_wr(reply_skb, GET_TID(req),
1812 (req->status & CPL_ABORT_NO_RST));
1813 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
1814 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
1818 static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
1819 struct chtls_dev *cdev, int status, int queue)
1821 struct cpl_abort_req_rss *req = cplhdr(skb);
1822 struct sk_buff *reply_skb;
1823 struct chtls_sock *csk;
1825 csk = rcu_dereference_sk_user_data(sk);
1827 reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
1831 req->status = (queue << 1);
1832 send_defer_abort_rpl(cdev, skb);
1836 set_abort_rpl_wr(reply_skb, GET_TID(req), status);
1839 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
1840 if (csk_conn_inline(csk)) {
1841 struct l2t_entry *e = csk->l2t_entry;
1843 if (e && sk->sk_state != TCP_SYN_RECV) {
1844 cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
1848 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
1852 * Add an skb to the deferred skb queue for processing from process context.
1854 static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
1855 defer_handler_t handler)
1857 DEFERRED_SKB_CB(skb)->handler = handler;
1858 spin_lock_bh(&cdev->deferq.lock);
1859 __skb_queue_tail(&cdev->deferq, skb);
1860 if (skb_queue_len(&cdev->deferq) == 1)
1861 schedule_work(&cdev->deferq_task);
1862 spin_unlock_bh(&cdev->deferq.lock);
1865 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
1866 struct chtls_dev *cdev,
1867 int status, int queue)
1869 struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
1870 struct sk_buff *reply_skb;
1871 struct chtls_sock *csk;
1874 csk = rcu_dereference_sk_user_data(sk);
1877 reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
1879 req->status = (queue << 1) | status;
1880 t4_defer_reply(skb, cdev, send_defer_abort_rpl);
1884 set_abort_rpl_wr(reply_skb, tid, status);
1885 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
1886 if (csk_conn_inline(csk)) {
1887 struct l2t_entry *e = csk->l2t_entry;
1889 if (e && sk->sk_state != TCP_SYN_RECV) {
1890 cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
1894 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
1899 * This is run from a listener's backlog to abort a child connection in
1900 * SYN_RCV state (i.e., one on the listener's SYN queue).
1902 static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
1904 struct chtls_sock *csk;
1909 csk = rcu_dereference_sk_user_data(child);
1910 queue = csk->txq_idx;
1913 do_abort_syn_rcv(child, lsk);
1914 send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
1915 CPL_ABORT_NO_RST, queue);
1918 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
1920 const struct request_sock *oreq;
1921 struct listen_ctx *listen_ctx;
1922 struct chtls_sock *csk;
1923 struct chtls_dev *cdev;
1927 csk = sk->sk_user_data;
1928 oreq = csk->passive_reap_next;
1934 ctx = lookup_stid(cdev->tids, oreq->ts_recent);
1938 listen_ctx = (struct listen_ctx *)ctx;
1939 psk = listen_ctx->lsk;
1942 if (!sock_owned_by_user(psk)) {
1943 int queue = csk->txq_idx;
1945 do_abort_syn_rcv(sk, psk);
1946 send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
1949 BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
1950 __sk_add_backlog(psk, skb);
1952 bh_unlock_sock(psk);
1956 static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
1958 const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
1959 struct chtls_sock *csk = sk->sk_user_data;
1960 int rst_status = CPL_ABORT_NO_RST;
1961 int queue = csk->txq_idx;
1963 if (is_neg_adv(req->status)) {
1964 if (sk->sk_state == TCP_SYN_RECV)
1965 chtls_set_tcb_tflag(sk, 0, 0);
1971 csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);
1973 if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
1974 !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
1975 struct tcp_sock *tp = tcp_sk(sk);
1977 if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
1978 WARN_ONCE(1, "send_tx_flowc error");
1979 csk_set_flag(csk, CSK_TX_DATA_SENT);
1982 csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
1984 if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
1985 sk->sk_err = ETIMEDOUT;
1987 if (!sock_flag(sk, SOCK_DEAD))
1988 sk->sk_error_report(sk);
1990 if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
1993 chtls_release_resources(sk);
1994 chtls_conn_done(sk);
1997 chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
2000 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
2002 struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
2003 struct chtls_sock *csk;
2004 struct chtls_dev *cdev;
2006 csk = rcu_dereference_sk_user_data(sk);
2009 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
2010 csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
2011 if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
2012 if (sk->sk_state == TCP_SYN_SENT) {
2013 cxgb4_remove_tid(cdev->tids,
2019 chtls_release_resources(sk);
2020 chtls_conn_done(sk);
2026 static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
2028 struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
2029 void (*fn)(struct sock *sk, struct sk_buff *skb);
2030 unsigned int hwtid = GET_TID(req);
2034 opcode = ((const struct rss_header *)cplhdr(skb))->opcode;
2036 sk = lookup_tid(cdev->tids, hwtid);
2041 case CPL_PEER_CLOSE:
2042 fn = chtls_peer_close;
2044 case CPL_CLOSE_CON_RPL:
2045 fn = chtls_close_con_rpl;
2047 case CPL_ABORT_REQ_RSS:
2048 fn = chtls_abort_req_rss;
2050 case CPL_ABORT_RPL_RSS:
2051 fn = chtls_abort_rpl_rss;
2057 process_cpl_msg(fn, sk, skb);
2065 static struct sk_buff *dequeue_wr(struct sock *sk)
2067 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
2068 struct sk_buff *skb = csk->wr_skb_head;
2071 /* Don't bother clearing the tail */
2072 csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
2073 WR_SKB_CB(skb)->next_wr = NULL;
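/*
 * CPL_FW4_ACK returns transmit credits from the firmware: credit the
 * socket, retire the completed work-request skbs on the WR list, and
 * when the SEQVAL flag is set advance snd_una and the receive timestamp
 * before pushing any remaining queued tx data.
 */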
2078 static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
2080 struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
2081 struct chtls_sock *csk = sk->sk_user_data;
2082 struct tcp_sock *tp = tcp_sk(sk);
2083 u32 credits = hdr->credits;
2086 snd_una = ntohl(hdr->snd_una);
2087 csk->wr_credits += credits;
2089 if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
2090 csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;
2093 struct sk_buff *pskb = csk->wr_skb_head;
2096 if (unlikely(!pskb)) {
2097 if (csk->wr_nondata)
2098 csk->wr_nondata -= credits;
2101 csum = (__force u32)pskb->csum;
2102 if (unlikely(credits < csum)) {
2103 pskb->csum = (__force __wsum)(csum - credits);
2110 if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
2111 if (unlikely(before(snd_una, tp->snd_una))) {
2116 if (tp->snd_una != snd_una) {
2117 tp->snd_una = snd_una;
2118 tp->rcv_tstamp = tcp_time_stamp(tp);
2119 if (tp->snd_una == tp->snd_nxt &&
2120 !csk_flag_nochk(csk, CSK_TX_FAILOVER))
2121 csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
2125 if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
2126 unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);
2128 csk->wr_credits -= fclen16;
2129 csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
2130 csk_reset_flag(csk, CSK_TX_FAILOVER);
2132 if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
2133 sk->sk_write_space(sk);
2138 static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
2140 struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
2141 unsigned int hwtid = GET_TID(rpl);
2144 sk = lookup_tid(cdev->tids, hwtid);
2145 if (unlikely(!sk)) {
2146 pr_err("can't find conn. for hwtid %u.\n", hwtid);
2149 process_cpl_msg(chtls_rx_ack, sk, skb);
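/*
 * Dispatch table mapping CPL opcodes to the handlers above; the chtls
 * receive path indexes it with the opcode of each inbound CPL message.
 */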
2154 chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
2155 [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
2156 [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
2157 [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req,
2158 [CPL_PASS_ESTABLISH] = chtls_pass_establish,
2159 [CPL_RX_DATA] = chtls_rx_data,
2160 [CPL_TLS_DATA] = chtls_rx_pdu,
2161 [CPL_RX_TLS_CMP] = chtls_rx_cmp,
2162 [CPL_PEER_CLOSE] = chtls_conn_cpl,
2163 [CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
2164 [CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
2165 [CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
2166 [CPL_FW4_ACK] = chtls_wr_ack,