1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */ | |
3 | ||
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
5 | ||
6 | #include <linux/skbuff.h> | |
7 | #include <linux/module.h> | |
21f6f946 | 8 | #include <linux/highmem.h> |
9 | #include <linux/ip.h> |
10 | #include <net/ipv6.h> | |
11 | #include <linux/netdevice.h> | |
34aba2c4 | 12 | #include "chcr_ktls.h" |
13 | |
14 | static LIST_HEAD(uld_ctx_list); | |
15 | static DEFINE_MUTEX(dev_mutex); | |
34aba2c4 | 16 | |
17 | /* chcr_get_nfrags_to_send: get the remaining nfrags after start offset |
18 | * @skb: skb | |
19 | * @start: start offset. | |
20 | * @len: how much data to send after @start | |
21 | */ | |
22 | static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len) | |
23 | { | |
24 | struct skb_shared_info *si = skb_shinfo(skb); | |
25 | u32 frag_size, skb_linear_data_len = skb_headlen(skb); | |
26 | u8 nfrags = 0, frag_idx = 0; | |
27 | skb_frag_t *frag; | |
28 | ||
29 | /* if it's a linear skb then return 1 */
30 | if (!skb_is_nonlinear(skb)) | |
31 | return 1; | |
32 | ||
33 | if (unlikely(start < skb_linear_data_len)) { | |
34 | frag_size = min(len, skb_linear_data_len - start); | |
35 | start = 0; | |
36 | } else { | |
37 | start -= skb_linear_data_len; | |
38 | ||
39 | frag = &si->frags[frag_idx]; | |
40 | frag_size = skb_frag_size(frag); | |
41 | while (start >= frag_size) { | |
42 | start -= frag_size; | |
43 | frag_idx++; | |
44 | frag = &si->frags[frag_idx]; | |
45 | frag_size = skb_frag_size(frag); | |
46 | } | |
47 | frag_size = min(len, skb_frag_size(frag) - start); | |
48 | } | |
49 | len -= frag_size; | |
50 | nfrags++; | |
51 | ||
52 | while (len) { | |
53 | frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); | |
54 | len -= frag_size; | |
55 | nfrags++; | |
56 | frag_idx++; | |
57 | } | |
58 | return nfrags; | |
59 | } | |
60 | ||
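/* Illustrative example (added, not part of the original source): for an skb
 * with 100 bytes of linear data and two 1000 byte page fragments,
 * chcr_get_nfrags_to_send(skb, 600, 900) skips the linear data (600 >= 100),
 * lands 500 bytes into frag 0 and needs the last 500 bytes of frag 0 plus
 * 400 bytes of frag 1, so it returns 2.
 */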
61 | static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info); |
62 | /* | |
63 | * chcr_ktls_save_keys: calculate and save crypto keys. | |
64 | * @tx_info - driver specific tls info. | |
65 | * @crypto_info - tls crypto information. | |
66 | * @direction - TX/RX direction. | |
67 | * return - SUCCESS/FAILURE. | |
68 | */ | |
69 | static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info, | |
70 | struct tls_crypto_info *crypto_info, | |
71 | enum tls_offload_ctx_dir direction) | |
72 | { | |
73 | int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret; | |
74 | unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE]; | |
75 | struct tls12_crypto_info_aes_gcm_128 *info_128_gcm; | |
76 | struct ktls_key_ctx *kctx = &tx_info->key_ctx; | |
77 | struct crypto_cipher *cipher; | |
78 | unsigned char *key, *salt; | |
79 | ||
80 | switch (crypto_info->cipher_type) { | |
81 | case TLS_CIPHER_AES_GCM_128: | |
82 | info_128_gcm = | |
83 | (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; | |
84 | keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE; | |
85 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
86 | tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE; | |
87 | mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; | |
88 | tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; | |
89 | tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv); | |
90 | ||
91 | ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE; | |
92 | key = info_128_gcm->key; | |
93 | salt = info_128_gcm->salt; | |
94 | tx_info->record_no = *(u64 *)info_128_gcm->rec_seq; | |
95 | ||
96 | /* The SCMD fields used when encrypting a full TLS
97 | * record. It's a one-time calculation that stays valid
98 | * for the lifetime of the connection.
99 | */
100 | tx_info->scmd0_seqno_numivs = | |
101 | SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) | | |
102 | SCMD_CIPH_AUTH_SEQ_CTRL_F | | |
103 | SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) | | |
104 | SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) | | |
105 | SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) | | |
106 | SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) | | |
107 | SCMD_NUM_IVS_V(1); | |
108 | ||
109 | /* keys will be sent inline. */ | |
110 | tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F; | |
111 | ||
112 | /* The SCMD fields used when encrypting a partial TLS |
113 | * record (no trailer and possibly a truncated payload). | |
114 | */ | |
115 | tx_info->scmd0_short_seqno_numivs = | |
116 | SCMD_CIPH_AUTH_SEQ_CTRL_F | | |
117 | SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | | |
118 | SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) | | |
119 | SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1); | |
120 | ||
121 | tx_info->scmd0_short_ivgen_hdrlen = | |
122 | tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F; | |
123 | ||
124 | break; |
125 | ||
126 | default: | |
127 | pr_err("GCM: cipher type 0x%x not supported\n", | |
128 | crypto_info->cipher_type); | |
129 | ret = -EINVAL; | |
130 | goto out; | |
131 | } | |
132 | ||
133 | key_ctx_size = CHCR_KTLS_KEY_CTX_LEN + | |
134 | roundup(keylen, 16) + ghash_size; | |
135 | /* Calculate H = CIPH(K, 0 repeated 16 times); it goes
136 | * into the key context.
137 | */
138 | cipher = crypto_alloc_cipher("aes", 0, 0); | |
139 | if (IS_ERR(cipher)) { | |
140 | ret = -ENOMEM; | |
141 | goto out; | |
142 | } | |
143 | ||
144 | ret = crypto_cipher_setkey(cipher, key, keylen); | |
145 | if (ret) | |
146 | goto out1; | |
147 | ||
148 | memset(ghash_h, 0, ghash_size); | |
149 | crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); | |
150 | ||
151 | /* fill the Key context */ | |
152 | if (direction == TLS_OFFLOAD_CTX_DIR_TX) { | |
153 | kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size, | |
154 | mac_key_size, | |
155 | key_ctx_size >> 4); | |
156 | } else { | |
157 | ret = -EINVAL; | |
158 | goto out1; | |
159 | } | |
160 | ||
161 | memcpy(kctx->salt, salt, tx_info->salt_size); | |
162 | memcpy(kctx->key, key, keylen); | |
163 | memcpy(kctx->key + keylen, ghash_h, ghash_size); | |
164 | tx_info->key_ctx_len = key_ctx_size; | |
165 | ||
166 | out1: | |
167 | crypto_free_cipher(cipher); | |
168 | out: | |
169 | return ret; | |
170 | } | |
171 | ||
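/* Note (added for clarity): the key context assembled above carries the
 * FILL_KEY_CTX_HDR() header, the GCM salt, the AES key (rounded up to a
 * 16 byte multiple in key_ctx_size) and the GHASH subkey H = AES_K(0^128)
 * computed with the one-off "aes" cipher; key_ctx_len is reused later when
 * the keys are copied inline into each work request.
 */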
172 | /* |
173 | * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection. | |
174 | * @sk - tcp socket. | |
175 | * @tx_info - driver specific tls info. | |
176 | * @atid - connection active tid. | |
177 | * return - send success/failure. | |
178 | */ | |
179 | static int chcr_ktls_act_open_req(struct sock *sk, | |
180 | struct chcr_ktls_info *tx_info, | |
181 | int atid) | |
182 | { | |
183 | struct inet_sock *inet = inet_sk(sk); | |
184 | struct cpl_t6_act_open_req *cpl6; | |
185 | struct cpl_act_open_req *cpl; | |
186 | struct sk_buff *skb; | |
187 | unsigned int len; | |
188 | int qid_atid; | |
189 | u64 options; | |
190 | ||
191 | len = sizeof(*cpl6); | |
192 | skb = alloc_skb(len, GFP_KERNEL); | |
193 | if (unlikely(!skb)) | |
194 | return -ENOMEM; | |
195 | /* mark it a control pkt */ | |
196 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id); | |
197 | ||
198 | cpl6 = __skb_put_zero(skb, len); | |
199 | cpl = (struct cpl_act_open_req *)cpl6; | |
200 | INIT_TP_WR(cpl6, 0); | |
201 | qid_atid = TID_QID_V(tx_info->rx_qid) | | |
202 | TID_TID_V(atid); | |
203 | OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); | |
204 | cpl->local_port = inet->inet_sport; | |
205 | cpl->peer_port = inet->inet_dport; | |
206 | cpl->local_ip = inet->inet_rcv_saddr; | |
207 | cpl->peer_ip = inet->inet_daddr; | |
208 | ||
209 | /* fill first 64 bit option field. */ | |
210 | options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F | | |
211 | SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan); | |
212 | cpl->opt0 = cpu_to_be64(options); | |
213 | ||
214 | /* next 64 bit option field. */ | |
215 | options = | |
216 | TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]); | |
217 | cpl->opt2 = htonl(options); | |
218 | ||
219 | return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te); | |
220 | } | |
221 | ||
76d7728d | 222 | #if IS_ENABLED(CONFIG_IPV6) |
223 | /* |
224 | * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection. | |
225 | * @sk - tcp socket. | |
226 | * @tx_info - driver specific tls info. | |
227 | * @atid - connection active tid. | |
228 | * return - send success/failure. | |
229 | */ | |
230 | static int chcr_ktls_act_open_req6(struct sock *sk, | |
231 | struct chcr_ktls_info *tx_info, | |
232 | int atid) | |
233 | { | |
234 | struct inet_sock *inet = inet_sk(sk); | |
235 | struct cpl_t6_act_open_req6 *cpl6; | |
236 | struct cpl_act_open_req6 *cpl; | |
237 | struct sk_buff *skb; | |
238 | unsigned int len; | |
239 | int qid_atid; | |
240 | u64 options; | |
241 | ||
242 | len = sizeof(*cpl6); | |
243 | skb = alloc_skb(len, GFP_KERNEL); | |
244 | if (unlikely(!skb)) | |
245 | return -ENOMEM; | |
246 | /* mark it a control pkt */ | |
247 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id); | |
248 | ||
249 | cpl6 = __skb_put_zero(skb, len); | |
250 | cpl = (struct cpl_act_open_req6 *)cpl6; | |
251 | INIT_TP_WR(cpl6, 0); | |
252 | qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid); | |
253 | OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid)); | |
254 | cpl->local_port = inet->inet_sport; | |
255 | cpl->peer_port = inet->inet_dport; | |
256 | cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0]; | |
257 | cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8]; | |
258 | cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0]; | |
259 | cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8]; | |
260 | ||
261 | /* first 64 bit option field. */ | |
262 | options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F | | |
263 | SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan); | |
264 | cpl->opt0 = cpu_to_be64(options); | |
265 | /* next 64 bit option field. */ | |
266 | options = | |
267 | TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]); | |
268 | cpl->opt2 = htonl(options); | |
269 | ||
270 | return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te); | |
271 | } | |
76d7728d | 272 | #endif /* #if IS_ENABLED(CONFIG_IPV6) */ |
62370a4f | 273 | |
274 | /* |
275 | * chcr_setup_connection: create a TCB entry so that TP will form tcp packets. | |
276 | * @sk - tcp socket. | |
277 | * @tx_info - driver specific tls info. | |
278 | * return: NET_TX_OK/NET_XMIT_DROP | |
279 | */ | |
280 | static int chcr_setup_connection(struct sock *sk, | |
281 | struct chcr_ktls_info *tx_info) | |
282 | { | |
283 | struct tid_info *t = &tx_info->adap->tids; | |
284 | int atid, ret = 0; | |
285 | ||
286 | atid = cxgb4_alloc_atid(t, tx_info); | |
287 | if (atid == -1) | |
288 | return -EINVAL; | |
289 | ||
290 | tx_info->atid = atid; | |
34aba2c4 | 291 | |
efca3878 | 292 | if (tx_info->ip_family == AF_INET) { |
34aba2c4 | 293 | ret = chcr_ktls_act_open_req(sk, tx_info, atid); |
76d7728d | 294 | #if IS_ENABLED(CONFIG_IPV6) |
34aba2c4 | 295 | } else { |
296 | ret = cxgb4_clip_get(tx_info->netdev, (const u32 *) |
297 | &sk->sk_v6_rcv_saddr, | |
298 | 1); | |
299 | if (ret) | |
300 | return ret; | |
301 | ret = chcr_ktls_act_open_req6(sk, tx_info, atid); | |
76d7728d | 302 | #endif |
303 | } |
304 | ||
305 | /* If the return value is NET_XMIT_CN the msg will be sent, just delayed, so
306 | * mark it a success. On any other failure free the atid and return the error.
307 | */
308 | if (ret) { | |
efca3878 | 309 | if (ret == NET_XMIT_CN) { |
34aba2c4 | 310 | ret = 0; |
311 | } else { |
312 | #if IS_ENABLED(CONFIG_IPV6) | |
313 | /* clear clip entry */ | |
314 | if (tx_info->ip_family == AF_INET6) | |
315 | cxgb4_clip_release(tx_info->netdev, | |
316 | (const u32 *) | |
317 | &sk->sk_v6_rcv_saddr, | |
318 | 1); | |
319 | #endif | |
34aba2c4 | 320 | cxgb4_free_atid(t, atid); |
efca3878 | 321 | } |
322 | } |
323 | ||
324 | return ret; | |
325 | } | |
326 | ||
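/* Flow summary (added for clarity): cxgb4_alloc_atid() reserves an atid,
 * chcr_ktls_act_open_req()/chcr_ktls_act_open_req6() sends the CPL through
 * the L2T entry, and the reply handler chcr_ktls_cpl_act_open_rpl() later
 * frees the atid and, on success, installs the real tid with
 * cxgb4_insert_tid() before completing tx_info->completion.
 */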
327 | /* | |
328 | * chcr_set_tcb_field: update tcb fields. | |
329 | * @tx_info - driver specific tls info. | |
330 | * @word - TCB word. | |
331 | * @mask - TCB word related mask. | |
332 | * @val - TCB word related value. | |
333 | * @no_reply - set 1 if not looking for TP response. | |
334 | */ | |
335 | static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word, | |
336 | u64 mask, u64 val, int no_reply) | |
337 | { | |
338 | struct cpl_set_tcb_field *req; | |
339 | struct sk_buff *skb; | |
340 | ||
341 | skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC); | |
342 | if (!skb) | |
343 | return -ENOMEM; | |
344 | ||
345 | req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req)); | |
346 | INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid); | |
347 | req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) | | |
348 | NO_REPLY_V(no_reply)); | |
349 | req->word_cookie = htons(TCB_WORD_V(word)); | |
350 | req->mask = cpu_to_be64(mask); | |
351 | req->val = cpu_to_be64(val); | |
352 | ||
353 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id); | |
354 | return cxgb4_ofld_send(tx_info->netdev, skb); | |
355 | } | |
356 | ||
357 | /* | |
358 | * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE | |
359 | * @tx_info - driver specific tls info. | |
360 | * return: NET_TX_OK/NET_XMIT_DROP. | |
361 | */ | |
362 | static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info) | |
363 | { | |
364 | return chcr_set_tcb_field(tx_info, TCB_T_STATE_W, | |
365 | TCB_T_STATE_V(TCB_T_STATE_M), | |
366 | CHCR_TCB_STATE_CLOSED, 1); | |
367 | } | |
368 | ||
369 | /*
370 | * chcr_ktls_dev_del: callback for tls_dev_del.
371 | * Removes the tid and l2t entry and closes the connection.
372 | * Called on a per-connection basis.
373 | * @netdev - net device.
374 | * @tls_ctx - tls context.
375 | * @direction - TX/RX crypto direction
376 | */
377 | static void chcr_ktls_dev_del(struct net_device *netdev, |
378 | struct tls_context *tls_ctx, | |
379 | enum tls_offload_ctx_dir direction) | |
380 | { |
381 | struct chcr_ktls_ofld_ctx_tx *tx_ctx = | |
382 | chcr_get_ktls_tx_context(tls_ctx); | |
383 | struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; | |
3427e13e | 384 | struct ch_ktls_port_stats_debug *port_stats; |
385 | |
386 | if (!tx_info) | |
387 | return; | |
34aba2c4 | 388 | |
62370a4f | 389 | /* clear l2t entry */ |
390 | if (tx_info->l2te) |
391 | cxgb4_l2t_release(tx_info->l2te); | |
392 | ||
76d7728d | 393 | #if IS_ENABLED(CONFIG_IPV6) |
394 | /* clear clip entry */ |
395 | if (tx_info->ip_family == AF_INET6) | |
396 | cxgb4_clip_release(netdev, (const u32 *) |
397 | &tx_info->sk->sk_v6_rcv_saddr, | |
62370a4f | 398 | 1); |
76d7728d | 399 | #endif |
400 | |
401 | /* clear tid */ | |
402 | if (tx_info->tid != -1) { |
403 | /* clear tcb state and then release tid */ | |
404 | chcr_ktls_mark_tcb_close(tx_info); | |
405 | cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, | |
406 | tx_info->tid, tx_info->ip_family); | |
407 | } | |
62370a4f | 408 | |
409 | port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; |
410 | atomic64_inc(&port_stats->ktls_tx_connection_close); | |
411 | kvfree(tx_info); |
412 | tx_ctx->chcr_info = NULL; | |
413 | /* release module refcount */ |
414 | module_put(THIS_MODULE); | |
415 | } |
416 | ||
417 | /*
418 | * chcr_ktls_dev_add: callback for tls_dev_add.
419 | * Creates a tcb entry for TP and adds an l2t entry for the connection.
420 | * Also derives the crypto keys and saves them locally.
421 | * @netdev - net device.
422 | * @tls_ctx - tls context.
423 | * @direction - TX/RX crypto direction
424 | * return: SUCCESS/FAILURE.
425 | */
426 | static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, |
427 | enum tls_offload_ctx_dir direction, | |
428 | struct tls_crypto_info *crypto_info, | |
429 | u32 start_offload_tcp_sn) | |
430 | { |
431 | struct tls_context *tls_ctx = tls_get_ctx(sk); | |
3427e13e | 432 | struct ch_ktls_port_stats_debug *port_stats; |
433 | struct chcr_ktls_ofld_ctx_tx *tx_ctx; |
434 | struct chcr_ktls_info *tx_info; | |
435 | struct dst_entry *dst; | |
436 | struct adapter *adap; | |
437 | struct port_info *pi; | |
438 | struct neighbour *n; | |
439 | u8 daaddr[16]; | |
440 | int ret = -1; | |
441 | ||
442 | tx_ctx = chcr_get_ktls_tx_context(tls_ctx); | |
443 | ||
444 | pi = netdev_priv(netdev); | |
445 | adap = pi->adapter; | |
446 | port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; |
447 | atomic64_inc(&port_stats->ktls_tx_connection_open); | |
efca3878 | 448 | |
449 | if (direction == TLS_OFFLOAD_CTX_DIR_RX) { |
450 | pr_err("not expecting for RX direction\n"); | |
451 | goto out; |
452 | } | |
453 | |
454 | if (tx_ctx->chcr_info) | |
34aba2c4 | 455 | goto out; |
456 | |
457 | tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL); | |
efca3878 | 458 | if (!tx_info) |
34aba2c4 | 459 | goto out; |
460 | |
461 | tx_info->sk = sk; | |
efca3878 | 462 | spin_lock_init(&tx_info->lock); |
463 | /* initialize tid and atid to -1; 0 is also a valid id. */
464 | tx_info->tid = -1; | |
465 | tx_info->atid = -1; | |
466 | ||
467 | tx_info->adap = adap; | |
468 | tx_info->netdev = netdev; | |
5a4b9fe7 | 469 | tx_info->first_qset = pi->first_qset; |
470 | tx_info->tx_chan = pi->tx_chan; |
471 | tx_info->smt_idx = pi->smt_idx; | |
472 | tx_info->port_id = pi->port_id; | |
473 | tx_info->prev_ack = 0; |
474 | tx_info->prev_win = 0; | |
475 | |
476 | tx_info->rx_qid = chcr_get_first_rx_qid(adap); | |
477 | if (unlikely(tx_info->rx_qid < 0)) | |
efca3878 | 478 | goto free_tx_info; |
479 | |
480 | tx_info->prev_seq = start_offload_tcp_sn; | |
481 | tx_info->tcp_start_seq_number = start_offload_tcp_sn; | |
482 | ||
483 | /* save crypto keys */ |
484 | ret = chcr_ktls_save_keys(tx_info, crypto_info, direction); | |
485 | if (ret < 0) | |
efca3878 | 486 | goto free_tx_info; |
8a30923e | 487 | |
34aba2c4 | 488 | /* get peer ip */ |
76d7728d | 489 | if (sk->sk_family == AF_INET) { |
34aba2c4 | 490 | memcpy(daaddr, &sk->sk_daddr, 4); |
efca3878 | 491 | tx_info->ip_family = AF_INET; |
76d7728d | 492 | #if IS_ENABLED(CONFIG_IPV6) |
34aba2c4 | 493 | } else { |
76d7728d | 494 | if (!sk->sk_ipv6only && |
efca3878 | 495 | ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { |
76d7728d | 496 | memcpy(daaddr, &sk->sk_daddr, 4); |
497 | tx_info->ip_family = AF_INET; |
498 | } else { | |
76d7728d | 499 | memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16); |
500 | tx_info->ip_family = AF_INET6; |
501 | } | |
76d7728d | 502 | #endif |
503 | } |
504 | ||
505 | /* get the l2t index */ | |
506 | dst = sk_dst_get(sk); | |
507 | if (!dst) { | |
508 | pr_err("DST entry not found\n"); | |
efca3878 | 509 | goto free_tx_info; |
510 | } |
511 | n = dst_neigh_lookup(dst, daaddr); | |
512 | if (!n || !n->dev) { | |
513 | pr_err("neighbour not found\n"); | |
514 | dst_release(dst); | |
efca3878 | 515 | goto free_tx_info; |
516 | } |
517 | tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0); | |
518 | ||
519 | neigh_release(n); | |
520 | dst_release(dst); | |
521 | ||
522 | if (!tx_info->l2te) { | |
523 | pr_err("l2t entry not found\n"); | |
efca3878 | 524 | goto free_tx_info; |
525 | } |
526 | ||
527 | /* The driver must not be unloaded while any connection exists */
528 | if (!try_module_get(THIS_MODULE)) | |
529 | goto free_l2t; | |
34aba2c4 | 530 | |
efca3878 | 531 | init_completion(&tx_info->completion); |
532 | /* create a filter and call cxgb4_l2t_send to send the packet out, which |
533 | * will take care of updating l2t entry in hw if not already done. | |
534 | */ | |
efca3878 | 535 | tx_info->open_state = CH_KTLS_OPEN_PENDING; |
34aba2c4 | 536 | |
537 | if (chcr_setup_connection(sk, tx_info)) |
538 | goto put_module; | |
539 | ||
540 | /* Wait for reply */ | |
541 | wait_for_completion_timeout(&tx_info->completion, 30 * HZ); | |
542 | spin_lock_bh(&tx_info->lock); | |
543 | if (tx_info->open_state) { | |
544 | /* need to wait for hw response, can't free tx_info yet. */ | |
545 | if (tx_info->open_state == CH_KTLS_OPEN_PENDING) | |
546 | tx_info->pending_close = true; | |
547 | /* the lock is released later, in the cleanup path */
548 | goto put_module; | |
549 | } | |
550 | spin_unlock_bh(&tx_info->lock); | |
551 | ||
552 | /* initialize tcb */ | |
553 | reinit_completion(&tx_info->completion); | |
554 | /* mark it pending for hw response */ | |
555 | tx_info->open_state = CH_KTLS_OPEN_PENDING; | |
556 | ||
557 | if (chcr_init_tcb_fields(tx_info)) | |
558 | goto free_tid; | |
559 | ||
560 | /* Wait for reply */ | |
561 | wait_for_completion_timeout(&tx_info->completion, 30 * HZ); | |
562 | spin_lock_bh(&tx_info->lock); | |
563 | if (tx_info->open_state) { | |
564 | /* need to wait for hw response, can't free tx_info yet. */ | |
565 | tx_info->pending_close = true; | |
566 | /* the lock is released later, in the cleanup path */
567 | goto free_tid; | |
a3ac249a | 568 | } |
569 | spin_unlock_bh(&tx_info->lock); |
570 | ||
571 | if (!cxgb4_check_l2t_valid(tx_info->l2te)) | |
572 | goto free_tid; | |
573 | ||
3427e13e | 574 | atomic64_inc(&port_stats->ktls_tx_ctx); |
efca3878 | 575 | tx_ctx->chcr_info = tx_info; |
a3ac249a | 576 | |
34aba2c4 | 577 | return 0; |
578 | |
579 | free_tid: | |
580 | chcr_ktls_mark_tcb_close(tx_info); | |
581 | #if IS_ENABLED(CONFIG_IPV6) | |
582 | /* clear clip entry */ | |
583 | if (tx_info->ip_family == AF_INET6) | |
584 | cxgb4_clip_release(netdev, (const u32 *) | |
585 | &sk->sk_v6_rcv_saddr, | |
586 | 1); | |
587 | #endif | |
588 | cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, | |
589 | tx_info->tid, tx_info->ip_family); | |
590 | ||
591 | put_module: | |
592 | /* release module refcount */ | |
593 | module_put(THIS_MODULE); | |
594 | free_l2t: | |
595 | cxgb4_l2t_release(tx_info->l2te); | |
596 | free_tx_info: | |
597 | if (tx_info->pending_close) | |
598 | spin_unlock_bh(&tx_info->lock); | |
599 | else | |
600 | kvfree(tx_info); | |
34aba2c4 | 601 | out: |
3427e13e | 602 | atomic64_inc(&port_stats->ktls_tx_connection_fail); |
efca3878 | 603 | return -1; |
604 | } |
605 | ||
606 | /* |
607 | * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number | |
608 | * handling. | |
609 | * @tx_info - driver specific tls info. | |
610 | * return: NET_TX_OK/NET_XMIT_DROP | |
611 | */ | |
612 | static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info) | |
613 | { | |
614 | int ret = 0; | |
615 | ||
616 | /* set tcb in offload and bypass */ | |
617 | ret = | |
618 | chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W, | |
619 | TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F), | |
620 | TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1); | |
621 | if (ret) | |
622 | return ret; | |
623 | /* reset snd_una and snd_next fields in tcb */ | |
624 | ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W, | |
625 | TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) | | |
626 | TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M), | |
627 | 0, 1); | |
628 | if (ret) | |
629 | return ret; | |
630 | ||
631 | /* reset send max */ | |
632 | ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W, | |
633 | TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M), | |
634 | 0, 1); | |
635 | if (ret) | |
636 | return ret; | |
637 | ||
638 | /* update l2t index and request for tp reply to confirm tcb is | |
639 | * initialised to handle tx traffic. | |
640 | */ | |
641 | ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W, | |
642 | TCB_L2T_IX_V(TCB_L2T_IX_M), | |
643 | TCB_L2T_IX_V(tx_info->l2te->idx), 0); | |
644 | return ret; | |
645 | } | |
646 | ||
647 | /* | |
648 | * chcr_ktls_cpl_act_open_rpl: connection reply received from TP. | |
649 | */ | |
650 | static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, |
651 | unsigned char *input) | |
652 | { |
653 | const struct cpl_act_open_rpl *p = (void *)input; | |
654 | struct chcr_ktls_info *tx_info = NULL; | |
655 | unsigned int atid, tid, status; | |
656 | struct tid_info *t; | |
657 | ||
658 | tid = GET_TID(p); | |
659 | status = AOPEN_STATUS_G(ntohl(p->atid_status)); | |
660 | atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status))); | |
661 | ||
662 | t = &adap->tids; | |
663 | tx_info = lookup_atid(t, atid); | |
664 | ||
665 | if (!tx_info || tx_info->atid != atid) { | |
efca3878 | 666 | pr_err("%s: incorrect tx_info or atid\n", __func__); |
667 | return -1; |
668 | } | |
669 | ||
670 | cxgb4_free_atid(t, atid); |
671 | tx_info->atid = -1; | |
672 | ||
673 | spin_lock(&tx_info->lock); | |
674 | /* the HW response raced with a pending close, finish the pending cleanup */
675 | if (tx_info->pending_close) { | |
676 | spin_unlock(&tx_info->lock); | |
677 | if (!status) { | |
678 | /* it's a late success; the tcb is already established,
679 | * so mark it closed.
680 | */
681 | chcr_ktls_mark_tcb_close(tx_info); | |
682 | cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, | |
683 | tid, tx_info->ip_family); | |
684 | } | |
685 | kvfree(tx_info); | |
686 | return 0; | |
687 | } | |
688 | ||
689 | if (!status) { |
690 | tx_info->tid = tid; | |
691 | cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); | |
692 | tx_info->open_state = CH_KTLS_OPEN_SUCCESS; |
693 | } else { | |
694 | tx_info->open_state = CH_KTLS_OPEN_FAILURE; | |
8a30923e | 695 | } |
696 | spin_unlock(&tx_info->lock); |
697 | ||
698 | complete(&tx_info->completion); | |
699 | return 0; |
700 | } | |
701 | ||
702 | /* | |
703 | * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP. | |
704 | */ | |
a8c16e8e | 705 | static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) |
706 | { |
707 | const struct cpl_set_tcb_rpl *p = (void *)input; | |
708 | struct chcr_ktls_info *tx_info = NULL; | |
709 | struct tid_info *t; | |
a1dd3875 | 710 | u32 tid; |
711 | |
712 | tid = GET_TID(p); | |
713 | |
714 | t = &adap->tids; | |
715 | tx_info = lookup_tid(t, tid); | |
efca3878 | 716 | |
8a30923e | 717 | if (!tx_info || tx_info->tid != tid) { |
efca3878 | 718 | pr_err("%s: incorrect tx_info or tid\n", __func__); |
719 | return -1; |
720 | } | |
721 | |
722 | spin_lock(&tx_info->lock); | |
723 | if (tx_info->pending_close) { | |
724 | spin_unlock(&tx_info->lock); | |
725 | kvfree(tx_info); | |
726 | return 0; | |
727 | } | |
728 | tx_info->open_state = false; | |
729 | spin_unlock(&tx_info->lock); | |
730 | ||
731 | complete(&tx_info->completion); | |
732 | return 0; |
733 | } | |
5a4b9fe7 | 734 | |
735 | static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, |
736 | u32 tid, void *pos, u16 word, u64 mask, | |
737 | u64 val, u32 reply) |
738 | { | |
739 | struct cpl_set_tcb_field_core *cpl; | |
740 | struct ulptx_idata *idata; | |
741 | struct ulp_txpkt *txpkt; | |
742 |
743 | /* ULP_TXPKT */ |
744 | txpkt = pos; | |
745 | txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); | |
746 | txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16)); | |
747 | ||
748 | /* ULPTX_IDATA sub-command */ | |
749 | idata = (struct ulptx_idata *)(txpkt + 1); | |
750 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); | |
751 | idata->len = htonl(sizeof(*cpl)); | |
752 | pos = idata + 1; | |
753 | ||
754 | cpl = pos; | |
755 | /* CPL_SET_TCB_FIELD */ | |
756 | OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | |
757 | cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) | | |
758 | NO_REPLY_V(!reply)); | |
759 | cpl->word_cookie = htons(TCB_WORD_V(word)); | |
760 | cpl->mask = cpu_to_be64(mask); | |
761 | cpl->val = cpu_to_be64(val); | |
762 | ||
763 | /* ULPTX_NOOP */ | |
764 | idata = (struct ulptx_idata *)(cpl + 1); | |
765 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP)); | |
766 | idata->len = htonl(0); | |
071a43e6 | 767 | pos = idata + 1; |
5a4b9fe7 | 768 | |
769 | return pos; |
770 | } | |
771 | ||
772 | ||
773 | /* | |
774 | * chcr_write_cpl_set_tcb_ulp: update tcb values. | |
775 | * TCB is responsible to create tcp headers, so all the related values | |
776 | * should be correctly updated. | |
777 | * @tx_info - driver specific tls info. | |
778 | * @q - tx queue on which packet is going out. | |
779 | * @tid - TCB identifier. | |
780 | * @pos - current index where should we start writing. | |
781 | * @word - TCB word. | |
782 | * @mask - TCB word related mask. | |
783 | * @val - TCB word related value. | |
784 | * @reply - set 1 if looking for TP response. | |
785 | * return - next position to write. | |
786 | */ | |
787 | static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, | |
788 | struct sge_eth_txq *q, u32 tid, | |
789 | void *pos, u16 word, u64 mask, | |
790 | u64 val, u32 reply) | |
791 | { | |
792 | int left = (void *)q->q.stat - pos; | |
793 | ||
794 | if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) { | |
795 | if (!left) { | |
5a4b9fe7 | 796 | pos = q->q.desc; |
797 | } else { |
798 | u8 buf[48] = {0}; | |
799 | ||
800 | __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, | |
801 | mask, val, reply); | |
802 | ||
803 | return chcr_copy_to_txd(buf, &q->q, pos, | |
804 | CHCR_SET_TCB_FIELD_LEN); | |
805 | } | |
806 | } |
807 | ||
071a43e6 AB |
808 | pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, |
809 | mask, val, reply); | |
810 | ||
811 | /* check again if we are at the end of the queue */ | |
812 | if (left == CHCR_SET_TCB_FIELD_LEN) | |
813 | pos = q->q.desc; | |
814 | ||
815 | return pos; |
816 | } | |
817 | ||
818 | /* | |
819 | * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header | |
820 | * with updated values like tcp seq, ack, window etc. | |
821 | * @tx_info - driver specific tls info. | |
822 | * @q - TX queue. | |
823 | * @tcp_seq | |
824 | * @tcp_ack | |
825 | * @tcp_win | |
826 | * return: NETDEV_TX_BUSY/NET_TX_OK. | |
827 | */ | |
828 | static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info, | |
829 | struct sge_eth_txq *q, u64 tcp_seq, | |
63ee4591 | 830 | u64 tcp_ack, u64 tcp_win, bool offset) |
831 | { |
832 | bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0)); | |
3427e13e | 833 | struct ch_ktls_port_stats_debug *port_stats; |
834 | u32 len, cpl = 0, ndesc, wr_len; |
835 | struct fw_ulptx_wr *wr; | |
836 | int credits; | |
837 | void *pos; | |
838 | ||
839 | wr_len = sizeof(*wr); | |
840 | /* there can be max 4 cpls, check if we have enough credits */ | |
841 | len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16); | |
842 | ndesc = DIV_ROUND_UP(len, 64); | |
843 | ||
844 | credits = chcr_txq_avail(&q->q) - ndesc; | |
845 | if (unlikely(credits < 0)) { | |
846 | chcr_eth_txq_stop(q); | |
847 | return NETDEV_TX_BUSY; | |
848 | } | |
849 | ||
850 | pos = &q->q.desc[q->q.pidx]; | |
851 | /* make space for WR, we'll fill it later when we know all the cpls | |
852 | * being sent out and have complete length. | |
853 | */ | |
854 | wr = pos; | |
855 | pos += wr_len; | |
856 | /* update tx_max if it's a re-transmit or the first wr */
857 | if (first_wr || tcp_seq != tx_info->prev_seq) { | |
858 | pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, | |
859 | TCB_TX_MAX_W, | |
860 | TCB_TX_MAX_V(TCB_TX_MAX_M), | |
861 | TCB_TX_MAX_V(tcp_seq), 0); | |
862 | cpl++; | |
863 | } | |
864 | /* reset snd una if it's a re-transmit pkt */ | |
63ee4591 | 865 | if (tcp_seq != tx_info->prev_seq || offset) { |
5a4b9fe7 | 866 | /* reset snd_una */ |
867 | port_stats = |
868 | &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; | |
869 | pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, |
870 | TCB_SND_UNA_RAW_W, | |
871 | TCB_SND_UNA_RAW_V | |
872 | (TCB_SND_UNA_RAW_M), | |
873 | TCB_SND_UNA_RAW_V(0), 0); | |
874 | if (tcp_seq != tx_info->prev_seq) |
875 | atomic64_inc(&port_stats->ktls_tx_ooo); | |
876 | cpl++; |
877 | } | |
878 | /* update ack */ | |
879 | if (first_wr || tx_info->prev_ack != tcp_ack) { | |
880 | pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, | |
881 | TCB_RCV_NXT_W, | |
882 | TCB_RCV_NXT_V(TCB_RCV_NXT_M), | |
883 | TCB_RCV_NXT_V(tcp_ack), 0); | |
884 | tx_info->prev_ack = tcp_ack; | |
885 | cpl++; | |
886 | } | |
887 | /* update receive window */ | |
888 | if (first_wr || tx_info->prev_win != tcp_win) { | |
889 | pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, | |
890 | TCB_RCV_WND_W, | |
891 | TCB_RCV_WND_V(TCB_RCV_WND_M), | |
892 | TCB_RCV_WND_V(tcp_win), 0); | |
893 | tx_info->prev_win = tcp_win; | |
894 | cpl++; | |
895 | } | |
896 | ||
897 | if (cpl) { | |
898 | /* get the actual length */ | |
899 | len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16); | |
900 | /* ULPTX wr */ | |
901 | wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | |
902 | wr->cookie = 0; | |
903 | /* fill len in wr field */ | |
904 | wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16))); | |
905 | ||
906 | ndesc = DIV_ROUND_UP(len, 64); | |
907 | chcr_txq_advance(&q->q, ndesc); | |
908 | cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); | |
909 | } | |
910 | return 0; | |
911 | } | |
912 | ||
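/* Illustrative summary (added, not driver code): a retransmitted segment
 * (tcp_seq != tx_info->prev_seq) rewrites TCB_TX_MAX and resets
 * TCB_SND_UNA_RAW, whereas an in-order segment whose ack and window are
 * unchanged emits no CPLs at all and rings no doorbell (cpl stays 0).
 */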
913 | /* |
914 | * chcr_ktls_get_tx_flits | |
915 | * returns number of flits to be sent out, it includes key context length, WR | |
916 | * size and skb fragments. | |
917 | */ | |
918 | static unsigned int | |
687823d2 | 919 | chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len) |
5a4b9fe7 | 920 | { |
687823d2 | 921 | return chcr_sgl_len(nr_frags) + |
922 | DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8); |
923 | } | |
924 | ||
925 | /*
926 | * chcr_ktls_check_tcp_options: check whether any TCP option other than
927 | * NOP/EOL is present.
928 | * @skb - skb containing the partial record.
929 | * return: 1 / 0
930 | */
931 | static int | |
932 | chcr_ktls_check_tcp_options(struct tcphdr *tcp) | |
933 | { | |
934 | int cnt, opt, optlen; | |
935 | u_char *cp; | |
936 | ||
937 | cp = (u_char *)(tcp + 1); | |
938 | cnt = (tcp->doff << 2) - sizeof(struct tcphdr); | |
939 | for (; cnt > 0; cnt -= optlen, cp += optlen) { | |
940 | opt = cp[0]; | |
941 | if (opt == TCPOPT_EOL) | |
942 | break; | |
943 | if (opt == TCPOPT_NOP) { | |
944 | optlen = 1; | |
945 | } else { | |
946 | if (cnt < 2) | |
947 | break; | |
948 | optlen = cp[1]; | |
949 | if (optlen < 2 || optlen > cnt) | |
950 | break; | |
951 | } | |
952 | switch (opt) { | |
953 | case TCPOPT_NOP: | |
954 | break; | |
955 | default: | |
956 | return 1; | |
957 | } | |
958 | } | |
959 | return 0; | |
960 | } | |
961 | ||
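/* Example (illustrative): a TCP header padded only with NOPs passes the check
 * above (returns 0), while one carrying e.g. a timestamp or SACK option makes
 * it return 1, flagging that the options have to be sent out separately (see
 * chcr_ktls_write_tcp_options() below).
 */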
962 | /*
963 | * chcr_ktls_write_tcp_options: TP can't send out all the TCP options, so we
964 | * need to send them out separately.
965 | * @tx_info - driver specific tls info.
966 | * @skb - skb containing the partial record.
967 | * @q - TX queue.
968 | * @tx_chan - channel number.
969 | * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
970 | */
971 | static int | |
972 | chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, | |
973 | struct sge_eth_txq *q, uint32_t tx_chan) | |
974 | { | |
975 | struct fw_eth_tx_pkt_wr *wr; | |
976 | struct cpl_tx_pkt_core *cpl; | |
977 | u32 ctrl, iplen, maclen; | |
76d7728d | 978 | #if IS_ENABLED(CONFIG_IPV6) |
429765a1 | 979 | struct ipv6hdr *ip6; |
76d7728d | 980 | #endif |
981 | unsigned int ndesc; |
982 | struct tcphdr *tcp; | |
983 | int len16, pktlen; | |
984 | struct iphdr *ip; | |
985 | int credits; | |
986 | u8 buf[150]; | |
86716b51 | 987 | u64 cntrl1; |
988 | void *pos; |
989 | ||
990 | iplen = skb_network_header_len(skb); | |
991 | maclen = skb_mac_header_len(skb); | |
992 | ||
993 | /* packet length = eth hdr len + ip hdr len + tcp hdr len | |
994 | * (including options). | |
995 | */ | |
b1b5cb18 | 996 | pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb); |
997 | |
998 | ctrl = sizeof(*cpl) + pktlen; | |
999 | len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16); | |
1000 | /* check how many descriptors needed */ | |
1001 | ndesc = DIV_ROUND_UP(len16, 4); | |
1002 | ||
1003 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1004 | if (unlikely(credits < 0)) { | |
1005 | chcr_eth_txq_stop(q); | |
1006 | return NETDEV_TX_BUSY; | |
1007 | } | |
1008 | ||
1009 | pos = &q->q.desc[q->q.pidx]; | |
1010 | wr = pos; | |
1011 | ||
1012 | /* Firmware work request header */ | |
1013 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | | |
1014 | FW_WR_IMMDLEN_V(ctrl)); | |
1015 | ||
1016 | wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16)); | |
1017 | wr->r3 = 0; | |
1018 | ||
1019 | cpl = (void *)(wr + 1); | |
1020 | ||
1021 | /* CPL header */ | |
1022 | cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) | | |
1023 | TXPKT_PF_V(tx_info->adap->pf)); | |
1024 | cpl->pack = 0; | |
1025 | cpl->len = htons(pktlen); | |
1026 | |
1027 | memcpy(buf, skb->data, pktlen); | |
1028 | if (tx_info->ip_family == AF_INET) { | |
1029 | /* we need to correct ip header len */ | |
1030 | ip = (struct iphdr *)(buf + maclen); | |
1031 | ip->tot_len = htons(pktlen - maclen); | |
86716b51 | 1032 | cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP); |
76d7728d | 1033 | #if IS_ENABLED(CONFIG_IPV6) |
1034 | } else { |
1035 | ip6 = (struct ipv6hdr *)(buf + maclen); | |
e14394e6 | 1036 | ip6->payload_len = htons(pktlen - maclen - iplen); |
86716b51 | 1037 | cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6); |
76d7728d | 1038 | #endif |
429765a1 | 1039 | } |
1040 | |
1041 | cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) | | |
1042 | TXPKT_IPHDR_LEN_V(iplen); | |
1043 | /* checksum offload */ | |
1044 | cpl->ctrl1 = cpu_to_be64(cntrl1); | |
1045 | ||
1046 | pos = cpl + 1; | |
1047 | ||
1048 | /* now take care of the tcp header: if fin is not set then clear the push
1049 | * bit as well, and if fin is set it will be sent last, so update the tcp
1050 | * sequence number to match the last packet.
1051 | */
1052 | tcp = (struct tcphdr *)(buf + maclen + iplen); | |
1053 | ||
1054 | if (!tcp->fin) | |
1055 | tcp->psh = 0; | |
1056 | else | |
1057 | tcp->seq = htonl(tx_info->prev_seq); | |
1058 | ||
1059 | chcr_copy_to_txd(buf, &q->q, pos, pktlen); | |
1060 | ||
1061 | chcr_txq_advance(&q->q, ndesc); | |
1062 | cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); | |
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | /* |
1067 | * chcr_ktls_xmit_wr_complete: sends out a complete record. If a received skb
1068 | * carries the end of a record, the whole record is sent out so that the
1069 | * crypto block can generate the TAG/HASH.
1070 | * @skb - segment carrying the complete record or its end part.
1071 | * @tx_info - driver specific tls info. | |
1072 | * @q - TX queue. | |
1073 | * @tcp_seq | |
1074 | * @tcp_push - tcp push bit. | |
1075 | * @mss - segment size. | |
1076 | * return: NETDEV_TX_BUSY/NET_TX_OK. | |
1077 | */ | |
1078 | static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb, | |
1079 | struct chcr_ktls_info *tx_info, | |
1080 | struct sge_eth_txq *q, u32 tcp_seq, | |
1081 | bool is_last_wr, u32 data_len, |
1082 | u32 skb_offset, u32 nfrags, | |
1083 | bool tcp_push, u32 mss) |
1084 | { | |
1085 | u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start; | |
1086 | struct adapter *adap = tx_info->adap; | |
1087 | int credits, left, last_desc; | |
1088 | struct tx_sw_desc *sgl_sdesc; | |
1089 | struct cpl_tx_data *tx_data; | |
1090 | struct cpl_tx_sec_pdu *cpl; | |
1091 | struct ulptx_idata *idata; | |
1092 | struct ulp_txpkt *ulptx; | |
1093 | struct fw_ulptx_wr *wr; | |
1094 | void *pos; | |
1095 | u64 *end; | |
1096 | ||
1097 | /* get the number of flits required */ | |
687823d2 | 1098 | flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len); |
1099 | /* number of descriptors */ |
1100 | ndesc = chcr_flits_to_desc(flits); | |
1101 | /* check if enough credits available */ | |
1102 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1103 | if (unlikely(credits < 0)) { | |
1104 | chcr_eth_txq_stop(q); | |
1105 | return NETDEV_TX_BUSY; | |
1106 | } | |
1107 | ||
1108 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | |
1109 | /* Credits are below the threshold value, stop the queue after
1110 | * injecting the Work Request for this packet.
1111 | */
1112 | chcr_eth_txq_stop(q); | |
1113 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | |
1114 | } | |
1115 | ||
1116 | last_desc = q->q.pidx + ndesc - 1; | |
1117 | if (last_desc >= q->q.size) | |
1118 | last_desc -= q->q.size; | |
1119 | sgl_sdesc = &q->q.sdesc[last_desc]; | |
1120 | ||
1121 | if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { | |
1122 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); | |
1123 | q->mapping_err++; | |
1124 | return NETDEV_TX_BUSY; | |
1125 | } | |
1126 | ||
1127 | if (!is_last_wr) |
1128 | skb_get(skb); | |
1129 | ||
1130 | pos = &q->q.desc[q->q.pidx]; |
1131 | end = (u64 *)pos + flits; | |
1132 | /* FW_ULPTX_WR */ | |
1133 | wr = pos; | |
1134 | /* WR will need len16 */ | |
1135 | len16 = DIV_ROUND_UP(flits, 2); | |
1136 | wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | |
1137 | wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16)); | |
1138 | wr->cookie = 0; | |
1139 | pos += sizeof(*wr); | |
1140 | /* ULP_TXPKT */ | |
1141 | ulptx = pos; | |
1142 | ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | | |
1143 | ULP_TXPKT_CHANNELID_V(tx_info->port_id) | | |
1144 | ULP_TXPKT_FID_V(q->q.cntxt_id) | | |
1145 | ULP_TXPKT_RO_F); | |
1146 | ulptx->len = htonl(len16 - 1); | |
1147 | /* ULPTX_IDATA sub-command */ | |
1148 | idata = (struct ulptx_idata *)(ulptx + 1); | |
1149 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F); | |
1150 | /* idata length will include cpl_tx_sec_pdu + key context size + | |
1151 | * cpl_tx_data header. | |
1152 | */ | |
1153 | idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len + | |
1154 | sizeof(*tx_data)); | |
1155 | /* SEC CPL */ | |
1156 | cpl = (struct cpl_tx_sec_pdu *)(idata + 1); | |
1157 | cpl->op_ivinsrtofst = | |
1158 | htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | | |
1159 | CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) | | |
1160 | CPL_TX_SEC_PDU_PLACEHOLDER_V(1) | | |
1161 | CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1)); | |
687823d2 | 1162 | cpl->pldlen = htonl(data_len); |
1163 | |
1164 | /* encryption should start after tls header size + iv size */ | |
1165 | cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1; | |
1166 | ||
1167 | cpl->aadstart_cipherstop_hi = | |
1168 | htonl(CPL_TX_SEC_PDU_AADSTART_V(1) | | |
1169 | CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) | | |
1170 | CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start)); | |
1171 | ||
1172 | /* authentication will also start after tls header + iv size */ | |
1173 | cpl->cipherstop_lo_authinsert = | |
1174 | htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) | | |
1175 | CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) | | |
1176 | CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE)); | |
1177 | ||
1178 | /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ | |
1179 | cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs); | |
1180 | cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen); | |
1181 | cpl->scmd1 = cpu_to_be64(tx_info->record_no); | |
1182 | ||
1183 | pos = cpl + 1; | |
1184 | /* check if space left to fill the keys */ | |
1185 | left = (void *)q->q.stat - pos; | |
1186 | if (!left) { | |
1187 | left = (void *)end - (void *)q->q.stat; | |
1188 | pos = q->q.desc; | |
1189 | end = pos + left; | |
1190 | } | |
1191 | ||
1192 | pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos, | |
1193 | tx_info->key_ctx_len); | |
1194 | left = (void *)q->q.stat - pos; | |
1195 | ||
1196 | if (!left) { | |
1197 | left = (void *)end - (void *)q->q.stat; | |
1198 | pos = q->q.desc; | |
1199 | end = pos + left; | |
1200 | } | |
1201 | /* CPL_TX_DATA */ | |
1202 | tx_data = (void *)pos; | |
1203 | OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid)); | |
687823d2 | 1204 | tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len)); |
1205 | |
1206 | tx_data->rsvd = htonl(tcp_seq); | |
1207 | ||
1208 | tx_data->flags = htonl(TX_BYPASS_F); | |
1209 | if (tcp_push) | |
1210 | tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F); | |
1211 | ||
1212 | /* check left again, it might go beyond queue limit */ | |
1213 | pos = tx_data + 1; | |
1214 | left = (void *)q->q.stat - pos; | |
1215 | ||
1216 | /* check the position again */ | |
1217 | if (!left) { | |
1218 | left = (void *)end - (void *)q->q.stat; | |
1219 | pos = q->q.desc; | |
1220 | end = pos + left; | |
1221 | } | |
1222 | ||
1223 | /* send the complete packet except the header */ | |
1224 | cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr, |
1225 | skb_offset, data_len); | |
1226 | sgl_sdesc->skb = skb; |
1227 | ||
1228 | chcr_txq_advance(&q->q, ndesc); | |
1229 | cxgb4_ring_tx_db(adap, &q->q, ndesc); | |
a8c16e8e | 1230 | atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records); |
1231 | |
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | /*
1236 | * chcr_ktls_xmit_wr_short: sends out partial records. If it's the middle part
1237 | * of a record, the prior data is fetched first to make it 16 byte aligned,
1238 | * and only then is it sent out.
1239 | *
1240 | * @skb - skb containing the partial record.
1241 | * @tx_info - driver specific tls info.
1242 | * @q - TX queue.
1243 | * @tcp_seq
1244 | * @tcp_push - tcp push bit.
1245 | * @mss - segment size.
1246 | * @tls_rec_offset - offset from start of the tls record.
1247 | * @prior_data - data before the current segment, required to make this record
1248 | * 16 byte aligned.
1249 | * @prior_data_len - prior_data length (less than 16)
1250 | * return: NETDEV_TX_BUSY/NET_TX_OK.
1251 | */
1252 | static int chcr_ktls_xmit_wr_short(struct sk_buff *skb, | |
1253 | struct chcr_ktls_info *tx_info, | |
1254 | struct sge_eth_txq *q, | |
1255 | u32 tcp_seq, bool tcp_push, u32 mss, | |
1256 | u32 tls_rec_offset, u8 *prior_data, | |
1257 | u32 prior_data_len, u32 data_len, |
1258 | u32 skb_offset) | |
dc05f3df | 1259 | { |
687823d2 | 1260 | u32 len16, wr_mid = 0, cipher_start, nfrags; |
dc05f3df | 1261 | struct adapter *adap = tx_info->adap; |
1262 | unsigned int flits = 0, ndesc; |
1263 | int credits, left, last_desc; | |
1264 | struct tx_sw_desc *sgl_sdesc; | |
1265 | struct cpl_tx_data *tx_data; | |
1266 | struct cpl_tx_sec_pdu *cpl; | |
1267 | struct ulptx_idata *idata; | |
1268 | struct ulp_txpkt *ulptx; | |
1269 | struct fw_ulptx_wr *wr; | |
1270 | __be64 iv_record; | |
1271 | void *pos; | |
1272 | u64 *end; | |
1273 | ||
687823d2 | 1274 | nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len); |
1275 | /* get the number of flits required, it's a partial record so 2 flits |
1276 | * (AES_BLOCK_SIZE) will be added. | |
1277 | */ | |
687823d2 | 1278 | flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2; |
1279 | /* get the correct 8 byte IV of this record */ |
1280 | iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no); | |
1281 | /* If it's a middle record and not 16 byte aligned for AES CTR, it needs to
1282 | * be made 16 byte aligned, so at least 2 extra flits of immediate data will
1283 | * be added.
1284 | */
1285 | if (prior_data_len) | |
1286 | flits += 2; | |
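/* Worked example (added for clarity): if the chunk to transmit starts 10 bytes
 * past an AES block boundary inside the record, the 10 preceding bytes are
 * refetched as prior_data (prior_data_len = 10, always < 16) so the AES-CTR
 * input stays 16 byte aligned; those bytes travel in the 2 extra
 * immediate-data flits reserved here.
 */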
1287 | /* number of descriptors */ | |
1288 | ndesc = chcr_flits_to_desc(flits); | |
1289 | /* check if enough credits available */ | |
1290 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1291 | if (unlikely(credits < 0)) { | |
1292 | chcr_eth_txq_stop(q); | |
1293 | return NETDEV_TX_BUSY; | |
1294 | } | |
1295 | ||
1296 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | |
1297 | chcr_eth_txq_stop(q); | |
1298 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | |
1299 | } | |
1300 | ||
1301 | last_desc = q->q.pidx + ndesc - 1; | |
1302 | if (last_desc >= q->q.size) | |
1303 | last_desc -= q->q.size; | |
1304 | sgl_sdesc = &q->q.sdesc[last_desc]; | |
1305 | ||
1306 | if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { | |
1307 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); | |
1308 | q->mapping_err++; | |
1309 | return NETDEV_TX_BUSY; | |
1310 | } | |
1311 | ||
1312 | pos = &q->q.desc[q->q.pidx]; | |
1313 | end = (u64 *)pos + flits; | |
1314 | /* FW_ULPTX_WR */ | |
1315 | wr = pos; | |
1316 | /* WR will need len16 */ | |
1317 | len16 = DIV_ROUND_UP(flits, 2); | |
1318 | wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | |
1319 | wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16)); | |
1320 | wr->cookie = 0; | |
1321 | pos += sizeof(*wr); | |
1322 | /* ULP_TXPKT */ | |
1323 | ulptx = pos; | |
1324 | ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | | |
1325 | ULP_TXPKT_CHANNELID_V(tx_info->port_id) | | |
1326 | ULP_TXPKT_FID_V(q->q.cntxt_id) | | |
1327 | ULP_TXPKT_RO_F); | |
1328 | ulptx->len = htonl(len16 - 1); | |
1329 | /* ULPTX_IDATA sub-command */ | |
1330 | idata = (struct ulptx_idata *)(ulptx + 1); | |
1331 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F); | |
1332 | /* idata length will include cpl_tx_sec_pdu + key context size + | |
1333 | * cpl_tx_data header. | |
1334 | */ | |
1335 | idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len + | |
1336 | sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len); | |
1337 | /* SEC CPL */ | |
1338 | cpl = (struct cpl_tx_sec_pdu *)(idata + 1); | |
1339 | /* cipher start will have tls header + iv size extra if it's the header part
1340 | * of a tls record, else only the 16 byte IV will be added.
1341 | */
1342 | cipher_start = | |
1343 | AES_BLOCK_LEN + 1 + | |
1344 | (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0); | |
1345 | ||
1346 | cpl->op_ivinsrtofst = | |
1347 | htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | | |
1348 | CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) | | |
1349 | CPL_TX_SEC_PDU_IVINSRTOFST_V(1)); | |
687823d2 | 1350 | cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len); |
1351 | cpl->aadstart_cipherstop_hi = |
1352 | htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start)); | |
1353 | cpl->cipherstop_lo_authinsert = 0; | |
1354 | /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ | |
1355 | cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs); | |
1356 | cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen); | |
1357 | cpl->scmd1 = 0; | |
1358 | ||
1359 | pos = cpl + 1; | |
1360 | /* check if space left to fill the keys */ | |
1361 | left = (void *)q->q.stat - pos; | |
1362 | if (!left) { | |
1363 | left = (void *)end - (void *)q->q.stat; | |
1364 | pos = q->q.desc; | |
1365 | end = pos + left; | |
1366 | } | |
1367 | ||
1368 | pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos, | |
1369 | tx_info->key_ctx_len); | |
1370 | left = (void *)q->q.stat - pos; | |
1371 | ||
1372 | if (!left) { | |
1373 | left = (void *)end - (void *)q->q.stat; | |
1374 | pos = q->q.desc; | |
1375 | end = pos + left; | |
1376 | } | |
1377 | /* CPL_TX_DATA */ | |
1378 | tx_data = (void *)pos; | |
1379 | OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid)); | |
1380 | tx_data->len = htonl(TX_DATA_MSS_V(mss) | | |
687823d2 | 1381 | TX_LENGTH_V(data_len + prior_data_len)); |
1382 | tx_data->rsvd = htonl(tcp_seq); |
1383 | tx_data->flags = htonl(TX_BYPASS_F); | |
1384 | if (tcp_push) | |
1385 | tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F); | |
1386 | ||
1387 | /* check left again, it might go beyond queue limit */ | |
1388 | pos = tx_data + 1; | |
1389 | left = (void *)q->q.stat - pos; | |
1390 | ||
1391 | /* check the position again */ | |
1392 | if (!left) { | |
1393 | left = (void *)end - (void *)q->q.stat; | |
1394 | pos = q->q.desc; | |
1395 | end = pos + left; | |
1396 | } | |
1397 | /* copy the 16 byte IV for AES-CTR, which is made up of 4 bytes of salt, the
1398 | * 8 byte actual IV and a 4 byte block counter.
1399 | */
1400 | memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size); | |
1401 | memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size); | |
1402 | *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) = | |
1403 | htonl(2 + (tls_rec_offset ? ((tls_rec_offset - | |
1404 | (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0)); | |
1405 | ||
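/* Worked example (added; assumes TLS_HEADER_SIZE is 5, an 8 byte explicit IV
 * and a 16 byte AES_BLOCK_LEN): for a chunk starting at tls_rec_offset 77 the
 * value written above is htonl(2 + (77 - 13) / 16) = htonl(6); the leading 2
 * matches the counter value AES-GCM uses for its first payload block.
 */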
1406 | pos += 16; | |
1407 | /* prior_data_len will always be less than 16 bytes; copy prior_data right
1408 | * after the AES CTR block and zero out the remaining bytes of the 16 byte
1409 | * chunk.
1410 | */
1411 | if (prior_data_len) | |
1412 | pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16); | |
1413 | /* send the complete packet except the header */ | |
1414 | cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr, |
1415 | skb_offset, data_len); | |
1416 | sgl_sdesc->skb = skb; |
1417 | ||
1418 | chcr_txq_advance(&q->q, ndesc); | |
1419 | cxgb4_ring_tx_db(adap, &q->q, ndesc); | |
1420 | ||
1421 | return 0; | |
1422 | } | |
1423 | ||
1424 | /*
1425 | * chcr_ktls_tx_plaintxt: handles records which carry only plain text
1426 | * (only the tls header and iv).
1427 | * @tx_info - driver specific tls info.
1428 | * @skb - skb containing the partial record.
1429 | * @tcp_seq
1430 | * @mss - segment size.
1431 | * @tcp_push - tcp push bit.
1432 | * @q - TX queue.
1433 | * @port_id - port number.
1434 | * @prior_data - data before the current segment, required to make this record
1435 | * 16 byte aligned.
1436 | * @prior_data_len - prior_data length (less than 16)
1437 | * return: NETDEV_TX_BUSY/NET_TX_OK.
1438 | */
1439 | static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info, | |
1440 | struct sk_buff *skb, u32 tcp_seq, u32 mss, | |
1441 | bool tcp_push, struct sge_eth_txq *q, | |
1442 | u32 port_id, u8 *prior_data, | |
687823d2 | 1443 | u32 data_len, u32 skb_offset, |
1444 | u32 prior_data_len) |
1445 | { | |
1446 | int credits, left, len16, last_desc; | |
1447 | unsigned int flits = 0, ndesc; | |
1448 | struct tx_sw_desc *sgl_sdesc; | |
1449 | struct cpl_tx_data *tx_data; | |
1450 | struct ulptx_idata *idata; | |
1451 | struct ulp_txpkt *ulptx; | |
1452 | struct fw_ulptx_wr *wr; | |
687823d2 | 1453 | u32 wr_mid = 0, nfrags; |
1454 | void *pos; |
1455 | u64 *end; | |
1456 | ||
1457 | flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8); | |
1458 | nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len); |
1459 | flits += chcr_sgl_len(nfrags); | |
1460 | if (prior_data_len) |
1461 | flits += 2; | |
687823d2 | 1462 | |
1463 | /* WR will need len16 */ |
1464 | len16 = DIV_ROUND_UP(flits, 2); | |
1465 | /* check how many descriptors needed */ | |
1466 | ndesc = DIV_ROUND_UP(flits, 8); | |
1467 | ||
1468 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1469 | if (unlikely(credits < 0)) { | |
1470 | chcr_eth_txq_stop(q); | |
1471 | return NETDEV_TX_BUSY; | |
1472 | } | |
1473 | ||
1474 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | |
1475 | chcr_eth_txq_stop(q); | |
1476 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | |
1477 | } | |
1478 | ||
1479 | last_desc = q->q.pidx + ndesc - 1; | |
1480 | if (last_desc >= q->q.size) | |
1481 | last_desc -= q->q.size; | |
1482 | sgl_sdesc = &q->q.sdesc[last_desc]; | |
1483 | ||
1484 | if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb, | |
1485 | sgl_sdesc->addr) < 0)) { | |
1486 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); | |
1487 | q->mapping_err++; | |
1488 | return NETDEV_TX_BUSY; | |
1489 | } | |
1490 | ||
1491 | pos = &q->q.desc[q->q.pidx]; | |
1492 | end = (u64 *)pos + flits; | |
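/* Work request layout built below: FW_ULPTX_WR header, ULP_TXPKT, a
 * ULPTX_IDATA immediate sub-command carrying CPL_TX_DATA (plus an optional
 * 16-byte prior_data chunk), followed by the SGL describing the payload.
 */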
1493 | /* FW_ULPTX_WR */ | |
1494 | wr = pos; | |
1495 | wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | |
1496 | wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16)); | |
1497 | wr->cookie = 0; | |
1498 | pos += sizeof(*wr); | |
1499 | /* ULP_TXPKT */ | |
1500 | ulptx = (struct ulp_txpkt *)(wr + 1); | |
1501 | ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | | |
1502 | ULP_TXPKT_DATAMODIFY_V(0) | | |
1503 | ULP_TXPKT_CHANNELID_V(tx_info->port_id) | | |
1504 | ULP_TXPKT_DEST_V(0) | | |
1505 | ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1)); | |
1506 | ulptx->len = htonl(len16 - 1); | |
1507 | /* ULPTX_IDATA sub-command */ | |
1508 | idata = (struct ulptx_idata *)(ulptx + 1); | |
1509 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F); | |
1510 | idata->len = htonl(sizeof(*tx_data) + prior_data_len); | |
1511 | /* CPL_TX_DATA */ | |
1512 | tx_data = (struct cpl_tx_data *)(idata + 1); | |
1513 | OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid)); | |
1514 | tx_data->len = htonl(TX_DATA_MSS_V(mss) | | |
687823d2 | 1515 | TX_LENGTH_V(data_len + prior_data_len)); |
dc05f3df RM |
1516 | /* set tcp seq number */ |
1517 | tx_data->rsvd = htonl(tcp_seq); | |
1518 | tx_data->flags = htonl(TX_BYPASS_F); | |
1519 | if (tcp_push) | |
1520 | tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F); | |
1521 | ||
1522 | pos = tx_data + 1; | |
1523 | /* apart from prior_data_len, the remaining part of the 16 bytes must | |
1524 | * be set to zero. | |
1525 | */ | |
1526 | if (prior_data_len) | |
1527 | pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16); | |
1528 | ||
1529 | /* check left again, it might go beyond queue limit */ | |
1530 | left = (void *)q->q.stat - pos; | |
1531 | ||
1532 | /* check the position again */ | |
1533 | if (!left) { | |
1534 | left = (void *)end - (void *)q->q.stat; | |
1535 | pos = q->q.desc; | |
1536 | end = pos + left; | |
1537 | } | |
1538 | /* send the complete packet including the header */ | |
687823d2 RM |
1539 | cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr, |
1540 | skb_offset, data_len); | |
dc05f3df RM |
1541 | sgl_sdesc->skb = skb; |
1542 | ||
1543 | chcr_txq_advance(&q->q, ndesc); | |
1544 | cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); | |
1545 | return 0; | |
1546 | } | |
1547 | ||
21f82acb RM |
1548 | static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info, |
1549 | struct sk_buff *skb, | |
1550 | struct sge_eth_txq *q) | |
1551 | { | |
1552 | u32 ctrl, iplen, maclen, wr_mid = 0, len16; | |
1553 | struct tx_sw_desc *sgl_sdesc; | |
1554 | struct fw_eth_tx_pkt_wr *wr; | |
1555 | struct cpl_tx_pkt_core *cpl; | |
1556 | unsigned int flits, ndesc; | |
1557 | int credits, last_desc; | |
1558 | u64 cntrl1, *end; | |
1559 | void *pos; | |
1560 | ||
1561 | ctrl = sizeof(*cpl); | |
1562 | flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8); | |
1563 | ||
1564 | flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1); | |
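/* nr_frags + 1: the extra entry covers the skb linear (header) data, since
 * cxgb4_write_sgl() below maps the whole skb starting at offset 0.
 */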
1565 | len16 = DIV_ROUND_UP(flits, 2); | |
1566 | /* check how many descriptors needed */ | |
1567 | ndesc = DIV_ROUND_UP(flits, 8); | |
1568 | ||
1569 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1570 | if (unlikely(credits < 0)) { | |
1571 | chcr_eth_txq_stop(q); | |
1572 | return -ENOMEM; | |
1573 | } | |
1574 | ||
1575 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | |
1576 | chcr_eth_txq_stop(q); | |
1577 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | |
1578 | } | |
1579 | ||
1580 | last_desc = q->q.pidx + ndesc - 1; | |
1581 | if (last_desc >= q->q.size) | |
1582 | last_desc -= q->q.size; | |
1583 | sgl_sdesc = &q->q.sdesc[last_desc]; | |
1584 | ||
1585 | if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb, | |
1586 | sgl_sdesc->addr) < 0)) { | |
1587 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); | |
1588 | q->mapping_err++; | |
1589 | return -ENOMEM; | |
1590 | } | |
1591 | ||
1592 | iplen = skb_network_header_len(skb); | |
1593 | maclen = skb_mac_header_len(skb); | |
1594 | ||
1595 | pos = &q->q.desc[q->q.pidx]; | |
1596 | end = (u64 *)pos + flits; | |
1597 | wr = pos; | |
1598 | ||
1599 | /* Firmware work request header */ | |
1600 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | | |
1601 | FW_WR_IMMDLEN_V(ctrl)); | |
1602 | ||
1603 | wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16)); | |
1604 | wr->r3 = 0; | |
1605 | ||
1606 | cpl = (void *)(wr + 1); | |
1607 | ||
1608 | /* CPL header */ | |
1609 | cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | | |
1610 | TXPKT_INTF_V(tx_info->tx_chan) | | |
1611 | TXPKT_PF_V(tx_info->adap->pf)); | |
1612 | cpl->pack = 0; | |
1613 | cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ? | |
1614 | TX_CSUM_TCPIP : TX_CSUM_TCPIP6); | |
1615 | cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) | | |
1616 | TXPKT_IPHDR_LEN_V(iplen); | |
1617 | /* checksum offload */ | |
1618 | cpl->ctrl1 = cpu_to_be64(cntrl1); | |
1619 | cpl->len = htons(skb->len); | |
1620 | ||
1621 | pos = cpl + 1; | |
1622 | ||
1623 | cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr); | |
1624 | sgl_sdesc->skb = skb; | |
1625 | chcr_txq_advance(&q->q, ndesc); | |
1626 | cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); | |
1627 | return 0; | |
1628 | } | |
1629 | ||
429765a1 RM |
1630 | /* |
1631 | * chcr_ktls_copy_record_in_skb | |
1632 | * @nskb - new skb to which the frags are to be added. | |
687823d2 | 1633 | * @skb - old skb, to copy socket and destructor details. |
429765a1 RM |
1634 | * @record - record whose complete data (up to 16K) is held in frags. | |
1635 | */ | |
1636 | static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb, | |
687823d2 | 1637 | struct sk_buff *skb, |
429765a1 RM |
1638 | struct tls_record_info *record) |
1639 | { | |
1640 | int i = 0; | |
1641 | ||
1642 | for (i = 0; i < record->num_frags; i++) { | |
1643 | skb_shinfo(nskb)->frags[i] = record->frags[i]; | |
1644 | /* increase the frag ref count */ | |
1645 | __skb_frag_ref(&skb_shinfo(nskb)->frags[i]); | |
1646 | } | |
1647 | ||
1648 | skb_shinfo(nskb)->nr_frags = record->num_frags; | |
1649 | nskb->data_len = record->len; | |
1650 | nskb->len += record->len; | |
1651 | nskb->truesize += record->len; | |
687823d2 RM |
1652 | nskb->sk = skb->sk; |
1653 | nskb->destructor = skb->destructor; | |
1654 | refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc); | |
429765a1 RM |
1655 | } |
1656 | ||
1657 | /* | |
1658 | * chcr_ktls_update_snd_una: Reset SND_UNA. This is done to avoid sending | |
1659 | * the same segment again; anything before the current tx max will be | |
1660 | * discarded. | |
1661 | * @tx_info - driver specific tls info. | |
1662 | * @q - TX queue. | |
1663 | * return: 0 on success, NETDEV_TX_BUSY if the queue is full. | |
1664 | */ | |
1665 | static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info, | |
1666 | struct sge_eth_txq *q) | |
1667 | { | |
1668 | struct fw_ulptx_wr *wr; | |
1669 | unsigned int ndesc; | |
1670 | int credits; | |
1671 | void *pos; | |
1672 | u32 len; | |
1673 | ||
1674 | len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16); | |
1675 | ndesc = DIV_ROUND_UP(len, 64); | |
1676 | ||
1677 | credits = chcr_txq_avail(&q->q) - ndesc; | |
1678 | if (unlikely(credits < 0)) { | |
1679 | chcr_eth_txq_stop(q); | |
1680 | return NETDEV_TX_BUSY; | |
1681 | } | |
1682 | ||
1683 | pos = &q->q.desc[q->q.pidx]; | |
1684 | ||
1685 | wr = pos; | |
1686 | /* ULPTX wr */ | |
1687 | wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | |
1688 | wr->cookie = 0; | |
1689 | /* fill len in wr field */ | |
1690 | wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16))); | |
1691 | ||
1692 | pos += sizeof(*wr); | |
1693 | ||
1694 | pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, | |
1695 | TCB_SND_UNA_RAW_W, | |
1696 | TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M), | |
1697 | TCB_SND_UNA_RAW_V(0), 0); | |
1698 | ||
1699 | chcr_txq_advance(&q->q, ndesc); | |
1700 | cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); | |
1701 | ||
1702 | return 0; | |
1703 | } | |
1704 | ||
5a4b9fe7 RM |
1705 | /* |
1706 | * chcr_end_part_handler: Handles a record which is complete, or whose end | |
1707 | * part has been received. The T6 adapter has an issue in that it can't send | |
1708 | * out a TAG with a partial record, so if this is the end part we have to | |
1709 | * send the TAG as well, for which we need to fetch the complete record and | |
1710 | * send it to the crypto module. | |
1711 | * @tx_info - driver specific tls info. | |
1712 | * @skb - skb containing the partial record. | |
1713 | * @record - complete record of 16K size. | |
1714 | * @tcp_seq | |
1715 | * @mss - segment size in which TP needs to chop a packet. | |
1716 | * @tcp_push_no_fin - tcp push if fin is not set. | |
1717 | * @q - TX queue. | |
1718 | * @tls_end_offset - offset from end of the record. | |
1719 | * @last_wr - true if this is the last part of the skb going out. | |
1720 | * return: NETDEV_TX_OK/NETDEV_TX_BUSY. | |
1721 | */ | |
1722 | static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, | |
1723 | struct sk_buff *skb, | |
1724 | struct tls_record_info *record, | |
1725 | u32 tcp_seq, int mss, bool tcp_push_no_fin, | |
687823d2 | 1726 | struct sge_eth_txq *q, u32 skb_offset, |
5a4b9fe7 RM |
1727 | u32 tls_end_offset, bool last_wr) |
1728 | { | |
1729 | struct sk_buff *nskb = NULL; | |
1730 | /* check if it is a complete record */ | |
1731 | if (tls_end_offset == record->len) { | |
1732 | nskb = skb; | |
a8c16e8e | 1733 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts); |
5a4b9fe7 | 1734 | } else { |
687823d2 RM |
1735 | nskb = alloc_skb(0, GFP_ATOMIC); |
1736 | if (!nskb) { | |
1737 | dev_kfree_skb_any(skb); | |
429765a1 | 1738 | return NETDEV_TX_BUSY; |
687823d2 RM |
1739 | } |
1740 | ||
429765a1 | 1741 | /* copy complete record in skb */ |
687823d2 | 1742 | chcr_ktls_copy_record_in_skb(nskb, skb, record); |
429765a1 RM |
1743 | /* packet is being sent from the beginning, update the tcp_seq |
1744 | * accordingly. | |
1745 | */ | |
1746 | tcp_seq = tls_record_start_seq(record); | |
687823d2 RM |
1747 | /* reset skb offset */ |
1748 | skb_offset = 0; | |
1749 | ||
1750 | if (last_wr) | |
1751 | dev_kfree_skb_any(skb); | |
1752 | ||
1753 | last_wr = true; | |
1754 | ||
a8c16e8e | 1755 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts); |
5a4b9fe7 RM |
1756 | } |
1757 | ||
1758 | if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq, | |
687823d2 RM |
1759 | last_wr, record->len, skb_offset, |
1760 | record->num_frags, | |
5a4b9fe7 RM |
1761 | (last_wr && tcp_push_no_fin), |
1762 | mss)) { | |
1763 | goto out; | |
1764 | } | |
63ee4591 | 1765 | tx_info->prev_seq = record->end_seq; |
5a4b9fe7 RM |
1766 | return 0; |
1767 | out: | |
429765a1 | 1768 | dev_kfree_skb_any(nskb); |
5a4b9fe7 RM |
1769 | return NETDEV_TX_BUSY; |
1770 | } | |
1771 | ||
dc05f3df RM |
1772 | /* |
1773 | * chcr_short_record_handler: This handler takes care of records which don't | |
1774 | * have an end part (the first part or a middle part of a record). In such | |
1775 | * cases AES-CTR is used in place of AES-GCM to send out the partial packet. | |
1776 | * The partial record might be the first part of the record, or a middle | |
1777 | * part. For a middle part we fetch prior data to make the start point 16 | |
1778 | * byte aligned. If the chunk starts inside the tls header or iv, move back | |
1779 | * to the start of the tls header. And if it ends inside the TAG, drop the | |
1780 | * whole TAG and send only the payload. | |
1781 | * There is one more possibility: if only (part of) the header is present, | |
1782 | * that portion is sent out as plaintext. | |
1783 | * @tx_info - driver specific tls info. | |
1784 | * @skb - skb containing the partial record. | |
1785 | * @record - complete record of 16K size. | |
1786 | * @tcp_seq | |
1787 | * @mss - segment size in which TP needs to chop a packet. | |
1788 | * @tcp_push_no_fin - tcp push if fin is not set. | |
1789 | * @q - TX queue. | |
1790 | * @tls_end_offset - offset from end of the record. | |
1791 | * return: NETDEV_TX_OK/NETDEV_TX_BUSY. | |
1792 | */ | |
1793 | static int chcr_short_record_handler(struct chcr_ktls_info *tx_info, | |
1794 | struct sk_buff *skb, | |
1795 | struct tls_record_info *record, | |
1796 | u32 tcp_seq, int mss, bool tcp_push_no_fin, | |
687823d2 | 1797 | u32 data_len, u32 skb_offset, |
dc05f3df RM |
1798 | struct sge_eth_txq *q, u32 tls_end_offset) |
1799 | { | |
1800 | u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record); | |
1801 | u8 prior_data[16] = {0}; | |
1802 | u32 prior_data_len = 0; | |
dc05f3df RM |
1803 | |
1804 | /* check if the skb ends in the middle of the tag/HASH; that is a | |
1805 | * problem, so send only the part of the packet before the HASH. | |
1806 | */ | |
687823d2 | 1807 | int remaining_record = tls_end_offset - data_len; |
dc05f3df RM |
1808 | |
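/* Illustrative numbers for the trim below: with tls_end_offset == 20 and
 * data_len == 10, remaining_record == 10, which lies inside the 16-byte
 * TAG, so trimmed_len == 10 - (16 - 10) == 4; sending only 4 bytes leaves
 * exactly the 16-byte TAG for a later skb.
 */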
1809 | if (remaining_record > 0 && | |
1810 | remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) { | |
c68a28a9 RM |
1811 | int trimmed_len = 0; |
1812 | ||
1813 | if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE) | |
1814 | trimmed_len = data_len - | |
1815 | (TLS_CIPHER_AES_GCM_128_TAG_SIZE - | |
1816 | remaining_record); | |
1817 | if (!trimmed_len) | |
21f82acb | 1818 | return FALLBACK; |
dc05f3df | 1819 | |
687823d2 | 1820 | WARN_ON(trimmed_len > data_len); |
dc05f3df | 1821 | |
687823d2 | 1822 | data_len = trimmed_len; |
a8c16e8e | 1823 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts); |
dc05f3df | 1824 | } |
687823d2 | 1825 | |
83deb094 RM |
1826 | /* check if it is only the header part. */ |
1827 | if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) { | |
1828 | if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss, | |
1829 | tcp_push_no_fin, q, | |
1830 | tx_info->port_id, prior_data, | |
1831 | data_len, skb_offset, prior_data_len)) | |
1832 | goto out; | |
1833 | ||
63ee4591 | 1834 | tx_info->prev_seq = tcp_seq + data_len; |
83deb094 RM |
1835 | return 0; |
1836 | } | |
1837 | ||
dc05f3df RM |
1838 | /* check if the middle record's start point is 16 byte aligned. CTR |
1839 | * needs a 16 byte aligned start point to start encryption. | |
1840 | */ | |
1841 | if (tls_rec_offset) { | |
1842 | /* there is an offset from the start, which means it's a middle record */ | |
1843 | int remaining = 0; | |
1844 | ||
1845 | if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) { | |
1846 | prior_data_len = tls_rec_offset; | |
1847 | tls_rec_offset = 0; | |
1848 | remaining = 0; | |
1849 | } else { | |
1850 | prior_data_len = | |
1851 | (tls_rec_offset - | |
1852 | (TLS_HEADER_SIZE + tx_info->iv_size)) | |
1853 | % AES_BLOCK_LEN; | |
1854 | remaining = tls_rec_offset - prior_data_len; | |
1855 | } | |
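/* Illustrative arithmetic, assuming a 5-byte TLS header and 8-byte IV:
 * tls_rec_offset == 50 gives prior_data_len == (50 - 13) % 16 == 5 and
 * remaining == 45, i.e. 5 bytes of already-sent ciphertext are fetched so
 * that AES-CTR can restart on a 16-byte block boundary.
 */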
1856 | ||
1857 | /* if prior_data_len is non-zero, we need to fetch prior data to | |
1858 | * make this record 16 byte aligned, or to get back to the start | |
1859 | * offset. | |
1860 | */ | |
1861 | if (prior_data_len) { | |
1862 | int i = 0; | |
1863 | u8 *data = NULL; | |
1864 | skb_frag_t *f; | |
1865 | u8 *vaddr; | |
1866 | int frag_size = 0, frag_delta = 0; | |
1867 | ||
1868 | while (remaining > 0) { | |
1869 | frag_size = skb_frag_size(&record->frags[i]); | |
1870 | if (remaining < frag_size) | |
1871 | break; | |
1872 | ||
1873 | remaining -= frag_size; | |
1874 | i++; | |
1875 | } | |
1876 | f = &record->frags[i]; | |
1877 | vaddr = kmap_atomic(skb_frag_page(f)); | |
1878 | ||
1879 | data = vaddr + skb_frag_off(f) + remaining; | |
1880 | frag_delta = skb_frag_size(f) - remaining; | |
1881 | ||
1882 | if (frag_delta >= prior_data_len) { | |
1883 | memcpy(prior_data, data, prior_data_len); | |
1884 | kunmap_atomic(vaddr); | |
1885 | } else { | |
1886 | memcpy(prior_data, data, frag_delta); | |
1887 | kunmap_atomic(vaddr); | |
1888 | /* get the next page */ | |
1889 | f = &record->frags[i + 1]; | |
1890 | vaddr = kmap_atomic(skb_frag_page(f)); | |
1891 | data = vaddr + skb_frag_off(f); | |
1892 | memcpy(prior_data + frag_delta, | |
1893 | data, (prior_data_len - frag_delta)); | |
1894 | kunmap_atomic(vaddr); | |
1895 | } | |
1896 | /* move tcp_seq back by the prior_data length */ | |
1897 | tcp_seq -= prior_data_len; | |
dc05f3df RM |
1898 | } |
1899 | /* reset snd_una, so the middle record won't resend the part that | |
1900 | * was already sent. | |
1901 | */ | |
1902 | if (chcr_ktls_update_snd_una(tx_info, q)) | |
1903 | goto out; | |
a8c16e8e | 1904 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts); |
dc05f3df | 1905 | } else { |
a8c16e8e | 1906 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts); |
dc05f3df RM |
1907 | } |
1908 | ||
1909 | if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin, | |
1910 | mss, tls_rec_offset, prior_data, | |
687823d2 | 1911 | prior_data_len, data_len, skb_offset)) { |
dc05f3df RM |
1912 | goto out; |
1913 | } | |
1914 | ||
63ee4591 | 1915 | tx_info->prev_seq = tcp_seq + data_len + prior_data_len; |
dc05f3df RM |
1916 | return 0; |
1917 | out: | |
1918 | dev_kfree_skb_any(skb); | |
1919 | return NETDEV_TX_BUSY; | |
1920 | } | |
1921 | ||
21f82acb RM |
1922 | static int chcr_ktls_sw_fallback(struct sk_buff *skb, |
1923 | struct chcr_ktls_info *tx_info, | |
1924 | struct sge_eth_txq *q) | |
1925 | { | |
1926 | u32 data_len, skb_offset; | |
1927 | struct sk_buff *nskb; | |
1928 | struct tcphdr *th; | |
1929 | ||
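/* tls_encrypt_skb() is the kernel TLS software fallback: the record is
 * encrypted on the host and the resulting skb is sent below as a regular
 * (tunnel) packet, bypassing the inline crypto engine.
 */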
1930 | nskb = tls_encrypt_skb(skb); | |
1931 | ||
1932 | if (!nskb) | |
1933 | return 0; | |
1934 | ||
1935 | th = tcp_hdr(nskb); | |
1936 | skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb); | |
1937 | data_len = nskb->len - skb_offset; | |
1938 | skb_tx_timestamp(nskb); | |
1939 | ||
1940 | if (chcr_ktls_tunnel_pkt(tx_info, nskb, q)) | |
1941 | goto out; | |
1942 | ||
1943 | tx_info->prev_seq = ntohl(th->seq) + data_len; | |
1944 | atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback); | |
1945 | return 0; | |
1946 | out: | |
1947 | dev_kfree_skb_any(nskb); | |
1948 | return 0; | |
1949 | } | |
5a4b9fe7 | 1950 | /* nic tls TX handler */ |
a8c16e8e | 1951 | static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) |
5a4b9fe7 | 1952 | { |
b1b5cb18 | 1953 | u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset; |
3427e13e | 1954 | struct ch_ktls_port_stats_debug *port_stats; |
5a4b9fe7 | 1955 | struct chcr_ktls_ofld_ctx_tx *tx_ctx; |
a8c16e8e | 1956 | struct ch_ktls_stats_debug *stats; |
5a4b9fe7 RM |
1957 | struct tcphdr *th = tcp_hdr(skb); |
1958 | int data_len, qidx, ret = 0, mss; | |
1959 | struct tls_record_info *record; | |
1960 | struct chcr_ktls_info *tx_info; | |
5a4b9fe7 | 1961 | struct tls_context *tls_ctx; |
5a4b9fe7 RM |
1962 | struct sge_eth_txq *q; |
1963 | struct adapter *adap; | |
1964 | unsigned long flags; | |
1965 | ||
1966 | tcp_seq = ntohl(th->seq); | |
b1b5cb18 RM |
1967 | skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1968 | skb_data_len = skb->len - skb_offset; | |
1969 | data_len = skb_data_len; | |
5a4b9fe7 | 1970 | |
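/* a non-GSO skb carries at most one segment worth of payload, so its full
 * data length is used as the MSS.
 */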
b1b5cb18 | 1971 | mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len; |
5a4b9fe7 | 1972 | |
5a4b9fe7 RM |
1973 | tls_ctx = tls_get_ctx(skb->sk); |
1974 | if (unlikely(tls_ctx->netdev != dev)) | |
1975 | goto out; | |
1976 | ||
1977 | tx_ctx = chcr_get_ktls_tx_context(tls_ctx); | |
1978 | tx_info = tx_ctx->chcr_info; | |
1979 | ||
1980 | if (unlikely(!tx_info)) | |
1981 | goto out; | |
1982 | ||
5a4b9fe7 | 1983 | adap = tx_info->adap; |
a8c16e8e | 1984 | stats = &adap->ch_ktls_stats; |
3427e13e | 1985 | port_stats = &stats->ktls_port[tx_info->port_id]; |
62370a4f | 1986 | |
5a4b9fe7 RM |
1987 | qidx = skb->queue_mapping; |
1988 | q = &adap->sge.ethtxq[qidx + tx_info->first_qset]; | |
1989 | cxgb4_reclaim_completed_tx(adap, &q->q, true); | |
429765a1 RM |
1990 | /* if tcp options are set but FIN is not, send the options first */ | |
1991 | if (!th->fin && chcr_ktls_check_tcp_options(th)) { | |
1992 | ret = chcr_ktls_write_tcp_options(tx_info, skb, q, | |
1993 | tx_info->tx_chan); | |
1994 | if (ret) | |
1995 | return NETDEV_TX_BUSY; | |
1996 | } | |
5a4b9fe7 | 1997 | |
429765a1 | 1998 | /* TCP segments can be received either complete or partial. | |
5a4b9fe7 RM |
1999 | * chcr_end_part_handler will handle the cases where a complete record or | |
2000 | * the end part of a record is received. In case of a partial end part of a | |
2001 | * record, we will send the complete record again. | |
2002 | */ | |
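/* Records still in the start-marker region are sent out as plaintext, and
 * a start/middle chunk with no end part goes through
 * chcr_short_record_handler() instead.
 */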
62370a4f | 2003 | |
5a4b9fe7 RM |
2004 | do { |
2005 | int i; | |
2006 | ||
2007 | cxgb4_reclaim_completed_tx(adap, &q->q, true); | |
2008 | /* lock taken */ | |
2009 | spin_lock_irqsave(&tx_ctx->base.lock, flags); | |
2010 | /* fetch the tls record */ | |
2011 | record = tls_get_record(&tx_ctx->base, tcp_seq, | |
2012 | &tx_info->record_no); | |
2013 | /* By the time the packet reached us, the ACK may already have been | |
2014 | * received and the record freed; handle that case gracefully. | |
2015 | */ | |
2016 | if (unlikely(!record)) { | |
2017 | spin_unlock_irqrestore(&tx_ctx->base.lock, flags); | |
3427e13e | 2018 | atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); |
5a4b9fe7 RM |
2019 | goto out; |
2020 | } | |
2021 | ||
63ee4591 RM |
2022 | tls_end_offset = record->end_seq - tcp_seq; |
2023 | ||
2024 | pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n", | |
2025 | tcp_seq, record->end_seq, tx_info->prev_seq, data_len); | |
2026 | /* update tcb for the skb */ | |
2027 | if (skb_data_len == data_len) { | |
2028 | u32 tx_max = tcp_seq; | |
2029 | ||
2030 | if (!tls_record_is_start_marker(record) && | |
2031 | tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE) | |
2032 | tx_max = record->end_seq - | |
2033 | TLS_CIPHER_AES_GCM_128_TAG_SIZE; | |
2034 | ||
2035 | ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max, | |
2036 | ntohl(th->ack_seq), | |
2037 | ntohs(th->window), | |
2038 | tls_end_offset != | |
2039 | record->len); | |
2040 | if (ret) { | |
2041 | spin_unlock_irqrestore(&tx_ctx->base.lock, | |
2042 | flags); | |
2043 | goto out; | |
2044 | } | |
659bf038 RM |
2045 | |
2046 | if (th->fin) | |
2047 | skb_get(skb); | |
63ee4591 | 2048 | } |
9478e083 RM |
2049 | |
2050 | if (unlikely(tls_record_is_start_marker(record))) { | |
2051 | atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data); | |
2052 | /* If tls_end_offset < data_len, there is some data after the | |
2053 | * start marker which needs encryption; send the plaintext part | |
2054 | * first and take an skb refcount. Otherwise send out the | |
2055 | * complete pkt as plaintext. | |
2056 | */ | |
2057 | if (tls_end_offset < data_len) | |
2058 | skb_get(skb); | |
2059 | else | |
2060 | tls_end_offset = data_len; | |
2061 | ||
2062 | ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss, | |
2063 | (!th->fin && th->psh), q, | |
2064 | tx_info->port_id, NULL, | |
2065 | tls_end_offset, skb_offset, | |
2066 | 0); | |
2067 | ||
2068 | spin_unlock_irqrestore(&tx_ctx->base.lock, flags); | |
2069 | if (ret) { | |
2070 | /* free the refcount taken earlier */ | |
2071 | if (tls_end_offset < data_len) | |
2072 | dev_kfree_skb_any(skb); | |
2073 | goto out; | |
2074 | } | |
2075 | ||
2076 | data_len -= tls_end_offset; | |
2077 | tcp_seq = record->end_seq; | |
2078 | skb_offset += tls_end_offset; | |
2079 | continue; | |
2080 | } | |
2081 | ||
5a4b9fe7 RM |
2082 | /* increase the page reference count of the record, so that there | |
2083 | * is no chance of a page being freed in the middle if the stack | |
2084 | * receives an ACK and tries to delete the record. | |
2085 | */ | |
2086 | for (i = 0; i < record->num_frags; i++) | |
2087 | __skb_frag_ref(&record->frags[i]); | |
2088 | /* lock cleared */ | |
2089 | spin_unlock_irqrestore(&tx_ctx->base.lock, flags); | |
2090 | ||
5a4b9fe7 | 2091 | |
5a4b9fe7 RM |
2092 | /* if a tls record is finishing in this SKB */ |
2093 | if (tls_end_offset <= data_len) { | |
687823d2 | 2094 | ret = chcr_end_part_handler(tx_info, skb, record, |
5a4b9fe7 RM |
2095 | tcp_seq, mss, |
2096 | (!th->fin && th->psh), q, | |
687823d2 | 2097 | skb_offset, |
5a4b9fe7 | 2098 | tls_end_offset, |
687823d2 RM |
2099 | skb_offset + |
2100 | tls_end_offset == skb->len); | |
5a4b9fe7 RM |
2101 | |
2102 | data_len -= tls_end_offset; | |
2103 | /* tcp_seq increment is required to handle next record. | |
2104 | */ | |
2105 | tcp_seq += tls_end_offset; | |
687823d2 | 2106 | skb_offset += tls_end_offset; |
dc05f3df | 2107 | } else { |
687823d2 | 2108 | ret = chcr_short_record_handler(tx_info, skb, |
dc05f3df RM |
2109 | record, tcp_seq, mss, |
2110 | (!th->fin && th->psh), | |
687823d2 | 2111 | data_len, skb_offset, |
dc05f3df RM |
2112 | q, tls_end_offset); |
2113 | data_len = 0; | |
5a4b9fe7 | 2114 | } |
687823d2 | 2115 | |
5a4b9fe7 RM |
2116 | /* drop the frag references which were taken locally above */ | |
2117 | for (i = 0; i < record->num_frags; i++) { | |
2118 | /* clear the frag ref count */ | |
2119 | __skb_frag_unref(&record->frags[i]); | |
2120 | } | |
dc05f3df | 2121 | /* on any failure, break out of the loop. */ | |
659bf038 RM |
2122 | if (ret) { |
2123 | if (th->fin) | |
2124 | dev_kfree_skb_any(skb); | |
21f82acb RM |
2125 | |
2126 | if (ret == FALLBACK) | |
2127 | return chcr_ktls_sw_fallback(skb, tx_info, q); | |
2128 | ||
687823d2 | 2129 | return NETDEV_TX_OK; |
659bf038 | 2130 | } |
687823d2 | 2131 | |
dc05f3df | 2132 | /* length should never be less than 0 */ |
5a4b9fe7 RM |
2133 | WARN_ON(data_len < 0); |
2134 | ||
2135 | } while (data_len > 0); | |
2136 | ||
3427e13e | 2137 | atomic64_inc(&port_stats->ktls_tx_encrypted_packets); |
b1b5cb18 | 2138 | atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); |
62370a4f | 2139 | |
429765a1 RM |
2140 | /* TCP FIN is set, send a separate tcp msg including all the options | |
2141 | * as well. | |
2142 | */ | |
659bf038 | 2143 | if (th->fin) { |
429765a1 | 2144 | chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan); |
659bf038 RM |
2145 | dev_kfree_skb_any(skb); |
2146 | } | |
429765a1 | 2147 | |
687823d2 | 2148 | return NETDEV_TX_OK; |
5a4b9fe7 RM |
2149 | out: |
2150 | dev_kfree_skb_any(skb); | |
2151 | return NETDEV_TX_OK; | |
2152 | } | |
a8c16e8e RM |
2153 | |
2154 | static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi) | |
2155 | { | |
2156 | struct chcr_ktls_uld_ctx *u_ctx; | |
2157 | ||
2158 | pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC, | |
2159 | CHCR_KTLS_DRV_VERSION); | |
2160 | u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); | |
2161 | if (!u_ctx) { | |
2162 | u_ctx = ERR_PTR(-ENOMEM); | |
2163 | goto out; | |
2164 | } | |
2165 | u_ctx->lldi = *lldi; | |
2166 | out: | |
2167 | return u_ctx; | |
2168 | } | |
2169 | ||
2170 | static const struct tlsdev_ops chcr_ktls_ops = { | |
2171 | .tls_dev_add = chcr_ktls_dev_add, | |
2172 | .tls_dev_del = chcr_ktls_dev_del, | |
2173 | }; | |
2174 | ||
2175 | static chcr_handler_func work_handlers[NUM_CPL_CMDS] = { | |
2176 | [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl, | |
2177 | [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl, | |
2178 | }; | |
2179 | ||
2180 | static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp, | |
2181 | const struct pkt_gl *pgl) | |
2182 | { | |
2183 | const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp; | |
2184 | struct chcr_ktls_uld_ctx *u_ctx = handle; | |
2185 | u8 opcode = rpl->ot.opcode; | |
2186 | struct adapter *adap; | |
2187 | ||
2188 | adap = pci_get_drvdata(u_ctx->lldi.pdev); | |
2189 | ||
2190 | if (!work_handlers[opcode]) { | |
2191 | pr_err("Unsupported opcode %d received\n", opcode); | |
2192 | return 0; | |
2193 | } | |
2194 | ||
2195 | work_handlers[opcode](adap, (unsigned char *)&rsp[1]); | |
2196 | return 0; | |
2197 | } | |
2198 | ||
2199 | static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state) | |
2200 | { | |
2201 | struct chcr_ktls_uld_ctx *u_ctx = handle; | |
2202 | ||
2203 | switch (new_state) { | |
2204 | case CXGB4_STATE_UP: | |
2205 | pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev)); | |
2206 | mutex_lock(&dev_mutex); | |
2207 | list_add_tail(&u_ctx->entry, &uld_ctx_list); | |
2208 | mutex_unlock(&dev_mutex); | |
2209 | break; | |
2210 | case CXGB4_STATE_START_RECOVERY: | |
2211 | case CXGB4_STATE_DOWN: | |
2212 | case CXGB4_STATE_DETACH: | |
2213 | pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev)); | |
2214 | mutex_lock(&dev_mutex); | |
2215 | list_del(&u_ctx->entry); | |
2216 | mutex_unlock(&dev_mutex); | |
2217 | break; | |
2218 | default: | |
2219 | break; | |
2220 | } | |
2221 | ||
2222 | return 0; | |
2223 | } | |
2224 | ||
2225 | static struct cxgb4_uld_info chcr_ktls_uld_info = { | |
2226 | .name = CHCR_KTLS_DRV_MODULE_NAME, | |
2227 | .nrxq = 1, | |
2228 | .rxq_size = 1024, | |
2229 | .add = chcr_ktls_uld_add, | |
2230 | .tx_handler = chcr_ktls_xmit, | |
2231 | .rx_handler = chcr_ktls_uld_rx_handler, | |
2232 | .state_change = chcr_ktls_uld_state_change, | |
2233 | .tlsdev_ops = &chcr_ktls_ops, | |
2234 | }; | |
2235 | ||
2236 | static int __init chcr_ktls_init(void) | |
2237 | { | |
2238 | cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info); | |
2239 | return 0; | |
2240 | } | |
2241 | ||
2242 | static void __exit chcr_ktls_exit(void) | |
2243 | { | |
2244 | struct chcr_ktls_uld_ctx *u_ctx, *tmp; | |
2245 | struct adapter *adap; | |
2246 | ||
2247 | pr_info("driver unloaded\n"); | |
2248 | ||
2249 | mutex_lock(&dev_mutex); | |
2250 | list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) { | |
2251 | adap = pci_get_drvdata(u_ctx->lldi.pdev); | |
2252 | memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats)); | |
2253 | list_del(&u_ctx->entry); | |
2254 | kfree(u_ctx); | |
2255 | } | |
2256 | mutex_unlock(&dev_mutex); | |
2257 | cxgb4_unregister_uld(CXGB4_ULD_KTLS); | |
2258 | } | |
2259 | ||
2260 | module_init(chcr_ktls_init); | |
2261 | module_exit(chcr_ktls_exit); | |
2262 | ||
2263 | MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver"); | |
2264 | MODULE_LICENSE("GPL"); | |
2265 | MODULE_AUTHOR("Chelsio Communications"); | |
2266 | MODULE_VERSION(CHCR_KTLS_DRV_VERSION); |