// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
#define NVMET_TCP_BACKLOG		128

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set = set_params,
	.get = param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

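/*
 * Added note, illustrative only: parameters registered via device_param_cb()
 * are writable at runtime through the standard module-param sysfs path,
 * e.g. (assuming the module is loaded as nvmet_tcp):
 *
 *	echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 *
 * The stored value is consumed when the driver configures each newly
 * accepted queue socket.
 */
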
/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");

#ifdef CONFIG_NVME_TARGET_TCP_TLS
/*
 * TLS handshake timeout
 */
static int tls_handshake_timeout = 10;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		 "nvme TLS handshake timeout in seconds (default 10)");
#endif

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

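/*
 * Added note: these budgets bound how much work a single nvmet_tcp_io_work()
 * invocation may do. Each pass attempts up to NVMET_TCP_RECV_BUDGET receive
 * operations and NVMET_TCP_SEND_BUDGET send operations, and the worker loops
 * until either no progress is made or the combined op count reaches
 * NVMET_TCP_IO_WORK_BUDGET, at which point it requeues itself so other
 * queues sharing the workqueue can make progress.
 */
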
enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	char				recv_cbuf[CMSG_LEN(sizeof(char))];
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_TLS_HANDSHAKE,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
	NVMET_TCP_Q_FAILED,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct kref		kref;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	/* TLS state */
	key_serial_t		tls_pskid;
	struct delayed_work	tls_handshake_tmo_work;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage	addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

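/*
 * Added note: on the wire an NVMe/TCP PDU is laid out as
 *
 *	HDR | HDGST (optional) | PAD | DATA | DDGST (optional)
 *
 * with both digests computed as CRC32C. nvmet_tcp_verify_hdgst()
 * recomputes the header digest into the in-place slot at pdu + hdr->hlen
 * and compares it with the received value, while nvmet_tcp_check_ddgst()
 * only validates that a data digest is announced whenever the PDU carries
 * data; the digest value itself is verified later, in
 * nvmet_tcp_try_recv_ddgst().
 */
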
/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}

static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct bio_vec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;
	int nr_pages;

	length = cmd->pdu_len;
	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		bvec_set_page(iov, sg_page(sg), iov_len,
				sg->offset + sg_offset);

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
		nr_pages, cmd->pdu_len);
}

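/*
 * Added sketch: nvmet_tcp_build_pdu_iovec() resumes at the sg entry that
 * covers cmd->rbytes_done and emits one bio_vec per (partial) sg entry.
 * For example, a 12K H2CData PDU landing at byte 4096 of a 16K buffer
 * built from 4K pages starts at sg_idx 1 (sg_offset 0) and produces three
 * bvecs over sg entries 1..3; the resulting iterator is consumed directly
 * by sock_recvmsg() in nvmet_tcp_try_recv_data().
 */
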
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
	};
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct msghdr msg = {
			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
		};
		struct page *page = sg_page(cmd->cur_sg);
		struct bio_vec bvec;
		u32 left = cmd->cur_sg->length - cmd->offset;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			msg.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, left, cmd->offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
		ret = sock_sendmsg(cmd->queue->sock, &msg);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

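/*
 * Added note: the per-command send state machine advances in the order set
 * up by nvmet_tcp_fetch_cmd():
 *
 *	SEND_DATA_PDU -> SEND_DATA -> [SEND_DDGST] -> SEND_RESPONSE  (C2H read data)
 *	SEND_R2T                                                     (host write)
 *	SEND_RESPONSE                                                (no data)
 *
 * nvmet_tcp_try_send_one() is deliberately a fall-through chain of ifs so
 * a command can progress through several states in a single call while the
 * socket keeps accepting bytes; -EAGAIN leaves the command parked as
 * queue->snd_cmd to be resumed on the next io_work pass.
 */
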
static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0) {
		queue->state = NVMET_TCP_Q_FAILED;
		return ret; /* queue removal will cleanup */
	}

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
}

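/*
 * Added note: the ICReq/ICResp exchange is the only traffic allowed while
 * the queue is NVMET_TCP_Q_CONNECTING. The controller echoes the
 * negotiated digest settings, advertises NVMET_TCP_MAXH2CDATA as the
 * largest H2CData PDU it will accept, and only then moves the queue to
 * NVMET_TCP_Q_LIVE so command PDUs can be processed.
 */
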
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out if there is still pending data left to receive. If
	 * we don't, we can simply prepare for the next pdu and bail out,
	 * otherwise we will need to prepare a buffer and receive the
	 * stale data before continuing forward.
	 */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;
	unsigned int exp_data_len;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			goto err_proto;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		goto err_proto;
	}

	exp_data_len = le32_to_cpu(data->hdr.plen) -
			nvmet_tcp_hdgst_len(queue) -
			nvmet_tcp_ddgst_len(queue) -
			sizeof(*data);

	cmd->pdu_len = le32_to_cpu(data->data_length);
	if (unlikely(cmd->pdu_len != exp_data_len ||
		     cmd->pdu_len == 0 ||
		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
		goto err_proto;
	}
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;

err_proto:
	/* FIXME: use proper transport errors */
	nvmet_tcp_fatal_error(queue);
	return -EPROTO;
}

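/*
 * Added note: H2CData PDUs arrive either as inline data for the connect
 * command (the ttag lookup against queue->cmds is skipped while nr_cmds is
 * still zero) or in response to an R2T, in which case the ttag, data_offset
 * and declared PDU length must all match what the R2T solicited; any
 * mismatch is treated as a fatal protocol error.
 */
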
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (unlikely(hdr->type == nvme_tcp_icreq)) {
		pr_err("queue %d: received icreq pdu in state %d\n",
			queue->idx, queue->state);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
		struct msghdr *msg, char *cbuf)
{
	struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
	u8 ctype, level, description;
	int ret = 0;

	ctype = tls_get_record_type(queue->sock->sk, cmsg);
	switch (ctype) {
	case 0:
		break;
	case TLS_RECORD_TYPE_DATA:
		break;
	case TLS_RECORD_TYPE_ALERT:
		tls_alert_recv(queue->sock->sk, msg, &level, &description);
		if (level == TLS_ALERT_LEVEL_FATAL) {
			pr_err("queue %d: TLS Alert desc %u\n",
			       queue->idx, description);
			ret = -ENOTCONN;
		} else {
			pr_warn("queue %d: TLS Alert desc %u\n",
				queue->idx, description);
			ret = -EAGAIN;
		}
		break;
	default:
		/* discard this record type */
		pr_err("queue %d: TLS record %d unhandled\n",
		       queue->idx, ctype);
		ret = -EAGAIN;
		break;
	}
	return ret;
}

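/*
 * Added note: with kTLS, non-data records (alerts etc.) are delivered
 * in-band and announced through a TLS record-type control message, which
 * is why every recvmsg() on a TLS-enabled queue (queue->tls_pskid set)
 * passes a CMSG_LEN(sizeof(char)) cmsg buffer and funnels it through
 * nvmet_tcp_tls_record_ok() before trusting the payload bytes.
 */
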
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len, ret;
	struct kvec iov;
	char cbuf[CMSG_LEN(sizeof(char))] = {};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	if (queue->tls_pskid) {
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);
	}
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;
	if (queue->tls_pskid) {
		ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
		if (ret < 0)
			return ret;
	}

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int len, ret;

	while (msg_data_left(&cmd->recv_msg)) {
		len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (len <= 0)
			return len;
		if (queue->tls_pskid) {
			ret = nvmet_tcp_tls_record_ok(cmd->queue,
					&cmd->recv_msg, cmd->recv_cbuf);
			if (ret < 0)
				return ret;
		}

		cmd->pdu_recv += len;
		cmd->rbytes_done += len;
	}

	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret, len;
	char cbuf[CMSG_LEN(sizeof(char))] = {};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	if (queue->tls_pskid) {
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);
	}
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;
	if (queue->tls_pskid) {
		ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
		if (ret < 0)
			return ret;
	}

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_req_uninit(&cmd->req);
		nvmet_tcp_free_cmd_buffers(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_release_queue(struct kref *kref)
{
	struct nvmet_tcp_queue *queue =
		container_of(kref, struct nvmet_tcp_queue, kref);

	WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
	queue_work(nvmet_wq, &queue->release_work);
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock_bh(&queue->state_lock);
	if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
		/* Socket closed during handshake */
		tls_handshake_cancel(queue->sock->sk);
	}
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		kref_put(&queue->kref, nvmet_tcp_release_queue);
	}
	spin_unlock_bh(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

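/*
 * Added sketch: with idle_poll_period_usecs set, every burst of ops pushes
 * the deadline forward:
 *
 *	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
 *
 * and nvmet_tcp_io_work() below keeps rescheduling itself until jiffies
 * passes poll_end with no further activity, trading CPU for lower latency
 * on busy queues.
 */
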
872d26a3 SG |
1426 | static void nvmet_tcp_io_work(struct work_struct *w) |
1427 | { | |
1428 | struct nvmet_tcp_queue *queue = | |
1429 | container_of(w, struct nvmet_tcp_queue, io_work); | |
1430 | bool pending; | |
1431 | int ret, ops = 0; | |
1432 | ||
1433 | do { | |
1434 | pending = false; | |
1435 | ||
1436 | ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); | |
0236d343 | 1437 | if (ret > 0) |
872d26a3 | 1438 | pending = true; |
0236d343 | 1439 | else if (ret < 0) |
872d26a3 | 1440 | return; |
872d26a3 SG |
1441 | |
1442 | ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); | |
0236d343 | 1443 | if (ret > 0) |
872d26a3 | 1444 | pending = true; |
0236d343 | 1445 | else if (ret < 0) |
872d26a3 | 1446 | return; |
872d26a3 SG |
1447 | |
1448 | } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); | |
1449 | ||
1450 | /* | |
d8e7b462 WM |
1451 | * Requeue the worker if idle deadline period is in progress or any |
1452 | * ops activity was recorded during the do-while loop above. | |
872d26a3 | 1453 | */ |
d8e7b462 | 1454 | if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) |
f7790e5d | 1455 | queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); |
872d26a3 SG |
1456 | } |
1457 | ||
1458 | static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, | |
1459 | struct nvmet_tcp_cmd *c) | |
1460 | { | |
1461 | u8 hdgst = nvmet_tcp_hdgst_len(queue); | |
1462 | ||
1463 | c->queue = queue; | |
1464 | c->req.port = queue->port->nport; | |
1465 | ||
1466 | c->cmd_pdu = page_frag_alloc(&queue->pf_cache, | |
1467 | sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); | |
1468 | if (!c->cmd_pdu) | |
1469 | return -ENOMEM; | |
1470 | c->req.cmd = &c->cmd_pdu->cmd; | |
1471 | ||
1472 | c->rsp_pdu = page_frag_alloc(&queue->pf_cache, | |
1473 | sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); | |
1474 | if (!c->rsp_pdu) | |
1475 | goto out_free_cmd; | |
fc6c9730 | 1476 | c->req.cqe = &c->rsp_pdu->cqe; |
872d26a3 SG |
1477 | |
1478 | c->data_pdu = page_frag_alloc(&queue->pf_cache, | |
1479 | sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); | |
1480 | if (!c->data_pdu) | |
1481 | goto out_free_rsp; | |
1482 | ||
1483 | c->r2t_pdu = page_frag_alloc(&queue->pf_cache, | |
1484 | sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); | |
1485 | if (!c->r2t_pdu) | |
1486 | goto out_free_data; | |
1487 | ||
a1c5dd83 HR |
1488 | if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { |
1489 | c->recv_msg.msg_control = c->recv_cbuf; | |
1490 | c->recv_msg.msg_controllen = sizeof(c->recv_cbuf); | |
1491 | } | |
872d26a3 SG |
1492 | c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; |
1493 | ||
1494 | list_add_tail(&c->entry, &queue->free_list); | |
1495 | ||
1496 | return 0; | |
1497 | out_free_data: | |
1498 | page_frag_free(c->data_pdu); | |
1499 | out_free_rsp: | |
1500 | page_frag_free(c->rsp_pdu); | |
1501 | out_free_cmd: | |
1502 | page_frag_free(c->cmd_pdu); | |
1503 | return -ENOMEM; | |
1504 | } | |
1505 | ||
1506 | static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) | |
1507 | { | |
1508 | page_frag_free(c->r2t_pdu); | |
1509 | page_frag_free(c->data_pdu); | |
1510 | page_frag_free(c->rsp_pdu); | |
1511 | page_frag_free(c->cmd_pdu); | |
1512 | } | |
1513 | ||
1514 | static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) | |
1515 | { | |
1516 | struct nvmet_tcp_cmd *cmds; | |
1517 | int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; | |
1518 | ||
1519 | cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); | |
1520 | if (!cmds) | |
1521 | goto out; | |
1522 | ||
1523 | for (i = 0; i < nr_cmds; i++) { | |
1524 | ret = nvmet_tcp_alloc_cmd(queue, cmds + i); | |
1525 | if (ret) | |
1526 | goto out_free; | |
1527 | } | |
1528 | ||
1529 | queue->cmds = cmds; | |
1530 | ||
1531 | return 0; | |
1532 | out_free: | |
1533 | while (--i >= 0) | |
1534 | nvmet_tcp_free_cmd(cmds + i); | |
1535 | kfree(cmds); | |
1536 | out: | |
1537 | return ret; | |
1538 | } | |
1539 | ||
1540 | static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) | |
1541 | { | |
1542 | struct nvmet_tcp_cmd *cmds = queue->cmds; | |
1543 | int i; | |
1544 | ||
1545 | for (i = 0; i < queue->nr_cmds; i++) | |
1546 | nvmet_tcp_free_cmd(cmds + i); | |
1547 | ||
1548 | nvmet_tcp_free_cmd(&queue->connect); | |
1549 | kfree(cmds); | |
1550 | } | |
1551 | ||
1552 | static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) | |
1553 | { | |
1554 | struct socket *sock = queue->sock; | |
1555 | ||
1556 | write_lock_bh(&sock->sk->sk_callback_lock); | |
1557 | sock->sk->sk_data_ready = queue->data_ready; | |
1558 | sock->sk->sk_state_change = queue->state_change; | |
1559 | sock->sk->sk_write_space = queue->write_space; | |
1560 | sock->sk->sk_user_data = NULL; | |
1561 | write_unlock_bh(&sock->sk->sk_callback_lock); | |
1562 | } | |
1563 | ||
872d26a3 SG |
1564 | static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) |
1565 | { | |
1566 | struct nvmet_tcp_cmd *cmd = queue->cmds; | |
1567 | int i; | |
1568 | ||
1569 | for (i = 0; i < queue->nr_cmds; i++, cmd++) { | |
1570 | if (nvmet_tcp_need_data_in(cmd)) | |
af21250b | 1571 | nvmet_req_uninit(&cmd->req); |
872d26a3 SG |
1572 | } |
1573 | ||
1574 | if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { | |
1575 | /* failed in connect */ | |
db94f240 | 1576 | nvmet_req_uninit(&queue->connect.req); |
1577 | } | |
1578 | } | |
1579 | ||
1580 | static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) | |
1581 | { | |
1582 | struct nvmet_tcp_cmd *cmd = queue->cmds; | |
1583 | int i; | |
1584 | ||
6825bdde SG |
1585 | for (i = 0; i < queue->nr_cmds; i++, cmd++) |
1586 | nvmet_tcp_free_cmd_buffers(cmd); | |
1587 | nvmet_tcp_free_cmd_buffers(&queue->connect); | |
872d26a3 SG |
1588 | } |
1589 | ||
1590 | static void nvmet_tcp_release_queue_work(struct work_struct *w) | |
1591 | { | |
1592 | struct nvmet_tcp_queue *queue = | |
1593 | container_of(w, struct nvmet_tcp_queue, release_work); | |
1594 | ||
1595 | mutex_lock(&nvmet_tcp_queue_mutex); | |
1596 | list_del_init(&queue->queue_list); | |
1597 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
1598 | ||
1599 | nvmet_tcp_restore_socket_callbacks(queue); | |
675b453e | 1600 | cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); |
a208fc56 ML |
1601 | cancel_work_sync(&queue->io_work); |
1602 | /* stop accepting incoming data */ | |
1603 | queue->rcv_state = NVMET_TCP_RECV_ERR; | |
872d26a3 SG |
1604 | |
1605 | nvmet_tcp_uninit_data_in_cmds(queue); | |
1606 | nvmet_sq_destroy(&queue->nvme_sq); | |
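	/*
	 * nvmet_sq_destroy() completes any commands still in flight, and
	 * queueing those responses may schedule io_work once more, hence
	 * (this reading is an assumption) the second cancel_work_sync()
	 * below.
	 */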
1607 | cancel_work_sync(&queue->io_work); | |
db94f240 | 1608 | nvmet_tcp_free_cmd_data_in_buffers(queue); |
79a4f186 HR |
1609 | /* ->sock will be released by fput() */ |
1610 | fput(queue->sock->file); | |
872d26a3 SG |
1611 | nvmet_tcp_free_cmds(queue); |
1612 | if (queue->hdr_digest || queue->data_digest) | |
1613 | nvmet_tcp_free_crypto(queue); | |
44f331a6 | 1614 | ida_free(&nvmet_tcp_queue_ida, queue->idx); |
a0727489 | 1615 | page_frag_cache_drain(&queue->pf_cache); |
872d26a3 SG |
1616 | kfree(queue); |
1617 | } | |
1618 | ||
1619 | static void nvmet_tcp_data_ready(struct sock *sk) | |
1620 | { | |
1621 | struct nvmet_tcp_queue *queue; | |
1622 | ||
40e0b090 PY |
1623 | trace_sk_data_ready(sk); |
1624 | ||
872d26a3 SG |
1625 | read_lock_bh(&sk->sk_callback_lock); |
1626 | queue = sk->sk_user_data; | |
675b453e HR |
1627 | if (likely(queue)) { |
1628 | if (queue->data_ready) | |
1629 | queue->data_ready(sk); | |
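		/*
		 * While the TLS handshake is in flight the socket is driven
		 * by the userspace handshake daemon via the saved
		 * ->data_ready, so io_work must not consume bytes here
		 * (this reading is an assumption).
		 */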
1630 | if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) | |
1631 | queue_work_on(queue_cpu(queue), nvmet_tcp_wq, | |
1632 | &queue->io_work); | |
1633 | } | |
872d26a3 SG |
1634 | read_unlock_bh(&sk->sk_callback_lock); |
1635 | } | |
1636 | ||
1637 | static void nvmet_tcp_write_space(struct sock *sk) | |
1638 | { | |
1639 | struct nvmet_tcp_queue *queue; | |
1640 | ||
1641 | read_lock_bh(&sk->sk_callback_lock); | |
1642 | queue = sk->sk_user_data; | |
1643 | if (unlikely(!queue)) | |
1644 | goto out; | |
1645 | ||
1646 | if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { | |
1647 | queue->write_space(sk); | |
1648 | goto out; | |
1649 | } | |
1650 | ||
1651 | if (sk_stream_is_writeable(sk)) { | |
1652 | clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | |
f7790e5d | 1653 | queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); |
872d26a3 SG |
1654 | } |
1655 | out: | |
1656 | read_unlock_bh(&sk->sk_callback_lock); | |
1657 | } | |
1658 | ||
1659 | static void nvmet_tcp_state_change(struct sock *sk) | |
1660 | { | |
1661 | struct nvmet_tcp_queue *queue; | |
1662 | ||
b5332a9f | 1663 | read_lock_bh(&sk->sk_callback_lock); |
872d26a3 SG |
1664 | queue = sk->sk_user_data; |
1665 | if (!queue) | |
1666 | goto done; | |
1667 | ||
1668 | switch (sk->sk_state) { | |
478814a5 ML |
1669 | case TCP_FIN_WAIT2: |
1670 | case TCP_LAST_ACK: | |
1671 | break; | |
872d26a3 SG |
1672 | case TCP_FIN_WAIT1: |
1673 | case TCP_CLOSE_WAIT: | |
1674 | case TCP_CLOSE: | |
1675 | /* FALLTHRU */ | |
872d26a3 SG |
1676 | nvmet_tcp_schedule_release_queue(queue); |
1677 | break; | |
1678 | default: | |
1679 | pr_warn("queue %d unhandled state %d\n", | |
1680 | queue->idx, sk->sk_state); | |
1681 | } | |
1682 | done: | |
b5332a9f | 1683 | read_unlock_bh(&sk->sk_callback_lock); |
872d26a3 SG |
1684 | } |
1685 | ||
1686 | static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) | |
1687 | { | |
1688 | struct socket *sock = queue->sock; | |
89275a96 | 1689 | struct inet_sock *inet = inet_sk(sock->sk); |
872d26a3 SG |
1690 | int ret; |
1691 | ||
1692 | ret = kernel_getsockname(sock, | |
1693 | (struct sockaddr *)&queue->sockaddr); | |
1694 | if (ret < 0) | |
1695 | return ret; | |
1696 | ||
1697 | ret = kernel_getpeername(sock, | |
1698 | (struct sockaddr *)&queue->sockaddr_peer); | |
1699 | if (ret < 0) | |
1700 | return ret; | |
1701 | ||
1702 | /* | |
1703 | * Clean up whatever is sitting in the TCP transmit queue on socket |
1704 | * close. This is done to prevent stale data from being sent should | |
1705 | * the network connection be restored before TCP times out. | |
1706 | */ | |
c433594c | 1707 | sock_no_linger(sock->sk); |
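	/*
	 * Illustrative sketch, an assumption rather than driver code:
	 * sock_no_linger() is the in-kernel analogue of the classic
	 * abortive-close setup, roughly the user-space
	 *
	 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
	 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
	 *
	 * so that close() discards the transmit queue (RST) instead of
	 * draining it.
	 */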
872d26a3 | 1708 | |
6e434967 CH |
1709 | if (so_priority > 0) |
1710 | sock_set_priority(sock->sk, so_priority); | |
43cc6689 | 1711 | |
89275a96 | 1712 | /* Set socket type of service */ |
6ebf71ba CH |
1713 | if (inet->rcv_tos > 0) |
1714 | ip_sock_set_tos(sock->sk, inet->rcv_tos); | |
89275a96 | 1715 | |
0fbcfb08 | 1716 | ret = 0; |
872d26a3 | 1717 | write_lock_bh(&sock->sk->sk_callback_lock); |
0fbcfb08 SG |
1718 | if (sock->sk->sk_state != TCP_ESTABLISHED) { |
1719 | /* | |
1720 | * If the socket is already closing, don't even start | |
1721 | * consuming it | |
1722 | */ | |
1723 | ret = -ENOTCONN; | |
1724 | } else { | |
1725 | sock->sk->sk_user_data = queue; | |
1726 | queue->data_ready = sock->sk->sk_data_ready; | |
1727 | sock->sk->sk_data_ready = nvmet_tcp_data_ready; | |
1728 | queue->state_change = sock->sk->sk_state_change; | |
1729 | sock->sk->sk_state_change = nvmet_tcp_state_change; | |
1730 | queue->write_space = sock->sk->sk_write_space; | |
1731 | sock->sk->sk_write_space = nvmet_tcp_write_space; | |
d8e7b462 WM |
1732 | if (idle_poll_period_usecs) |
1733 | nvmet_tcp_arm_queue_deadline(queue); | |
0fbcfb08 SG |
1734 | queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); |
1735 | } | |
872d26a3 SG |
1736 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1737 | ||
0fbcfb08 | 1738 | return ret; |
872d26a3 SG |
1739 | } |
1740 | ||
675b453e | 1741 | #ifdef CONFIG_NVME_TARGET_TCP_TLS |
70525e5d HR |
1742 | static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) |
1743 | { | |
1744 | struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; | |
1745 | int len, ret; | |
1746 | struct kvec iov = { | |
1747 | .iov_base = (u8 *)&queue->pdu + queue->offset, | |
1748 | .iov_len = sizeof(struct nvme_tcp_hdr), | |
1749 | }; | |
1750 | char cbuf[CMSG_LEN(sizeof(char))] = {}; | |
1751 | struct msghdr msg = { | |
1752 | .msg_control = cbuf, | |
1753 | .msg_controllen = sizeof(cbuf), | |
1754 | .msg_flags = MSG_PEEK, | |
1755 | }; | |
1756 | ||
1757 | if (nvmet_port_secure_channel_required(queue->port->nport)) | |
1758 | return 0; | |
1759 | ||
1760 | len = kernel_recvmsg(queue->sock, &msg, &iov, 1, | |
1761 | iov.iov_len, msg.msg_flags); | |
1762 | if (unlikely(len < 0)) { | |
1763 | pr_debug("queue %d: peek error %d\n", | |
1764 | queue->idx, len); | |
1765 | return len; | |
1766 | } | |
1767 | ||
1768 | ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); | |
1769 | if (ret < 0) | |
1770 | return ret; | |
1771 | ||
1772 | if (len < sizeof(struct nvme_tcp_hdr)) { | |
1773 | pr_debug("queue %d: short read, %d bytes missing\n", | |
1774 | queue->idx, (int)iov.iov_len - len); | |
1775 | return -EAGAIN; | |
1776 | } | |
1777 | pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n", | |
1778 | queue->idx, hdr->type, hdr->hlen, hdr->plen, | |
1779 | (int)sizeof(struct nvme_tcp_icreq_pdu)); | |
1780 | if (hdr->type == nvme_tcp_icreq && | |
1781 | hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) && | |
3a96bff2 | 1782 | hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) { |
70525e5d HR |
1783 | pr_debug("queue %d: icreq detected\n", |
1784 | queue->idx); | |
1785 | return len; | |
1786 | } | |
1787 | return 0; | |
1788 | } | |
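/*
 * Hedged sketch of the control-message check delegated to
 * nvmet_tcp_tls_record_ok() above, assumed from the kTLS uAPI: once the
 * socket carries TLS, the record type arrives as a cmsg and only
 * application data may reach the NVMe layer:
 *
 *	struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
 *
 *	if (cmsg->cmsg_level == SOL_TLS &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE &&
 *	    *(u8 *)CMSG_DATA(cmsg) != TLS_RECORD_TYPE_DATA)
 *		return -ENOTCONN;
 */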
1789 | ||
675b453e HR |
1790 | static void nvmet_tcp_tls_handshake_done(void *data, int status, |
1791 | key_serial_t peerid) | |
1792 | { | |
1793 | struct nvmet_tcp_queue *queue = data; | |
1794 | ||
1795 | pr_debug("queue %d: TLS handshake done, key %x, status %d\n", | |
1796 | queue->idx, peerid, status); | |
1797 | spin_lock_bh(&queue->state_lock); | |
1798 | if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { | |
1799 | spin_unlock_bh(&queue->state_lock); | |
1800 | return; | |
1801 | } | |
1802 | if (!status) { | |
1803 | queue->tls_pskid = peerid; | |
1804 | queue->state = NVMET_TCP_Q_CONNECTING; | |
1805 | } else | |
1806 | queue->state = NVMET_TCP_Q_FAILED; | |
1807 | spin_unlock_bh(&queue->state_lock); | |
1808 | ||
1809 | cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); | |
1810 | if (status) | |
1811 | nvmet_tcp_schedule_release_queue(queue); | |
1812 | else | |
1813 | nvmet_tcp_set_queue_sock(queue); | |
1814 | kref_put(&queue->kref, nvmet_tcp_release_queue); | |
1815 | } | |
1816 | ||
1817 | static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) | |
1818 | { | |
1819 | struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), | |
1820 | struct nvmet_tcp_queue, tls_handshake_tmo_work); | |
1821 | ||
1822 | pr_warn("queue %d: TLS handshake timeout\n", queue->idx); | |
1823 | /* | |
1824 | * If tls_handshake_cancel() fails we've lost the race with | |
1825 | * nvmet_tcp_tls_handshake_done(). */ |
1826 | if (!tls_handshake_cancel(queue->sock->sk)) | |
1827 | return; | |
1828 | spin_lock_bh(&queue->state_lock); | |
1829 | if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { | |
1830 | spin_unlock_bh(&queue->state_lock); | |
1831 | return; | |
1832 | } | |
1833 | queue->state = NVMET_TCP_Q_FAILED; | |
1834 | spin_unlock_bh(&queue->state_lock); | |
1835 | nvmet_tcp_schedule_release_queue(queue); | |
1836 | kref_put(&queue->kref, nvmet_tcp_release_queue); | |
1837 | } | |
1838 | ||
1839 | static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) | |
1840 | { | |
1841 | int ret = -EOPNOTSUPP; | |
1842 | struct tls_handshake_args args; | |
1843 | ||
1844 | if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) { | |
1845 | pr_warn("cannot start TLS in state %d\n", queue->state); | |
1846 | return -EINVAL; | |
1847 | } | |
1848 | ||
1849 | kref_get(&queue->kref); | |
1850 | pr_debug("queue %d: TLS ServerHello\n", queue->idx); | |
1851 | memset(&args, 0, sizeof(args)); | |
1852 | args.ta_sock = queue->sock; | |
1853 | args.ta_done = nvmet_tcp_tls_handshake_done; | |
1854 | args.ta_data = queue; | |
1855 | args.ta_keyring = key_serial(queue->port->nport->keyring); | |
1856 | args.ta_timeout_ms = tls_handshake_timeout * 1000; | |
1857 | ||
1858 | ret = tls_server_hello_psk(&args, GFP_KERNEL); | |
1859 | if (ret) { | |
1860 | kref_put(&queue->kref, nvmet_tcp_release_queue); | |
1861 | pr_err("failed to start TLS, err=%d\n", ret); | |
1862 | } else { | |
1863 | queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work, | |
1864 | tls_handshake_timeout * HZ); | |
1865 | } | |
1866 | return ret; | |
1867 | } | |
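/*
 * Summary sketch of the TLS setup flow implemented above (an assumed
 * reading, not code):
 *
 *	accept -> NVMET_TCP_Q_TLS_HANDSHAKE -> tls_server_hello_psk()
 *	  done, status == 0 -> NVMET_TCP_Q_CONNECTING
 *	                      -> nvmet_tcp_set_queue_sock()
 *	  done, status != 0, or timeout -> NVMET_TCP_Q_FAILED
 *	                                  -> queue release
 */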
11b9d0b4 HR |
1868 | #else |
1869 | static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {} | |
675b453e HR |
1870 | #endif |
1871 | ||
4f8cce2d | 1872 | static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, |
872d26a3 SG |
1873 | struct socket *newsock) |
1874 | { | |
1875 | struct nvmet_tcp_queue *queue; | |
79a4f186 | 1876 | struct file *sock_file = NULL; |
872d26a3 SG |
1877 | int ret; |
1878 | ||
1879 | queue = kzalloc(sizeof(*queue), GFP_KERNEL); | |
4f8cce2d HR |
1880 | if (!queue) { |
1881 | ret = -ENOMEM; | |
1882 | goto out_release; | |
1883 | } | |
872d26a3 SG |
1884 | |
1885 | INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); | |
1886 | INIT_WORK(&queue->io_work, nvmet_tcp_io_work); | |
675b453e | 1887 | kref_init(&queue->kref); |
872d26a3 SG |
1888 | queue->sock = newsock; |
1889 | queue->port = port; | |
1890 | queue->nr_cmds = 0; | |
1891 | spin_lock_init(&queue->state_lock); | |
675b453e HR |
1892 | if (queue->port->nport->disc_addr.tsas.tcp.sectype == |
1893 | NVMF_TCP_SECTYPE_TLS13) | |
1894 | queue->state = NVMET_TCP_Q_TLS_HANDSHAKE; | |
1895 | else | |
1896 | queue->state = NVMET_TCP_Q_CONNECTING; | |
872d26a3 SG |
1897 | INIT_LIST_HEAD(&queue->free_list); |
1898 | init_llist_head(&queue->resp_list); | |
1899 | INIT_LIST_HEAD(&queue->resp_send_list); | |
1900 | ||
79a4f186 HR |
1901 | sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); |
1902 | if (IS_ERR(sock_file)) { | |
1903 | ret = PTR_ERR(sock_file); | |
1904 | goto out_free_queue; | |
1905 | } | |
1906 | ||
44f331a6 | 1907 | queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); |
872d26a3 SG |
1908 | if (queue->idx < 0) { |
1909 | ret = queue->idx; | |
79a4f186 | 1910 | goto out_sock; |
872d26a3 SG |
1911 | } |
1912 | ||
1913 | ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); | |
1914 | if (ret) | |
1915 | goto out_ida_remove; | |
1916 | ||
1917 | ret = nvmet_sq_init(&queue->nvme_sq); | |
1918 | if (ret) | |
1919 | goto out_free_connect; | |
1920 | ||
872d26a3 SG |
1921 | nvmet_prepare_receive_pdu(queue); |
1922 | ||
1923 | mutex_lock(&nvmet_tcp_queue_mutex); | |
1924 | list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); | |
1925 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
1926 | ||
675b453e HR |
1927 | INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, |
1928 | nvmet_tcp_tls_handshake_timeout); | |
11b9d0b4 | 1929 | #ifdef CONFIG_NVME_TARGET_TCP_TLS |
675b453e HR |
1930 | if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { |
1931 | struct sock *sk = queue->sock->sk; | |
1932 | ||
1933 | /* Restore the default callbacks before starting upcall */ | |
1934 | read_lock_bh(&sk->sk_callback_lock); | |
1935 | sk->sk_user_data = NULL; | |
1936 | sk->sk_data_ready = port->data_ready; | |
1937 | read_unlock_bh(&sk->sk_callback_lock); | |
70525e5d HR |
1938 | if (!nvmet_tcp_try_peek_pdu(queue)) { |
1939 | if (!nvmet_tcp_tls_handshake(queue)) | |
1940 | return; | |
1941 | /* TLS handshake failed, terminate the connection */ | |
1942 | goto out_destroy_sq; | |
1943 | } | |
1944 | /* Not a TLS connection, continue with normal processing */ | |
1945 | queue->state = NVMET_TCP_Q_CONNECTING; | |
675b453e HR |
1946 | } |
1947 | #endif | |
1948 | ||
872d26a3 SG |
1949 | ret = nvmet_tcp_set_queue_sock(queue); |
1950 | if (ret) | |
1951 | goto out_destroy_sq; | |
1952 | ||
4f8cce2d | 1953 | return; |
872d26a3 SG |
1954 | out_destroy_sq: |
1955 | mutex_lock(&nvmet_tcp_queue_mutex); | |
1956 | list_del_init(&queue->queue_list); | |
1957 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
1958 | nvmet_sq_destroy(&queue->nvme_sq); | |
1959 | out_free_connect: | |
1960 | nvmet_tcp_free_cmd(&queue->connect); | |
1961 | out_ida_remove: | |
44f331a6 | 1962 | ida_free(&nvmet_tcp_queue_ida, queue->idx); |
79a4f186 HR |
1963 | out_sock: |
1964 | fput(queue->sock->file); | |
872d26a3 SG |
1965 | out_free_queue: |
1966 | kfree(queue); | |
4f8cce2d HR |
1967 | out_release: |
1968 | pr_err("failed to allocate queue, error %d\n", ret); | |
79a4f186 HR |
1969 | if (!sock_file) |
1970 | sock_release(newsock); | |
872d26a3 SG |
1971 | } |
1972 | ||
1973 | static void nvmet_tcp_accept_work(struct work_struct *w) | |
1974 | { | |
1975 | struct nvmet_tcp_port *port = | |
1976 | container_of(w, struct nvmet_tcp_port, accept_work); | |
1977 | struct socket *newsock; | |
1978 | int ret; | |
1979 | ||
1980 | while (true) { | |
1981 | ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); | |
1982 | if (ret < 0) { | |
1983 | if (ret != -EAGAIN) | |
1984 | pr_warn("failed to accept err=%d\n", ret); | |
1985 | return; | |
1986 | } | |
4f8cce2d | 1987 | nvmet_tcp_alloc_queue(port, newsock); |
872d26a3 SG |
1988 | } |
1989 | } | |
1990 | ||
1991 | static void nvmet_tcp_listen_data_ready(struct sock *sk) | |
1992 | { | |
1993 | struct nvmet_tcp_port *port; | |
1994 | ||
40e0b090 PY |
1995 | trace_sk_data_ready(sk); |
1996 | ||
872d26a3 SG |
1997 | read_lock_bh(&sk->sk_callback_lock); |
1998 | port = sk->sk_user_data; | |
1999 | if (!port) | |
2000 | goto out; | |
2001 | ||
2002 | if (sk->sk_state == TCP_LISTEN) | |
8832cf92 | 2003 | queue_work(nvmet_wq, &port->accept_work); |
872d26a3 SG |
2004 | out: |
2005 | read_unlock_bh(&sk->sk_callback_lock); | |
2006 | } | |
2007 | ||
2008 | static int nvmet_tcp_add_port(struct nvmet_port *nport) | |
2009 | { | |
2010 | struct nvmet_tcp_port *port; | |
2011 | __kernel_sa_family_t af; | |
12abc5ee | 2012 | int ret; |
872d26a3 SG |
2013 | |
2014 | port = kzalloc(sizeof(*port), GFP_KERNEL); | |
2015 | if (!port) | |
2016 | return -ENOMEM; | |
2017 | ||
2018 | switch (nport->disc_addr.adrfam) { | |
2019 | case NVMF_ADDR_FAMILY_IP4: | |
2020 | af = AF_INET; | |
2021 | break; | |
2022 | case NVMF_ADDR_FAMILY_IP6: | |
2023 | af = AF_INET6; | |
2024 | break; | |
2025 | default: | |
2026 | pr_err("address family %d not supported\n", | |
2027 | nport->disc_addr.adrfam); | |
2028 | ret = -EINVAL; | |
2029 | goto err_port; | |
2030 | } | |
2031 | ||
2032 | ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, | |
2033 | nport->disc_addr.trsvcid, &port->addr); | |
2034 | if (ret) { | |
2035 | pr_err("malformed ip/port passed: %s:%s\n", | |
2036 | nport->disc_addr.traddr, nport->disc_addr.trsvcid); | |
2037 | goto err_port; | |
2038 | } | |
2039 | ||
2040 | port->nport = nport; | |
872d26a3 SG |
2041 | INIT_WORK(&port->accept_work, nvmet_tcp_accept_work); |
2042 | if (port->nport->inline_data_size < 0) | |
2043 | port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE; | |
2044 | ||
2045 | ret = sock_create(port->addr.ss_family, SOCK_STREAM, | |
2046 | IPPROTO_TCP, &port->sock); | |
2047 | if (ret) { | |
2048 | pr_err("failed to create a socket\n"); | |
2049 | goto err_port; | |
2050 | } | |
2051 | ||
2052 | port->sock->sk->sk_user_data = port; | |
2053 | port->data_ready = port->sock->sk->sk_data_ready; | |
2054 | port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready; | |
b58f0e8f | 2055 | sock_set_reuseaddr(port->sock->sk); |
12abc5ee | 2056 | tcp_sock_set_nodelay(port->sock->sk); |
6e434967 CH |
2057 | if (so_priority > 0) |
2058 | sock_set_priority(port->sock->sk, so_priority); | |
43cc6689 | 2059 | |
872d26a3 SG |
2060 | ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, |
2061 | sizeof(port->addr)); | |
2062 | if (ret) { | |
2063 | pr_err("failed to bind port socket %d\n", ret); | |
2064 | goto err_sock; | |
2065 | } | |
2066 | ||
07a29b13 | 2067 | ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG); |
872d26a3 SG |
2068 | if (ret) { |
2069 | pr_err("failed to listen %d on port sock\n", ret); | |
2070 | goto err_sock; | |
2071 | } | |
2072 | ||
2073 | nport->priv = port; | |
2074 | pr_info("enabling port %d (%pISpc)\n", | |
2075 | le16_to_cpu(nport->disc_addr.portid), &port->addr); | |
2076 | ||
2077 | return 0; | |
2078 | ||
2079 | err_sock: | |
2080 | sock_release(port->sock); | |
2081 | err_port: | |
2082 | kfree(port); | |
2083 | return ret; | |
2084 | } | |
2085 | ||
2351ead9 IR |
2086 | static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port) |
2087 | { | |
2088 | struct nvmet_tcp_queue *queue; | |
2089 | ||
2090 | mutex_lock(&nvmet_tcp_queue_mutex); | |
2091 | list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) | |
2092 | if (queue->port == port) | |
2093 | kernel_sock_shutdown(queue->sock, SHUT_RDWR); | |
2094 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
2095 | } | |
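/*
 * Shutting the sockets down above relies on nvmet_tcp_state_change()
 * observing the close and scheduling queue release (assumed flow).
 */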
2096 | ||
872d26a3 SG |
2097 | static void nvmet_tcp_remove_port(struct nvmet_port *nport) |
2098 | { | |
2099 | struct nvmet_tcp_port *port = nport->priv; | |
2100 | ||
2101 | write_lock_bh(&port->sock->sk->sk_callback_lock); | |
2102 | port->sock->sk->sk_data_ready = port->data_ready; | |
2103 | port->sock->sk->sk_user_data = NULL; | |
2104 | write_unlock_bh(&port->sock->sk->sk_callback_lock); | |
2105 | cancel_work_sync(&port->accept_work); | |
2351ead9 IR |
2106 | /* |
2107 | * Destroy the remaining queues, which do not belong to any | |
2108 | * controller yet. | |
2109 | */ | |
2110 | nvmet_tcp_destroy_port_queues(port); | |
872d26a3 SG |
2111 | |
2112 | sock_release(port->sock); | |
2113 | kfree(port); | |
2114 | } | |
2115 | ||
2116 | static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) | |
2117 | { | |
2118 | struct nvmet_tcp_queue *queue; | |
2119 | ||
2120 | mutex_lock(&nvmet_tcp_queue_mutex); | |
2121 | list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) | |
2122 | if (queue->nvme_sq.ctrl == ctrl) | |
2123 | kernel_sock_shutdown(queue->sock, SHUT_RDWR); | |
2124 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
2125 | } | |
2126 | ||
2127 | static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) | |
2128 | { | |
2129 | struct nvmet_tcp_queue *queue = | |
2130 | container_of(sq, struct nvmet_tcp_queue, nvme_sq); | |
2131 | ||
2132 | if (sq->qid == 0) { | |
07a29b13 HR |
2133 | struct nvmet_tcp_queue *q; |
2134 | int pending = 0; | |
2135 | ||
2136 | /* Check for pending controller teardown */ | |
2137 | mutex_lock(&nvmet_tcp_queue_mutex); | |
2138 | list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) { | |
2139 | if (q->nvme_sq.ctrl == sq->ctrl && | |
2140 | q->state == NVMET_TCP_Q_DISCONNECTING) | |
2141 | pending++; | |
2142 | } | |
2143 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
2144 | if (pending > NVMET_TCP_BACKLOG) | |
2145 | return NVME_SC_CONNECT_CTRL_BUSY; | |
872d26a3 SG |
2146 | } |
2147 | ||
2148 | queue->nr_cmds = sq->size * 2; | |
5572a55a ML |
2149 | if (nvmet_tcp_alloc_cmds(queue)) { |
2150 | queue->nr_cmds = 0; | |
872d26a3 | 2151 | return NVME_SC_INTERNAL; |
5572a55a | 2152 | } |
872d26a3 SG |
2153 | return 0; |
2154 | } | |
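/*
 * Note, an assumption: nr_cmds is sized at twice the SQ depth so that a
 * free command context remains available while responses for earlier
 * commands are still queued for transmission.
 */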
2155 | ||
2156 | static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, | |
2157 | struct nvmet_port *nport, char *traddr) | |
2158 | { | |
2159 | struct nvmet_tcp_port *port = nport->priv; | |
2160 | ||
2161 | if (inet_addr_is_any((struct sockaddr *)&port->addr)) { | |
2162 | struct nvmet_tcp_cmd *cmd = | |
2163 | container_of(req, struct nvmet_tcp_cmd, req); | |
2164 | struct nvmet_tcp_queue *queue = cmd->queue; | |
2165 | ||
2166 | sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); | |
2167 | } else { | |
2168 | memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); | |
2169 | } | |
2170 | } | |
2171 | ||
b4bbe00d HR |
2172 | static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, |
2173 | char *traddr, size_t traddr_len) | |
2174 | { | |
2175 | struct nvmet_sq *sq = ctrl->sqs[0]; | |
2176 | struct nvmet_tcp_queue *queue = | |
2177 | container_of(sq, struct nvmet_tcp_queue, nvme_sq); | |
2178 | ||
2179 | if (queue->sockaddr_peer.ss_family == AF_UNSPEC) | |
2180 | return -EINVAL; | |
2181 | return snprintf(traddr, traddr_len, "%pISc", | |
2182 | (struct sockaddr *)&queue->sockaddr_peer); | |
2183 | } | |
2184 | ||
a40aae6b | 2185 | static const struct nvmet_fabrics_ops nvmet_tcp_ops = { |
872d26a3 SG |
2186 | .owner = THIS_MODULE, |
2187 | .type = NVMF_TRTYPE_TCP, | |
2188 | .msdbd = 1, | |
872d26a3 SG |
2189 | .add_port = nvmet_tcp_add_port, |
2190 | .remove_port = nvmet_tcp_remove_port, | |
2191 | .queue_response = nvmet_tcp_queue_response, | |
2192 | .delete_ctrl = nvmet_tcp_delete_ctrl, | |
2193 | .install_queue = nvmet_tcp_install_queue, | |
2194 | .disc_traddr = nvmet_tcp_disc_port_addr, | |
b4bbe00d | 2195 | .host_traddr = nvmet_tcp_host_port_addr, |
872d26a3 SG |
2196 | }; |
2197 | ||
2198 | static int __init nvmet_tcp_init(void) | |
2199 | { | |
2200 | int ret; | |
2201 | ||
533d2e8b SG |
2202 | nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", |
2203 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | |
872d26a3 SG |
2204 | if (!nvmet_tcp_wq) |
2205 | return -ENOMEM; | |
2206 | ||
2207 | ret = nvmet_register_transport(&nvmet_tcp_ops); | |
2208 | if (ret) | |
2209 | goto err; | |
2210 | ||
2211 | return 0; | |
2212 | err: | |
2213 | destroy_workqueue(nvmet_tcp_wq); | |
2214 | return ret; | |
2215 | } | |
2216 | ||
2217 | static void __exit nvmet_tcp_exit(void) | |
2218 | { | |
2219 | struct nvmet_tcp_queue *queue; | |
2220 | ||
2221 | nvmet_unregister_transport(&nvmet_tcp_ops); | |
2222 | ||
8832cf92 | 2223 | flush_workqueue(nvmet_wq); |
872d26a3 SG |
2224 | mutex_lock(&nvmet_tcp_queue_mutex); |
2225 | list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) | |
2226 | kernel_sock_shutdown(queue->sock, SHUT_RDWR); | |
2227 | mutex_unlock(&nvmet_tcp_queue_mutex); | |
8832cf92 | 2228 | flush_workqueue(nvmet_wq); |
872d26a3 SG |
2229 | |
2230 | destroy_workqueue(nvmet_tcp_wq); | |
47c5dd66 | 2231 | ida_destroy(&nvmet_tcp_queue_ida); |
872d26a3 SG |
2232 | } |
2233 | ||
2234 | module_init(nvmet_tcp_init); | |
2235 | module_exit(nvmet_tcp_exit); | |
2236 | ||
41951f83 | 2237 | MODULE_DESCRIPTION("NVMe target TCP transport driver"); |
872d26a3 SG |
2238 | MODULE_LICENSE("GPL v2"); |
2239 | MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */ |