// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * See the file COPYING included with this distribution for more details.
 */

#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <trace/events/iscsi.h>
#include <trace/events/sock.h>

#include "iscsi_tcp.h"

MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");

static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
static const struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;

static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

static bool iscsi_recv_from_iscsi_q;
module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
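
/*
 * The parameter can be set at module load time or, since it is mode 0644,
 * toggled at runtime through sysfs, e.g.:
 *
 *	modprobe iscsi_tcp recv_from_iscsi_q=1
 *	echo Y > /sys/module/iscsi_tcp/parameters/recv_from_iscsi_q
 */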

static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...)		\
	do {							\
		if (iscsi_sw_tcp_dbg)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					  "%s " dbg_fmt,	\
					  __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0)

/**
 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 */
static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed, total_consumed = 0;
	int status;

	ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);

	do {
		status = 0;
		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
		offset += consumed;
		total_consumed += consumed;
	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);

	ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
			 skb->len - offset, status);
	return total_consumed;
}

/**
 * iscsi_sw_sk_state_check - check socket state
 * @sk: socket
 *
 * If the socket is in CLOSE or CLOSE_WAIT we should
 * not close the connection if there is still some
 * data pending.
 *
 * Must be called with sk_callback_lock.
 */
static inline int iscsi_sw_sk_state_check(struct sock *sk)
{
	struct iscsi_conn *conn = sk->sk_user_data;

	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
		return -ECONNRESET;
	}
	return 0;
}

static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;
	read_descriptor_t rd_desc;

	/*
	 * Use rd_desc to pass 'conn' to iscsi_sw_tcp_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_sw_tcp_recv
	 * handles pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);

	iscsi_sw_sk_state_check(sk);
}
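
/*
 * Receive path used when recv_from_iscsi_q is set: data is consumed from
 * the iscsi_q workqueue instead of directly in network softirq context.
 */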
static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
{
	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
					       recvwork);
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	lock_sock(sk);
	iscsi_sw_tcp_recv_data(conn);
	release_sock(sk);
}
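
/*
 * sk_data_ready callback: either queue the receive work or pull the data
 * in directly, depending on the connection's queue_recv setting.
 */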
static void iscsi_sw_tcp_data_ready(struct sock *sk)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	if (tcp_sw_conn->queue_recv)
		iscsi_conn_queue_recv(conn);
	else
		iscsi_sw_tcp_recv_data(conn);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void iscsi_sw_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_conn *conn;
	void (*old_state_change)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	iscsi_sw_sk_state_check(sk);

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	old_state_change = tcp_sw_conn->old_state_change;

	read_unlock_bh(&sk->sk_callback_lock);

	old_state_change(sk);
}

/**
 * iscsi_sw_tcp_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 */
static void iscsi_sw_tcp_write_space(struct sock *sk)
{
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	void (*old_write_space)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	old_write_space = tcp_sw_conn->old_write_space;
	read_unlock_bh(&sk->sk_callback_lock);

	old_write_space(sk);

	ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
	iscsi_conn_queue_xmit(conn);
}
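
/*
 * Save the socket's original callbacks and install the iSCSI data-path
 * callbacks; undone by iscsi_sw_tcp_conn_restore_callbacks().
 */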
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
	tcp_sw_conn->old_state_change = sk->sk_state_change;
	tcp_sw_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
	sk->sk_state_change = iscsi_sw_tcp_state_change;
	sk->sk_write_space = iscsi_sw_tcp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void
iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = tcp_sw_conn->old_data_ready;
	sk->sk_state_change = tcp_sw_conn->old_state_change;
	sk->sk_write_space = tcp_sw_conn->old_write_space;
	sk->sk_no_check_tx = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}

/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		struct msghdr msg = {};
		struct bio_vec bv;
		unsigned int offset, copy;

		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			msg.msg_flags |= MSG_MORE;

		if (tcp_sw_conn->queue_recv)
			msg.msg_flags |= MSG_DONTWAIT;

		if (!segment->data) {
			if (!tcp_conn->iscsi_conn->datadgst_en)
				msg.msg_flags |= MSG_SPLICE_PAGES;
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			bvec_set_page(&bv, sg_page(sg), copy, offset);
		} else {
			bvec_set_virt(&bv, segment->data + offset, copy);
		}
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, copy);

		r = sock_sendmsg(sk, &msg);
		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 * @conn: iscsi connection
 */
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);
	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 * @conn: iscsi connection
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}
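
/*
 * Push any queued PDU bytes out on the socket. The transmit runs under
 * memalloc_noreclaim_save() so that allocations made while sending cannot
 * enter direct reclaim and generate new I/O on this same connection.
 */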
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int noreclaim_flag;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int rc = 0;

	if (!tcp_sw_conn->sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "Transport not bound to socket!\n");
		return -EINVAL;
	}

	noreclaim_flag = memalloc_noreclaim_save();

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0) {
			rc = -EAGAIN;
			break;
		}
		if (rc < 0)
			break;
		rc = 0;
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return rc;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	u32 *tx_crcp = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said it would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_crcp = &tcp_sw_conn->tx_crc;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len, NULL, tx_crcp);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	u32 *tx_crcp = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said it would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_crcp = &tcp_sw_conn->tx_crc;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				  data, len, NULL, tx_crcp);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = &task->sc->sdb;

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}
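
/*
 * The BHS (plus room for an optional header digest) lives in the
 * iscsi_sw_tcp_hdrbuf that directly follows struct iscsi_tcp_task in the
 * task's per-driver data.
 */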
static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}
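
/*
 * Allocate the connection plus its iscsi_sw_tcp_conn private data and set
 * up the receive work, the socket lock and the receive CRC state.
 */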
static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
	tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;

	mutex_init(&tcp_sw_conn->sock_lock);
	tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc;

	return cls_conn;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/*
	 * The iscsi transport class will make sure we are not called in
	 * parallel with start, stop, bind and destroys. However, this can be
	 * called twice if userspace does a stop then a destroy.
	 */
	if (!sock)
		return;

	/*
	 * Make sure we start socket shutdown now in case userspace is up
	 * but delayed in releasing the socket.
	 */
	kernel_sock_shutdown(sock, SHUT_RDWR);

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	iscsi_suspend_rx(conn);

	mutex_lock(&tcp_sw_conn->sock_lock);
	tcp_sw_conn->sock = NULL;
	mutex_unlock(&tcp_sw_conn->sock_lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);
	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = -EINVAL;
	if (!sk_is_tcp(sock->sk))
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	mutex_lock(&tcp_sw_conn->sock_lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	mutex_unlock(&tcp_sw_conn->sock_lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = SK_CAN_REUSE;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;
	sk->sk_use_task_frag = false;

	iscsi_sw_tcp_conn_set_callbacks(conn);
	/* set receive state machine into initial state */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}
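
/*
 * HDRDGST can be flipped at any time, but DATADGST changes are made under
 * sock_lock so they cannot race with a bind or a socket release.
 */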
static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		mutex_lock(&tcp_sw_conn->sock_lock);
		if (!tcp_sw_conn->sock) {
			mutex_unlock(&tcp_sw_conn->sock_lock);
			return -ENOTCONN;
		}
		iscsi_set_param(cls_conn, param, buf, buflen);
		mutex_unlock(&tcp_sw_conn->sock_lock);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_LOCAL_PORT:
		spin_lock_bh(&conn->session->frwd_lock);
		if (!conn->session->leadconn) {
			spin_unlock_bh(&conn->session->frwd_lock);
			return -ENOTCONN;
		}
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * in case a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&conn->session->frwd_lock);

		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;

		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock) {
			rc = -ENOTCONN;
			goto sock_unlock;
		}

		if (param == ISCSI_PARAM_LOCAL_PORT)
			rc = kernel_getsockname(sock,
						(struct sockaddr *)&addr);
		else
			rc = kernel_getpeername(sock,
						(struct sockaddr *)&addr);
sock_unlock:
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		session = tcp_sw_host->session;
		if (!session)
			return -ENOTCONN;

		spin_lock_bh(&session->frwd_lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * in case a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&session->frwd_lock);

		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock) {
			rc = -ENOTCONN;
			goto sock_unlock;
		}
		rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
sock_unlock:
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr,
						 (enum iscsi_param)param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;
	int rc;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
	shost->dma_alignment = 0;

	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
	if (rc < 0)
		goto free_host;
	shost->can_queue = rc;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;

	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	/* We are now fully setup so expose the session to sysfs. */
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost, false);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = cls_session->dd_data;

	if (WARN_ON_ONCE(session->leadconn))
		return;

	iscsi_session_remove(cls_session);
	/*
	 * Our get_host_param needs to access the session, so remove the
	 * host from sysfs before freeing the session to make sure userspace
	 * is no longer accessing the callout.
	 */
	iscsi_host_remove(shost, false);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);

	iscsi_session_free(cls_session);
	iscsi_host_free(shost);
}

static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
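
/*
 * With data digests enabled the payload pages must not change while the
 * CRC is computed and sent, so request stable writes from the block layer.
 */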
static int iscsi_sw_tcp_sdev_configure(struct scsi_device *sdev,
				       struct queue_limits *lim)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn = session->leadconn;

	if (conn->datadgst_en)
		lim->features |= BLK_FEAT_STABLE_WRITES;
	return 0;
}

static const struct scsi_host_template iscsi_sw_tcp_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= ISCSI_TOTAL_CMDS_MAX,
	.sg_tablesize		= 4096,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.dma_boundary		= PAGE_SIZE - 1,
	.sdev_configure		= iscsi_sw_tcp_sdev_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
	.track_queue_depth	= 1,
	.cmd_size		= sizeof(struct iscsi_cmd),
};

static struct iscsi_transport iscsi_sw_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	/* session management */
	.create_session		= iscsi_sw_tcp_session_create,
	.destroy_session	= iscsi_sw_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_sw_tcp_conn_create,
	.bind_conn		= iscsi_sw_tcp_conn_bind,
	.destroy_conn		= iscsi_sw_tcp_conn_destroy,
	.attr_is_visible	= iscsi_sw_tcp_attr_is_visible,
	.set_param		= iscsi_sw_tcp_conn_set_param,
	.get_conn_param		= iscsi_sw_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_sw_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_sw_tcp_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_sw_tcp_conn_get_stats,
	/* iscsi task/cmd helpers */
	.init_task		= iscsi_tcp_task_init,
	.xmit_task		= iscsi_tcp_task_xmit,
	.cleanup_task		= iscsi_tcp_cleanup_task,
	/* low level pdu helpers */
	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
	.init_pdu		= iscsi_sw_tcp_pdu_init,
	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static int __init iscsi_sw_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}

	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
						&iscsi_sw_tcp_transport);
	if (!iscsi_sw_tcp_scsi_transport)
		return -ENODEV;

	return 0;
}

static void __exit iscsi_sw_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}

module_init(iscsi_sw_tcp_init);
module_exit(iscsi_sw_tcp_exit);