/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext and cm_listen_id instead"
		 " of the node_guid of the first HCA.");

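/*
 * Example (not part of the driver): srp_max_req_size and srpt_srq_size can
 * only be set at module load time, e.g.:
 *
 *   modprobe ib_srpt srp_max_req_size=4148 srpt_srq_size=4095
 *
 * srpt_service_guid has no set method; it can only be read back, e.g.
 * through /sys/module/ib_srpt/parameters/srpt_service_guid.
 */
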
static struct ib_client srpt_client;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}

/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 sdev->device->name);

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_info("SRQ event %d\n", event->event);
}

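/* Return a human-readable name for an rdma_ch_state value. */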
static const char *get_ch_state_name(enum rdma_ch_state s)
{
	switch (s) {
	case CH_CONNECTING:	return "connecting";
	case CH_LIVE:		return "live";
	case CH_DISCONNECTING:	return "disconnecting";
	case CH_DRAINING:	return "draining";
	case CH_DISCONNECTED:	return "disconnected";
	}
	return "???";
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, ch->state);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 * @c_list: controller list.
 * @slot:   one-based slot number.
 * @value:  four-bit value.
 *
 * Writes the lowest four bits of value into element slot of the array of four
 * bit elements called c_list (controller list). The index slot is one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}

/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}

/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
			   NULL);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:
	return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}

/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}

/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}

/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		pr_warn("IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;

	wr.next = NULL;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}

/**
 * srpt_zerolength_write() - Perform a zero-length RDMA write.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_RDMA_WRITE;
	wr.wr_cqe = &ch->zw_cqe;
	wr.send_flags = IB_SEND_SIGNALED;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

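/*
 * Completion callback for the zero-length RDMA write posted by
 * srpt_zerolength_write(): on success resume processing of the command wait
 * list; on failure the QP is being flushed, so move the channel to
 * CH_DISCONNECTED and schedule its release.
 */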
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
	} else {
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
	}
}

/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof(*db));
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}

/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);

	kfree(ioctx->rdma_wrs);
	ioctx->rdma_wrs = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				target_reverse_dma_direction(&ioctx->cmd));
		ioctx->mapped_sg_count = 0;
	}
}

/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct ib_device *dev = ch->sport->sdev->device;
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct ib_rdma_wr *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      target_reverse_dma_direction(cmd));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
		nrdma = ioctx->n_rdma_wrs;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
					  GFP_KERNEL);
		if (!ioctx->rdma_wrs)
			goto free_mem;

		ioctx->n_rdma_wrs = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	riu = ioctx->rdma_wrs;

	/*
	 * For each remote descriptor, compute the number of ib_sge entries
	 * needed. If at most SRPT_DEF_SG_PER_WQE SGEs are needed per RDMA
	 * operation, a single RDMA work request per remote descriptor
	 * suffices; otherwise additional work requests are used to carry the
	 * remaining SGEs.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->remote_addr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->wr.num_sge = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -= (tsize > dma_len) ? dma_len : tsize;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
					}
				}
			} else {
				tsize -= (tsize > rsize) ? rsize : tsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->wr.num_sge;

			if (rsize > 0 &&
			    riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
						sizeof(*riu->wr.sg_list),
						GFP_KERNEL);
				if (!riu->wr.sg_list)
					goto free_mem;

				++riu;
				riu->wr.num_sge = 0;
				riu->remote_addr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
					sizeof(*riu->wr.sg_list),
					GFP_KERNEL);
		if (!riu->wr.sg_list)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_wrs;
	sg = sg_orig;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	dma_addr = ib_sg_dma_address(dev, &sg[0]);

	/* This second loop maps the scatterlist addresses to the ib_sge
	 * entries of each RDMA work request. */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->wr.sg_list;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->pd->local_dma_lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
						dma_addr = ib_sg_dma_address(
								dev, sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->wr.sg_list;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}

/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_wrs = 0;
	ioctx->rdma_wrs = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * ourselves.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
			  __func__, state);
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		break;
	case SRPT_STATE_NEED_DATA:
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

	return state;
}

/**
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until
 * the check_stop_free() callback. None of this is necessary anymore and
 * needs to be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, srpt_get_cmd_state(ioctx));
}

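/*
 * Completion callback for an RDMA write that transferred DATA-IN buffer
 * contents to the initiator. Errors are only logged here; see the comment
 * below about deferring further processing to the send completion.
 */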
static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/*
		 * Note: if an RDMA write error completion is received that
		 * means that a SEND also has been posted. Defer further
		 * processing of the associated command until the send error
		 * completion has been received.
		 */
		pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
	}
}

/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

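/*
 * Called by the target core before a command is released. Drops the
 * reference obtained via TARGET_SCF_ACK_KREF when the command was submitted.
 */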
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
			    struct srpt_recv_ioctx *recv_ioctx,
			    struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	u64 data_len;
	enum dma_data_direction dir;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
		pr_err("0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		goto release_ioctx;
	}

	/* Pass the task attribute computed above to the target core. */
	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
			       &send_ioctx->sense_data[0],
			       scsilun_to_int(&srp_cmd->lun), data_len,
			       cmd->sam_task_attr, dir, TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
			 srp_cmd->tag);
		goto release_ioctx;
	}
	return;

release_ioctx:
	send_ioctx->state = SRPT_STATE_DONE;
	srpt_release_cmd(cmd);
}

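/* Translate an SRP task management function code into a TCM TMR constant. */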
static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}

/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto fail;
	}
	return;
fail:
	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}

/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: SRPT receive I/O context associated with the information unit.
 * @send_ioctx: SRPT send I/O context, or NULL to have one allocated here.
 */
static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
			       struct srpt_recv_ioctx *recv_ioctx,
			       struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	if (unlikely(ch->state == CH_CONNECTING)) {
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
		goto out;
	}

	if (unlikely(ch->state != CH_LIVE))
		goto out;

	srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
	}

	switch (srp_cmd->opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n",
		       srp_cmd->opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, recv_ioctx);
out:
	return;
}

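/*
 * Receive completion callback: decrements the SRP request limit and passes
 * the received information unit to srpt_handle_new_iu().
 */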
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		srpt_handle_new_iu(ch, ioctx, NULL);
	} else {
		pr_info("receiving failed for ioctx %p with status %d\n",
			ioctx, wc->status);
	}
}

/*
 * This function must be called from the context in which RDMA completions are
 * processed because it accesses the wait list without protection against
 * access from other threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;

	while (!list_empty(&ch->cmd_wait_list) &&
	       ch->state >= CH_LIVE &&
	       (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
		struct srpt_recv_ioctx *recv_ioctx;

		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
					      struct srpt_recv_ioctx,
					      wait_list);
		list_del(&recv_ioctx->wait_list);
		srpt_handle_new_iu(ch, recv_ioctx, ioctx);
	}
}

/*
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_inc(&ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed"
			" with status %d\n", ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		srpt_unmap_sg_to_ib_sge(ch, ioctx);
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
	}

	srpt_process_wait_list(ch);
}

/**
 * srpt_create_ch_ib() - Create the completion queue and queue pair of an
 * RDMA channel.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
	int ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + srp_sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->srq = sdev->srq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	qp_init->cap.max_send_wr = srp_sq_size;
	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;

	ch->qp = ib_create_qp(sdev->pd, qp_init);
	if (IS_ERR(ch->qp)) {
		ret = PTR_ERR(ch->qp);
		if (ret == -ENOMEM) {
			srp_sq_size /= 2;
			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
				ib_destroy_cq(ch->cq);
				goto retry;
			}
		}
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch->cm_id);

	ret = srpt_init_ch_qp(ch, ch->qp);
	if (ret)
		goto err_destroy_qp;

out:
	kfree(qp_init);
	return ret;

err_destroy_qp:
	ib_destroy_qp(ch->qp);
err_destroy_cq:
	ib_destroy_cq(ch->cq);
	goto out;
}

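/**
 * srpt_destroy_ch_ib() - Release a channel's queue pair and completion queue.
 */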
static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->cq);
}

/**
 * srpt_close_ch() - Close an RDMA channel.
 *
 * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
 *
 * Returns true if and only if the channel state has been modified into
 * CH_DRAINING.
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s-%d: already closed\n", ch->sess_name,
			 ch->qp->qp_num);
		return false;
	}

	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
	if (ret < 0)
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
		 ch->qp->qp_num);
	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ON_ONCE(true);
	}

	kref_put(&ch->kref, srpt_free_ch);

	return true;
}

/*
 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 * reached the connected state, close it. If a channel is in the connected
 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 * the responsibility of the caller to ensure that this function is not
 * invoked concurrently with the code that accepts a connection. This means
 * that this function must either be invoked from inside a CM callback
 * function or that it must be invoked with the srpt_device.mutex held.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
		return -ENOTCONN;

	ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
	if (ret < 0)
		ret = ib_send_cm_drep(ch->cm_id, NULL, 0);

	if (ret < 0 && srpt_close_ch(ch))
		ret = 0;

	return ret;
}

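/*
 * Disconnect all channels associated with @sdev. The caller must hold
 * sdev->mutex (see the lockdep assertion below).
 */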
static void __srpt_close_all_ch(struct srpt_device *sdev)
{
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sdev->mutex);

	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (srpt_disconnect_ch(ch) >= 0)
			pr_info("Closing channel %s-%d because target %s has been disabled\n",
				ch->sess_name, ch->qp->qp_num,
				sdev->device->name);
		srpt_close_ch(ch);
	}
}

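/* kref release function: invoked once the last channel reference is gone. */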
static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	kfree(ch);
}

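/*
 * Deferred channel release work: wait until the target core has finished
 * processing all outstanding commands, deregister the session and free all
 * resources associated with the channel.
 */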
static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
		 ch->qp->qp_num, ch->release_done);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	ch->sess = NULL;

	ib_destroy_cm_id(ch->cm_id);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_size, DMA_TO_DEVICE);

	mutex_lock(&sdev->mutex);
	list_del_init(&ch->list);
	if (ch->release_done)
		complete(ch->release_done);
	mutex_unlock(&sdev->mutex);

	wake_up(&sdev->ch_releaseQ);

	kref_put(&ch->kref, srpt_free_ch);
}

/**
 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
 */
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
			    struct ib_cm_req_event_param *param,
			    void *private_data)
{
	struct srpt_device *sdev = cm_id->context;
	struct srpt_port *sport = &sdev->port[param->port - 1];
	struct srp_login_req *req;
	struct srp_login_rsp *rsp;
	struct srp_login_rej *rej;
	struct ib_cm_rep_param *rep_param;
	struct srpt_rdma_ch *ch, *tmp_ch;
	u32 it_iu_len;
	int i, ret = 0;
	unsigned char *p;

	WARN_ON_ONCE(irqs_disabled());

	if (WARN_ON(!sdev || !private_data))
		return -EINVAL;

	req = (struct srp_login_req *)private_data;

	it_iu_len = be32_to_cpu(req->req_it_iu_len);

	pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
		" t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
		" (guid=0x%llx:0x%llx)\n",
		be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
		be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
		be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
		be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
		it_iu_len,
		param->port,
		be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
		be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));

	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);

	if (!rsp || !rej || !rep_param) {
		ret = -ENOMEM;
		goto out;
	}

	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
		ret = -EINVAL;
		pr_err("rejected SRP_LOGIN_REQ because its"
		       " length (%d bytes) is out of range (%d .. %d)\n",
		       it_iu_len, 64, srp_max_req_size);
		goto reject;
	}

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		ret = -EINVAL;
		pr_err("rejected SRP_LOGIN_REQ because the target port"
		       " has not yet been enabled\n");
		goto reject;
	}

	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;

		mutex_lock(&sdev->mutex);

		list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
			if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
			    && !memcmp(ch->t_port_id, req->target_port_id, 16)
			    && param->port == ch->sport->port
			    && param->listen_id == ch->sport->sdev->cm_id
			    && ch->cm_id) {
				if (srpt_disconnect_ch(ch) < 0)
					continue;
				pr_info("Relogin - closed existing channel %s\n",
					ch->sess_name);
				rsp->rsp_flags =
					SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
			}
		}

		mutex_unlock(&sdev->mutex);

	} else
		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;

	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
	    || *(__be64 *)(req->target_port_id + 8) !=
	       cpu_to_be64(srpt_service_guid)) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
		ret = -ENOMEM;
		pr_err("rejected SRP_LOGIN_REQ because it"
		       " has an invalid target port identifier.\n");
		goto reject;
	}

	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
		ret = -ENOMEM;
		goto reject;
	}

	kref_init(&ch->kref);
	ch->zw_cqe.done = srpt_zerolength_write_done;
	INIT_WORK(&ch->release_work, srpt_release_channel_work);
	memcpy(ch->i_port_id, req->initiator_port_id, 16);
	memcpy(ch->t_port_id, req->target_port_id, 16);
	ch->sport = &sdev->port[param->port - 1];
	ch->cm_id = cm_id;
	cm_id->context = ch;
	/*
	 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
	 * for the SRP protocol to the command queue size.
	 */
	ch->rq_size = SRPT_RQ_SIZE;
	spin_lock_init(&ch->spinlock);
	ch->state = CH_CONNECTING;
	INIT_LIST_HEAD(&ch->cmd_wait_list);
	ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
				      ch->rsp_size, DMA_TO_DEVICE);
	if (!ch->ioctx_ring)
		goto free_ch;

	INIT_LIST_HEAD(&ch->free_list);
	for (i = 0; i < ch->rq_size; i++) {
		ch->ioctx_ring[i]->ch = ch;
		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
	}

	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because creating"
		       " a new RDMA channel failed.\n");
		goto free_ring;
	}

	ret = srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling"
		       " RTR failed (error code = %d)\n", ret);
		goto destroy_ib;
	}

	/*
	 * Use the initiator port identifier as the session name, when
	 * checking against se_node_acl->initiatorname[] this can be
	 * with or without preceding '0x'.
	 */
	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
			be64_to_cpu(*(__be64 *)ch->i_port_id),
			be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));

	pr_debug("registering session %s\n", ch->sess_name);
	p = &ch->sess_name[0];

try_again:
	ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
					TARGET_PROT_NORMAL, p, ch, NULL);
	if (IS_ERR(ch->sess)) {
		pr_info("Rejected login because no ACL has been"
			" configured yet for initiator %s.\n", p);
		/*
		 * XXX: Hack to retry of ch->i_port_id without leading '0x'
		 */
		if (p == &ch->sess_name[0]) {
			p += 2;
			goto try_again;
		}
		rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		goto destroy_ib;
	}

	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
		 ch->sess_name, ch->cm_id);

	/* create srp_login_response */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = req->req_it_iu_len;
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
				   | SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* create cm reply */
	rep_param->qp_num = ch->qp->qp_num;
	rep_param->private_data = (void *)rsp;
	rep_param->private_data_len = sizeof(*rsp);
	rep_param->rnr_retry_count = 7;
	rep_param->flow_control = 1;
	rep_param->failover_accepted = 0;
	rep_param->srq = 1;
	rep_param->responder_resources = 4;
	rep_param->initiator_depth = 4;

	ret = ib_send_cm_rep(cm_id, rep_param);
	if (ret) {
		pr_err("sending SRP_LOGIN_REQ response failed"
		       " (error code = %d)\n", ret);
		goto release_channel;
	}

	mutex_lock(&sdev->mutex);
	list_add_tail(&ch->list, &sdev->rch_list);
	mutex_unlock(&sdev->mutex);

	goto out;

release_channel:
	srpt_disconnect_ch(ch);
	transport_deregister_session_configfs(ch->sess);
	transport_deregister_session(ch->sess);
	ch->sess = NULL;

destroy_ib:
	srpt_destroy_ch_ib(ch);

free_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_size, DMA_TO_DEVICE);
free_ch:
	kfree(ch);

reject:
	rej->opcode = SRP_LOGIN_REJ;
	rej->tag = req->tag;
	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
				   | SRP_BUF_FORMAT_INDIRECT);

	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
		       (void *)rej, sizeof(*rej));

out:
	kfree(rep_param);
	kfree(rsp);
	kfree(rej);

	return ret;
}

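/*
 * Log an IB CM REJ event, including a hex dump of any private data supplied
 * by the initiator.
 */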
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
			     enum ib_cm_rej_reason reason,
			     const u8 *private_data,
			     u8 private_data_len)
{
	char *priv = NULL;
	int i;

	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
						GFP_KERNEL)))
		for (i = 0; i < private_data_len; i++)
			sprintf(priv + 3 * i, " %02x", private_data[i]);

	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
		"; private data" : "", priv ? priv : " (?)");
	kfree(priv);
}

/**
 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
 *
 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
 * and that the recipient may begin transmitting (RTU = ready to use).
 */
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
	int ret;

	if (srpt_set_ch_state(ch, CH_LIVE)) {
		ret = srpt_ch_qp_rts(ch, ch->qp);

		if (ret == 0) {
			/* Trigger wait list processing. */
			ret = srpt_zerolength_write(ch);
			WARN_ONCE(ret < 0, "%d\n", ret);
		} else {
			srpt_close_ch(ch);
		}
	}
}

/**
 * srpt_cm_handler() - IB connection manager callback function.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value when transferring
 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
 * a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel().
 */
static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret;

	ret = 0;
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
				       event->private_data);
		break;
	case IB_CM_REJ_RECEIVED:
		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
				 event->private_data,
				 IB_CM_REJ_PRIVATE_DATA_SIZE);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case IB_CM_DREQ_RECEIVED:
		srpt_disconnect_ch(ch);
		break;
	case IB_CM_DREP_RECEIVED:
		pr_info("Received CM DREP message for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_REP_ERROR:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case IB_CM_DREQ_ERROR:
		pr_info("Received CM DREQ ERROR event.\n");
		break;
	case IB_CM_MRA_RECEIVED:
		pr_info("Received CM MRA event\n");
		break;
	default:
		pr_err("received unrecognized CM event %d\n", event->event);
		break;
	}

	return ret;
}

/**
 * srpt_perform_rdmas() - Perform IB RDMA.
 *
 * Returns zero upon success or a negative number upon failure.
 */
static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx)
{
	struct ib_send_wr *bad_wr;
	int sq_wr_avail, ret, i;
	enum dma_data_direction dir;
	const int n_rdma = ioctx->n_rdma;

	dir = ioctx->cmd.data_direction;
	if (dir == DMA_TO_DEVICE) {
		/* write */
		ret = -ENOMEM;
		sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
		if (sq_wr_avail < 0) {
			pr_warn("IB send queue full (needed %d)\n",
				n_rdma);
			goto out;
		}
	}

	for (i = 0; i < n_rdma; i++) {
		struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;

		wr->opcode = (dir == DMA_FROM_DEVICE) ?
				IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;

		if (i == n_rdma - 1) {
			/* only get completion event for the last rdma read */
			if (dir == DMA_TO_DEVICE) {
				wr->send_flags = IB_SEND_SIGNALED;
				ioctx->rdma_cqe.done = srpt_rdma_read_done;
			} else {
				ioctx->rdma_cqe.done = srpt_rdma_write_done;
			}
			wr->wr_cqe = &ioctx->rdma_cqe;
			wr->next = NULL;
		} else {
			wr->wr_cqe = NULL;
			wr->next = &ioctx->rdma_wrs[i + 1].wr;
		}
	}

	ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
	if (ret)
		pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
		       __func__, __LINE__, ret, i, n_rdma);

out:
	if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
		atomic_add(n_rdma, &ch->sq_wr_avail);
	return ret;
}

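/*
 * Sketch of the work request chain built above: the loop links
 *
 *   rdma_wrs[0].wr -> rdma_wrs[1].wr -> ... -> rdma_wrs[n_rdma - 1].wr
 *
 * and only the last work request carries a wr_cqe, so a single ib_post_send()
 * call posts the whole chain. Only the final RDMA READ is posted signaled;
 * an RDMA WRITE chain completes silently unless an error occurs.
 */
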
/**
 * srpt_xfer_data() - Start data transfer from initiator to target.
 */
static int srpt_xfer_data(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx)
{
	int ret;

	ret = srpt_map_sg_to_ib_sge(ch, ioctx);
	if (ret) {
		pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
		goto out;
	}

	ret = srpt_perform_rdmas(ch, ioctx);
	if (ret) {
		if (ret == -EAGAIN || ret == -ENOMEM)
			pr_info("%s[%d] queue full -- ret=%d\n",
				__func__, __LINE__, ret);
		else
			pr_err("%s[%d] fatal error -- ret=%d\n",
			       __func__, __LINE__, ret);
		goto out_unmap;
	}

out:
	return ret;
out_unmap:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);
	goto out;
}

static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
}

/**
 * srpt_write_pending() - Start data transfer from initiator to target (write).
 */
static int srpt_write_pending(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(se_cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	enum srpt_command_state new_state;

	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
	WARN_ON(new_state == SRPT_STATE_DONE);
	return srpt_xfer_data(ch, ioctx);
}

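/*
 * Sketch of the srpt_command_state transitions driven by the functions above,
 * derived from how the states are used in this file:
 *
 *   write: SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA (srpt_write_pending())
 *            -> SRPT_STATE_DATA_IN (RDMA read from the initiator completed)
 *            -> SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE
 *   read:  SRPT_STATE_NEW -> SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE
 *
 * srpt_abort_cmd() can move a command to SRPT_STATE_DONE from most states.
 */
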
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
{
	switch (tcm_mgmt_status) {
	case TMR_FUNCTION_COMPLETE:
		return SRP_TSK_MGMT_SUCCESS;
	case TMR_FUNCTION_REJECTED:
		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
	}
	return SRP_TSK_MGMT_FAILED;
}

/**
 * srpt_queue_response() - Transmits the response to a SCSI command.
 *
 * Callback function called by the TCM core. Must not block since it can be
 * invoked in the context of the IB completion handler.
 */
static void srpt_queue_response(struct se_cmd *cmd)
{
	struct srpt_rdma_ch *ch;
	struct srpt_send_ioctx *ioctx;
	enum srpt_command_state state;
	unsigned long flags;
	int ret = 0;
	enum dma_data_direction dir;
	int resp_len;
	u8 srp_tm_status;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	ch = ioctx->ch;
	BUG_ON(!ch);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
		break;
	case SRPT_STATE_MGMT:
		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
		break;
	default:
		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
		     ch, ioctx->ioctx.index, ioctx->state);
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
		atomic_inc(&ch->req_lim_delta);
		srpt_abort_cmd(ioctx);
		return;
	}

	dir = ioctx->cmd.data_direction;

	/* For read commands, transfer the data to the initiator. */
	if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
	    !ioctx->queue_status_only) {
		ret = srpt_xfer_data(ch, ioctx);
		if (ret) {
			pr_err("xfer_data failed for tag %llu\n",
			       ioctx->cmd.tag);
			return;
		}
	}

	if (state != SRPT_STATE_MGMT)
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	else {
		srp_tm_status
			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						  ioctx->cmd.tag);
	}
	ret = srpt_post_send(ch, ioctx, resp_len);
	if (ret) {
		pr_err("sending cmd response failed for tag %llu\n",
		       ioctx->cmd.tag);
		srpt_unmap_sg_to_ib_sge(ch, ioctx);
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		target_put_sess_cmd(&ioctx->cmd);
	}
}

static int srpt_queue_data_in(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
}

static void srpt_aborted_task(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
}

static int srpt_queue_status(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
	if (cmd->se_cmd_flags &
	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
	ioctx->queue_status_only = true;
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_refresh_port_work(struct work_struct *work)
{
	struct srpt_port *sport = container_of(work, struct srpt_port, work);

	srpt_refresh_port(sport);
}

/**
 * srpt_release_sdev() - Free the channel resources associated with a target.
 */
static int srpt_release_sdev(struct srpt_device *sdev)
{
	int i, res;

	WARN_ON_ONCE(irqs_disabled());

	BUG_ON(!sdev);

	mutex_lock(&sdev->mutex);
	for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
		sdev->port[i].enabled = false;
	__srpt_close_all_ch(sdev);
	mutex_unlock(&sdev->mutex);

	res = wait_event_interruptible(sdev->ch_releaseQ,
				       list_empty_careful(&sdev->rch_list));
	if (res)
		pr_err("%s: interrupted.\n", __func__);

	return 0;
}

static struct srpt_port *__srpt_lookup_port(const char *name)
{
	struct ib_device *dev;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i;

	list_for_each_entry(sdev, &srpt_dev_list, list) {
		dev = sdev->device;
		if (!dev)
			continue;

		for (i = 0; i < dev->phys_port_cnt; i++) {
			sport = &sdev->port[i];

			if (!strcmp(sport->port_guid, name))
				return sport;
		}
	}

	return NULL;
}

static struct srpt_port *srpt_lookup_port(const char *name)
{
	struct srpt_port *sport;

	spin_lock(&srpt_dev_lock);
	sport = __srpt_lookup_port(name);
	spin_unlock(&srpt_dev_lock);

	return sport;
}

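/*
 * Note: srpt_lookup_port() maps the WWN string passed to the configfs mkdir
 * callback (srpt_make_tport() below) onto an srpt_port. The name it matches
 * against is the port GUID string formatted in srpt_add_one().
 */
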
/**
 * srpt_add_one() - InfiniBand device addition callback function.
 */
static void srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct ib_srq_init_attr srq_attr;
	int i;

	pr_debug("device = %p, device->dma_ops = %p\n", device,
		 device->dma_ops);

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev)
		goto err;

	sdev->device = device;
	INIT_LIST_HEAD(&sdev->rch_list);
	init_waitqueue_head(&sdev->ch_releaseQ);
	mutex_init(&sdev->mutex);

	sdev->pd = ib_alloc_pd(device);
	if (IS_ERR(sdev->pd))
		goto free_dev;

	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);

	srq_attr.event_handler = srpt_srq_event;
	srq_attr.srq_context = (void *)sdev;
	srq_attr.attr.max_wr = sdev->srq_size;
	srq_attr.attr.max_sge = 1;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;

	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(sdev->srq))
		goto err_pd;

	pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
		 __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
		 device->name);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id))
		goto err_srq;

	/* print out target login information */
	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
		 srpt_service_guid, srpt_service_guid);

	/*
	 * We do not have a consistent service_id (i.e. also the id_ext of the
	 * target_id) to identify this target. We currently use the GUID of
	 * the first HCA in the system as service_id; therefore, the target_id
	 * will change if this HCA goes bad and is replaced by a different one.
	 */
	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
		goto err_cm;

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	if (ib_register_event_handler(&sdev->event_handler))
		goto err_cm;

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      srp_max_req_size, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring)
		goto err_event;

	for (i = 0; i < sdev->srq_size; ++i)
		srpt_post_recv(sdev, sdev->ioctx_ring[i]);

	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		INIT_WORK(&sport->work, srpt_refresh_port_work);

		if (srpt_refresh_port(sport)) {
			pr_err("MAD registration failed for %s-%d.\n",
			       sdev->device->name, i);
			goto err_ring;
		}
		snprintf(sport->port_guid, sizeof(sport->port_guid),
			 "0x%016llx%016llx",
			 be64_to_cpu(sport->gid.global.subnet_prefix),
			 be64_to_cpu(sport->gid.global.interface_id));
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

out:
	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", device->name);
	return;

err_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, srp_max_req_size,
			     DMA_FROM_DEVICE);
err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	ib_destroy_cm_id(sdev->cm_id);
err_srq:
	ib_destroy_srq(sdev->srq);
err_pd:
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
err:
	sdev = NULL;
	pr_info("%s(%s) failed.\n", __func__, device->name);
	goto out;
}

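/*
 * Illustrative example (hypothetical GUIDs, not driver output): the target
 * login info printed above is the information an SRP initiator such as
 * ib_srp needs to connect, e.g.:
 *
 *   echo "id_ext=0002c90300a0b1c2,ioc_guid=0002c90300a0b1c2,\
 *   dgid=fe800000000000000002c90300a0b1c3,pkey=ffff,\
 *   service_id=0002c90300a0b1c2" > \
 *   /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */
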
/**
 * srpt_remove_one() - InfiniBand device removal callback function.
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	if (!sdev) {
		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
		return;
	}

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	ib_destroy_cm_id(sdev->cm_id);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
	 */
	spin_lock(&srpt_dev_lock);
	list_del(&sdev->list);
	spin_unlock(&srpt_dev_lock);
	srpt_release_sdev(sdev);

	ib_destroy_srq(sdev->srq);
	ib_dealloc_pd(sdev->pd);

	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
	sdev->ioctx_ring = NULL;
	kfree(sdev);
}

static struct ib_client srpt_client = {
	.name = DRV_NAME,
	.add = srpt_add_one,
	.remove = srpt_remove_one
};

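/*
 * Note: ib_register_client() calls srpt_add_one() for every InfiniBand device
 * that already exists when the client is registered as well as for each
 * device added afterwards; srpt_remove_one() is called on device removal and
 * on ib_unregister_client().
 */
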
static int srpt_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int srpt_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *srpt_get_fabric_name(void)
{
	return "srpt";
}

static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);

	return sport->port_guid;
}

static u16 srpt_get_tag(struct se_portal_group *tpg)
{
	return 1;
}

static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void srpt_release_cmd(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	unsigned long flags;

	WARN_ON(ioctx->state != SRPT_STATE_DONE);
	WARN_ON(ioctx->mapped_sg_count != 0);

	if (ioctx->n_rbuf > 1) {
		kfree(ioctx->rbufs);
		ioctx->rbufs = NULL;
		ioctx->n_rbuf = 0;
	}

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}

/**
 * srpt_close_session() - Forcibly close a session.
 *
 * Callback function invoked by the TCM core to clean up sessions associated
 * with a node ACL when the user invokes
 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 */
static void srpt_close_session(struct se_session *se_sess)
{
	DECLARE_COMPLETION_ONSTACK(release_done);
	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
	struct srpt_device *sdev = ch->sport->sdev;
	bool wait;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

	mutex_lock(&sdev->mutex);
	BUG_ON(ch->release_done);
	ch->release_done = &release_done;
	wait = !list_empty(&ch->list);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sdev->mutex);

	if (!wait)
		return;

	while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
}

/**
 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
 *
 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
 * This object represents an arbitrary integer used to uniquely identify a
 * particular attached remote initiator port to a particular SCSI target port
 * within a particular SCSI target device within a particular SCSI instance.
 */
static u32 srpt_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
{
}

/* Note: only used from inside debug printk's by the TCM core. */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return srpt_get_cmd_state(ioctx);
}

/**
 * srpt_parse_i_port_id() - Parse an initiator port ID.
 * @name: ASCII representation of a 128-bit initiator port ID.
 * @i_port_id: Binary 128-bit port ID.
 */
static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
	const char *p;
	unsigned len, count, leading_zero_bytes;
	int ret, rc;

	p = name;
	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	ret = -EINVAL;
	len = strlen(p);
	if (len % 2)
		goto out;
	count = min(len / 2, 16U);
	leading_zero_bytes = 16 - count;
	memset(i_port_id, 0, leading_zero_bytes);
	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
	if (rc < 0)
		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
	ret = 0;
out:
	return ret;
}

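/*
 * Illustrative examples of inputs accepted by srpt_parse_i_port_id():
 * "0x4c2d73756273797374656d2d69643031" and the same string without the "0x"
 * prefix produce identical 16-byte IDs. Strings with an even number of hex
 * digits shorter than 32 are zero-padded on the left; odd-length strings are
 * rejected with -EINVAL.
 */
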
/*
 * configfs callback function invoked for
 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 */
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u8 i_port_id[16];

	if (srpt_parse_i_port_id(i_port_id, name) < 0) {
		pr_err("invalid initiator port ID %s\n", name);
		return -EINVAL;
	}
	return 0;
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RDMA_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
		       MAX_SRPT_RDMA_SIZE);
		return -EINVAL;
	}
	if (val < DEFAULT_MAX_RDMA_SIZE) {
		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
		       val, DEFAULT_MAX_RDMA_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rdma_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RSP_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
		       MAX_SRPT_RSP_SIZE);
		return -EINVAL;
	}
	if (val < MIN_MAX_RSP_SIZE) {
		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
		       MIN_MAX_RSP_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rsp_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);

	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
}

static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_SRQ_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
		       MAX_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	if (val < MIN_SRPT_SRQ_SIZE) {
		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
		       MIN_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_sq_size = val;

	return count;
}

CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);

static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
	&srpt_tpg_attrib_attr_srp_max_rdma_size,
	&srpt_tpg_attrib_attr_srp_max_rsp_size,
	&srpt_tpg_attrib_attr_srp_sq_size,
	NULL,
};

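/*
 * The three attributes above appear in configfs under the TPG attrib
 * directory, e.g. (illustrative path):
 *
 *   /sys/kernel/config/target/srpt/$GUID/$TPG/attrib/srp_max_rdma_size
 */
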
static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);

	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
}

static ssize_t srpt_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
	struct srpt_device *sdev = sport->sdev;
	struct srpt_rdma_ch *ch;
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}
	if (sport->enabled == tmp)
		goto out;
	sport->enabled = tmp;
	if (sport->enabled)
		goto out;

	mutex_lock(&sdev->mutex);
	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (ch->sport == sport) {
			pr_debug("%s: ch %p %s-%d\n", __func__, ch,
				 ch->sess_name, ch->qp->qp_num);
			srpt_disconnect_ch(ch);
			srpt_close_ch(ch);
		}
	}
	mutex_unlock(&sdev->mutex);

out:
	return count;
}

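/*
 * Example (illustrative path): enable or disable a target port group from
 * user space with:
 *
 *   echo 1 > /sys/kernel/config/target/srpt/$GUID/$TPG/enable
 *
 * Writing 0 while the port is enabled disconnects all channels associated
 * with that port.
 */
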
CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};

/**
 * configfs callback invoked for
 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     struct config_group *group,
					     const char *name)
{
	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
	int res;

	/* Initialize sport->port_wwn and sport->port_tpg_1 */
	res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
	if (res)
		return ERR_PTR(res);

	return &sport->port_tpg_1;
}

/**
 * configfs callback invoked for
 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_port *sport = container_of(tpg,
				struct srpt_port, port_tpg_1);

	sport->enabled = false;
	core_tpg_deregister(&sport->port_tpg_1);
}

/**
 * configfs callback invoked for
 * mkdir /sys/kernel/config/target/$driver/$port
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
	struct srpt_port *sport;
	int ret;

	sport = srpt_lookup_port(name);
	pr_debug("make_tport(%s)\n", name);
	ret = -EINVAL;
	if (!sport)
		goto err;

	return &sport->port_wwn;

err:
	return ERR_PTR(ret);
}

/**
 * configfs callback invoked for
 * rmdir /sys/kernel/config/target/$driver/$port
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);

	pr_debug("drop_tport(%s)\n",
		 config_item_name(&sport->port_wwn.wwn_group.cg_item));
}

static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}

CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops srpt_template = {
	.module				= THIS_MODULE,
	.name				= "srpt",
	.get_fabric_name		= srpt_get_fabric_name,
	.tpg_get_wwn			= srpt_get_fabric_wwn,
	.tpg_get_tag			= srpt_get_tag,
	.tpg_check_demo_mode		= srpt_check_false,
	.tpg_check_demo_mode_cache	= srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
	.release_cmd			= srpt_release_cmd,
	.check_stop_free		= srpt_check_stop_free,
	.close_session			= srpt_close_session,
	.sess_get_index			= srpt_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= srpt_write_pending,
	.write_pending_status		= srpt_write_pending_status,
	.set_default_node_attributes	= srpt_set_default_node_attrs,
	.get_cmd_state			= srpt_get_tcm_cmd_state,
	.queue_data_in			= srpt_queue_data_in,
	.queue_status			= srpt_queue_status,
	.queue_tm_rsp			= srpt_queue_tm_rsp,
	.aborted_task			= srpt_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= srpt_make_tport,
	.fabric_drop_wwn		= srpt_drop_tport,
	.fabric_make_tpg		= srpt_make_tpg,
	.fabric_drop_tpg		= srpt_drop_tpg,
	.fabric_init_nodeacl		= srpt_init_nodeacl,

	.tfc_wwn_attrs			= srpt_wwn_attrs,
	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
};

/**
 * srpt_init_module() - Kernel module initialization.
 *
 * Note: Since ib_register_client() registers callback functions, and since at
 * least one of these callback functions (srpt_add_one()) calls target core
 * functions, this driver must be registered with the target core before
 * ib_register_client() is called.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = -EINVAL;
	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter"
		       " srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
	}

	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter"
		       " srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
	}

	ret = target_register_template(&srpt_template);
	if (ret)
		goto out;

	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto out_unregister_target;
	}

	return 0;

out_unregister_target:
	target_unregister_template(&srpt_template);
out:
	return ret;
}

static void __exit srpt_cleanup_module(void)
{
	ib_unregister_client(&srpt_client);
	target_unregister_template(&srpt_template);
}

module_init(srpt_init_module);
module_exit(srpt_cleanup_module);