2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
44 #include <linux/atomic.h>
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_dbg.h>
49 #include <scsi/scsi_tcq.h>
51 #include <scsi/scsi_transport_srp.h>
55 #define DRV_NAME "ib_srp"
56 #define PFX DRV_NAME ": "
57 #define DRV_VERSION "1.0"
58 #define DRV_RELDATE "July 1, 2013"
60 MODULE_AUTHOR("Roland Dreier");
61 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 "v" DRV_VERSION " (" DRV_RELDATE ")");
63 MODULE_LICENSE("Dual BSD/GPL");
65 static unsigned int srp_sg_tablesize;
66 static unsigned int cmd_sg_entries;
67 static unsigned int indirect_sg_entries;
68 static bool allow_ext_sg;
69 static bool prefer_fr;
70 static bool register_always;
71 static int topspin_workarounds = 1;
73 module_param(srp_sg_tablesize, uint, 0444);
74 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
76 module_param(cmd_sg_entries, uint, 0444);
77 MODULE_PARM_DESC(cmd_sg_entries,
78 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
80 module_param(indirect_sg_entries, uint, 0444);
81 MODULE_PARM_DESC(indirect_sg_entries,
82 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
84 module_param(allow_ext_sg, bool, 0444);
85 MODULE_PARM_DESC(allow_ext_sg,
86 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
88 module_param(topspin_workarounds, int, 0444);
89 MODULE_PARM_DESC(topspin_workarounds,
90 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
92 module_param(prefer_fr, bool, 0444);
93 MODULE_PARM_DESC(prefer_fr,
94 "Whether to use fast registration if both FMR and fast registration are supported");
96 module_param(register_always, bool, 0444);
97 MODULE_PARM_DESC(register_always,
98 "Use memory registration even for contiguous memory regions");
100 static struct kernel_param_ops srp_tmo_ops;
102 static int srp_reconnect_delay = 10;
103 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
105 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
107 static int srp_fast_io_fail_tmo = 15;
108 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
110 MODULE_PARM_DESC(fast_io_fail_tmo,
111 "Number of seconds between the observation of a transport"
112 " layer error and failing all I/O. \"off\" means that this"
113 " functionality is disabled.");
115 static int srp_dev_loss_tmo = 600;
116 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
118 MODULE_PARM_DESC(dev_loss_tmo,
119 "Maximum number of seconds that the SRP transport should"
120 " insulate the SCSI layer from transport layer errors. After this time has been"
121 " exceeded the SCSI host is removed. Should be"
122 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled.");
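/*
 * Note that reconnect_delay, fast_io_fail_tmo and dev_loss_tmo are not
 * independent: srp_tmo_set() below cross-checks any newly written value
 * against the current values of the other two via srp_tmo_valid() before
 * storing it.
 */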
126 static unsigned ch_count;
127 module_param(ch_count, uint, 0444);
128 MODULE_PARM_DESC(ch_count,
129 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
131 static void srp_add_one(struct ib_device *device);
132 static void srp_remove_one(struct ib_device *device);
133 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
134 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
135 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
137 static struct scsi_transport_template *ib_srp_transport_template;
138 static struct workqueue_struct *srp_remove_wq;
140 static struct ib_client srp_client = {
143 .remove = srp_remove_one
146 static struct ib_sa_client srp_sa_client;
148 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
150 int tmo = *(int *)kp->arg;
153 return sprintf(buffer, "%d", tmo);
155 return sprintf(buffer, "off");
158 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162 if (strncmp(val, "off", 3) != 0) {
163 res = kstrtoint(val, 0, &tmo);
169 if (kp->arg == &srp_reconnect_delay)
170 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
172 else if (kp->arg == &srp_fast_io_fail_tmo)
173 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
175 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
179 *(int *)kp->arg = tmo;
185 static struct kernel_param_ops srp_tmo_ops = {
190 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
192 return (struct srp_target_port *) host->hostdata;
195 static const char *srp_target_info(struct Scsi_Host *host)
197 return host_to_target(host)->target_name;
200 static int srp_target_is_topspin(struct srp_target_port *target)
202 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
203 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
205 return topspin_workarounds &&
206 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
207 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
210 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
212 enum dma_data_direction direction)
216 iu = kmalloc(sizeof *iu, gfp_mask);
220 iu->buf = kzalloc(size, gfp_mask);
224 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
226 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
230 iu->direction = direction;
242 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
247 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
253 static void srp_qp_event(struct ib_event *event, void *context)
255 pr_debug("QP event %d\n", event->event);
258 static int srp_init_qp(struct srp_target_port *target,
261 struct ib_qp_attr *attr;
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
268 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
270 be16_to_cpu(target->pkey),
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
280 ret = ib_modify_qp(qp, attr,
291 static int srp_new_cm_id(struct srp_rdma_ch *ch)
293 struct srp_target_port *target = ch->target;
294 struct ib_cm_id *new_cm_id;
296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
312 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
334 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
337 struct srp_fr_desc *d;
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
344 ib_free_fast_reg_page_list(d->frpl);
352 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
353 * @device: IB device to allocate fast registration descriptors for.
354 * @pd: Protection domain associated with the FR descriptors.
355 * @pool_size: Number of descriptors to allocate.
356 * @max_page_list_len: Maximum fast registration work request page list length.
358 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
359 struct ib_pd *pd, int pool_size,
360 int max_page_list_len)
362 struct srp_fr_pool *pool;
363 struct srp_fr_desc *d;
365 struct ib_fast_reg_page_list *frpl;
366 int i, ret = -EINVAL;
371 pool = kzalloc(sizeof(struct srp_fr_pool) +
372 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
375 pool->size = pool_size;
376 pool->max_page_list_len = max_page_list_len;
377 spin_lock_init(&pool->lock);
378 INIT_LIST_HEAD(&pool->free_list);
380 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
381 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
387 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
393 list_add_tail(&d->entry, &pool->free_list);
400 srp_destroy_fr_pool(pool);
408 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
409 * @pool: Pool to obtain descriptor from.
411 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
413 struct srp_fr_desc *d = NULL;
416 spin_lock_irqsave(&pool->lock, flags);
417 if (!list_empty(&pool->free_list)) {
418 d = list_first_entry(&pool->free_list, typeof(*d), entry);
421 spin_unlock_irqrestore(&pool->lock, flags);
427 * srp_fr_pool_put() - put an FR descriptor back in the free list
428 * @pool: Pool the descriptor was allocated from.
429 * @desc: Pointer to an array of fast registration descriptor pointers.
430 * @n: Number of descriptors to put back.
432 * Note: The caller must already have queued an invalidation request for
433 * desc->mr->rkey before calling this function.
435 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
441 spin_lock_irqsave(&pool->lock, flags);
442 for (i = 0; i < n; i++)
443 list_add(&desc[i]->entry, &pool->free_list);
444 spin_unlock_irqrestore(&pool->lock, flags);
447 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
449 struct srp_device *dev = target->srp_host->srp_dev;
451 return srp_create_fr_pool(dev->dev, dev->pd,
452 target->scsi_host->can_queue,
453 dev->max_pages_per_mr);
457 * srp_destroy_qp() - destroy an RDMA queue pair
458 * @ch: SRP RDMA channel.
460 * Change a queue pair into the error state and wait until all receive
461 * completions have been processed before destroying it. This prevents the
462 * receive completion handler from accessing the queue pair while it is being destroyed.
465 static void srp_destroy_qp(struct srp_rdma_ch *ch)
467 struct srp_target_port *target = ch->target;
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
474 WARN_ON_ONCE(target->connected);
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
477 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
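/*
 * Post a receive work request with the reserved wr_id SRP_LAST_WR_ID on the
 * QP, which is now in the error state. Its flush completion makes
 * srp_handle_qp_err() complete ch->done, signalling that the receive
 * completion handler has caught up and the QP can be destroyed safely.
 */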
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
485 wait_for_completion(&ch->done);
488 ib_destroy_qp(ch->qp);
491 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
493 struct srp_target_port *target = ch->target;
494 struct srp_device *dev = target->srp_host->srp_dev;
495 struct ib_qp_init_attr *init_attr;
496 struct ib_cq *recv_cq, *send_cq;
498 struct ib_fmr_pool *fmr_pool = NULL;
499 struct srp_fr_pool *fr_pool = NULL;
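/*
 * The factor m below accounts for the fact that, when fast registration is
 * used, memory registration and invalidation work requests are posted on the
 * send queue in addition to the SRP_CMD sends.
 */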
500 const int m = 1 + dev->use_fast_reg;
503 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
507 /* + 1 for SRP_LAST_WR_ID */
508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
509 target->queue_size + 1, ch->comp_vector);
510 if (IS_ERR(recv_cq)) {
511 ret = PTR_ERR(recv_cq);
515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
516 m * target->queue_size, ch->comp_vector);
517 if (IS_ERR(send_cq)) {
518 ret = PTR_ERR(send_cq);
522 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
524 init_attr->event_handler = srp_qp_event;
525 init_attr->cap.max_send_wr = m * target->queue_size;
526 init_attr->cap.max_recv_wr = target->queue_size + 1;
527 init_attr->cap.max_recv_sge = 1;
528 init_attr->cap.max_send_sge = 1;
529 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
530 init_attr->qp_type = IB_QPT_RC;
531 init_attr->send_cq = send_cq;
532 init_attr->recv_cq = recv_cq;
534 qp = ib_create_qp(dev->pd, init_attr);
540 ret = srp_init_qp(target, qp);
544 if (dev->use_fast_reg && dev->has_fr) {
545 fr_pool = srp_alloc_fr_pool(target);
546 if (IS_ERR(fr_pool)) {
547 ret = PTR_ERR(fr_pool);
548 shost_printk(KERN_WARNING, target->scsi_host, PFX
549 "FR pool allocation failed (%d)\n", ret);
553 srp_destroy_fr_pool(ch->fr_pool);
554 ch->fr_pool = fr_pool;
555 } else if (!dev->use_fast_reg && dev->has_fmr) {
556 fmr_pool = srp_alloc_fmr_pool(target);
557 if (IS_ERR(fmr_pool)) {
558 ret = PTR_ERR(fmr_pool);
559 shost_printk(KERN_WARNING, target->scsi_host, PFX
560 "FMR pool allocation failed (%d)\n", ret);
564 ib_destroy_fmr_pool(ch->fmr_pool);
565 ch->fmr_pool = fmr_pool;
571 ib_destroy_cq(ch->recv_cq);
573 ib_destroy_cq(ch->send_cq);
576 ch->recv_cq = recv_cq;
577 ch->send_cq = send_cq;
586 ib_destroy_cq(send_cq);
589 ib_destroy_cq(recv_cq);
597 * Note: this function may be called without srp_alloc_iu_bufs() having been
598 * invoked. Hence the ch->[rt]x_ring checks.
600 static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
603 struct srp_device *dev = target->srp_host->srp_dev;
610 ib_destroy_cm_id(ch->cm_id);
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
618 if (dev->use_fast_reg) {
620 srp_destroy_fr_pool(ch->fr_pool);
623 ib_destroy_fmr_pool(ch->fmr_pool);
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
630 * Prevent the SCSI error handler from using this channel after it has
631 * been freed: the error handler may continue trying to perform recovery
632 * actions after scsi_remove_host() has returned.
638 ch->send_cq = ch->recv_cq = NULL;
641 for (i = 0; i < target->queue_size; ++i)
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
647 for (i = 0; i < target->queue_size; ++i)
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
654 static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
670 static int srp_lookup_path(struct srp_rdma_ch *ch)
672 struct srp_target_port *target = ch->target;
675 ch->path.numb_path = 1;
677 init_completion(&ch->done);
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
688 SRP_PATH_REC_TIMEOUT_MS,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
695 ret = wait_for_completion_interruptible(&ch->done);
700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
706 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
708 struct srp_target_port *target = ch->target;
710 struct ib_cm_req_param param;
711 struct srp_login_req priv;
715 req = kzalloc(sizeof *req, GFP_KERNEL);
719 req->param.primary_path = &ch->path;
720 req->param.alternate_path = NULL;
721 req->param.service_id = target->service_id;
722 req->param.qp_num = ch->qp->qp_num;
723 req->param.qp_type = ch->qp->qp_type;
724 req->param.private_data = &req->priv;
725 req->param.private_data_len = sizeof req->priv;
726 req->param.flow_control = 1;
728 get_random_bytes(&req->param.starting_psn, 4);
729 req->param.starting_psn &= 0xffffff;
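/* Packet sequence numbers are 24-bit values, hence the mask above. */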
732 * Pick some arbitrary defaults here; we could make these
733 * module parameters if anyone cared about setting them.
735 req->param.responder_resources = 4;
736 req->param.remote_cm_response_timeout = 20;
737 req->param.local_cm_response_timeout = 20;
738 req->param.retry_count = target->tl_retry_count;
739 req->param.rnr_retry_count = 7;
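/* An RNR retry count of 7 means "retry indefinitely". */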
740 req->param.max_cm_retries = 15;
742 req->priv.opcode = SRP_LOGIN_REQ;
744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
746 SRP_BUF_FORMAT_INDIRECT);
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
750 * In the published SRP specification (draft rev. 16a), the
751 * port identifier format is 8 bytes of ID extension followed
752 * by 8 bytes of GUID. Older drafts put the two halves in the
753 * opposite order, so that the GUID comes first.
755 * Targets conforming to these obsolete drafts can be
756 * recognized by the I/O Class they report.
758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
759 memcpy(req->priv.initiator_port_id,
760 &target->sgid.global.interface_id, 8);
761 memcpy(req->priv.initiator_port_id + 8,
762 &target->initiator_ext, 8);
763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
766 memcpy(req->priv.initiator_port_id,
767 &target->initiator_ext, 8);
768 memcpy(req->priv.initiator_port_id + 8,
769 &target->sgid.global.interface_id, 8);
770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
775 * Topspin/Cisco SRP targets will reject our login unless we
776 * zero out the first 8 bytes of our initiator port ID and set
777 * the second 8 bytes to the local node GUID.
779 if (srp_target_is_topspin(target)) {
780 shost_printk(KERN_DEBUG, target->scsi_host,
781 PFX "Topspin/Cisco initiator port ID workaround "
782 "activated for target GUID %016llx\n",
783 (unsigned long long) be64_to_cpu(target->ioc_guid));
784 memset(req->priv.initiator_port_id, 0, 8);
785 memcpy(req->priv.initiator_port_id + 8,
786 &target->srp_host->srp_dev->dev->node_guid, 8);
789 status = ib_send_cm_req(ch->cm_id, &req->param);
796 static bool srp_queue_remove_work(struct srp_target_port *target)
798 bool changed = false;
800 spin_lock_irq(&target->lock);
801 if (target->state != SRP_TARGET_REMOVED) {
802 target->state = SRP_TARGET_REMOVED;
805 spin_unlock_irq(&target->lock);
808 queue_work(srp_remove_wq, &target->remove_work);
813 static bool srp_change_conn_state(struct srp_target_port *target,
816 bool changed = false;
818 spin_lock_irq(&target->lock);
819 if (target->connected != connected) {
820 target->connected = connected;
823 spin_unlock_irq(&target->lock);
828 static void srp_disconnect_target(struct srp_target_port *target)
830 struct srp_rdma_ch *ch;
833 if (srp_change_conn_state(target, false)) {
834 /* XXX should send SRP_I_LOGOUT request */
836 for (i = 0; i < target->ch_count; i++) {
838 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
839 shost_printk(KERN_DEBUG, target->scsi_host,
840 PFX "Sending CM DREQ failed\n");
846 static void srp_free_req_data(struct srp_target_port *target,
847 struct srp_rdma_ch *ch)
849 struct srp_device *dev = target->srp_host->srp_dev;
850 struct ib_device *ibdev = dev->dev;
851 struct srp_request *req;
854 if (!ch->target || !ch->req_ring)
857 for (i = 0; i < target->req_ring_size; ++i) {
858 req = &ch->req_ring[i];
859 if (dev->use_fast_reg)
862 kfree(req->fmr_list);
863 kfree(req->map_page);
864 if (req->indirect_dma_addr) {
865 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
866 target->indirect_size,
869 kfree(req->indirect_desc);
876 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
878 struct srp_target_port *target = ch->target;
879 struct srp_device *srp_dev = target->srp_host->srp_dev;
880 struct ib_device *ibdev = srp_dev->dev;
881 struct srp_request *req;
884 int i, ret = -ENOMEM;
886 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
891 for (i = 0; i < target->req_ring_size; ++i) {
892 req = &ch->req_ring[i];
893 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
897 if (srp_dev->use_fast_reg)
898 req->fr_list = mr_list;
900 req->fmr_list = mr_list;
901 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
902 sizeof(void *), GFP_KERNEL);
905 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
906 if (!req->indirect_desc)
909 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
910 target->indirect_size,
912 if (ib_dma_mapping_error(ibdev, dma_addr))
915 req->indirect_dma_addr = dma_addr;
924 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
925 * @shost: SCSI host whose attributes to remove from sysfs.
927 * Note: Any attributes defined in the host template that did not exist
928 * before this function was invoked will be ignored.
930 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
932 struct device_attribute **attr;
934 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
935 device_remove_file(&shost->shost_dev, *attr);
938 static void srp_remove_target(struct srp_target_port *target)
940 struct srp_rdma_ch *ch;
943 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
945 srp_del_scsi_host_attr(target->scsi_host);
946 srp_rport_get(target->rport);
947 srp_remove_host(target->scsi_host);
948 scsi_remove_host(target->scsi_host);
949 srp_stop_rport_timers(target->rport);
950 srp_disconnect_target(target);
951 for (i = 0; i < target->ch_count; i++) {
953 srp_free_ch_ib(target, ch);
955 cancel_work_sync(&target->tl_err_work);
956 srp_rport_put(target->rport);
957 for (i = 0; i < target->ch_count; i++) {
959 srp_free_req_data(target, ch);
964 spin_lock(&target->srp_host->target_lock);
965 list_del(&target->list);
966 spin_unlock(&target->srp_host->target_lock);
968 scsi_host_put(target->scsi_host);
971 static void srp_remove_work(struct work_struct *work)
973 struct srp_target_port *target =
974 container_of(work, struct srp_target_port, remove_work);
976 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
978 srp_remove_target(target);
981 static void srp_rport_delete(struct srp_rport *rport)
983 struct srp_target_port *target = rport->lld_data;
985 srp_queue_remove_work(target);
988 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
990 struct srp_target_port *target = ch->target;
993 WARN_ON_ONCE(!multich && target->connected);
995 target->qp_in_error = false;
997 ret = srp_lookup_path(ch);
1002 init_completion(&ch->done);
1003 ret = srp_send_req(ch, multich);
1006 ret = wait_for_completion_interruptible(&ch->done);
1011 * The CM event handling code will set status to
1012 * SRP_PORT_REDIRECT if we get a port redirect REJ
1013 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1014 * redirect REJ back.
1016 switch (ch->status) {
1018 srp_change_conn_state(target, true);
1021 case SRP_PORT_REDIRECT:
1022 ret = srp_lookup_path(ch);
1027 case SRP_DLID_REDIRECT:
1030 case SRP_STALE_CONN:
1031 shost_printk(KERN_ERR, target->scsi_host, PFX
1032 "giving up on stale connection\n");
1033 ch->status = -ECONNRESET;
1042 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1044 struct ib_send_wr *bad_wr;
1045 struct ib_send_wr wr = {
1046 .opcode = IB_WR_LOCAL_INV,
1047 .wr_id = LOCAL_INV_WR_ID_MASK,
1051 .ex.invalidate_rkey = rkey,
1054 return ib_post_send(ch->qp, &wr, &bad_wr);
1057 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1058 struct srp_rdma_ch *ch,
1059 struct srp_request *req)
1061 struct srp_target_port *target = ch->target;
1062 struct srp_device *dev = target->srp_host->srp_dev;
1063 struct ib_device *ibdev = dev->dev;
1066 if (!scsi_sglist(scmnd) ||
1067 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1068 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1071 if (dev->use_fast_reg) {
1072 struct srp_fr_desc **pfr;
1074 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1075 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1077 shost_printk(KERN_ERR, target->scsi_host, PFX
1078 "Queueing INV WR for rkey %#x failed (%d)\n",
1079 (*pfr)->mr->rkey, res);
1080 queue_work(system_long_wq,
1081 &target->tl_err_work);
1085 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1088 struct ib_pool_fmr **pfmr;
1090 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1091 ib_fmr_pool_unmap(*pfmr);
1094 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1095 scmnd->sc_data_direction);
1099 * srp_claim_req - Take ownership of the scmnd associated with a request.
1100 * @ch: SRP RDMA channel.
1101 * @req: SRP request.
1102 * @sdev: If not NULL, only take ownership for this SCSI device.
1103 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1104 * ownership of @req->scmnd if it equals @scmnd.
1107 * Either NULL or a pointer to the SCSI command the caller became owner of.
1109 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1110 struct srp_request *req,
1111 struct scsi_device *sdev,
1112 struct scsi_cmnd *scmnd)
1114 unsigned long flags;
1116 spin_lock_irqsave(&ch->lock, flags);
1118 (!sdev || req->scmnd->device == sdev) &&
1119 (!scmnd || req->scmnd == scmnd)) {
1125 spin_unlock_irqrestore(&ch->lock, flags);
1131 * srp_free_req() - Unmap data and add request to the free request list.
1132 * @ch: SRP RDMA channel.
1133 * @req: Request to be freed.
1134 * @scmnd: SCSI command associated with @req.
1135 * @req_lim_delta: Amount to be added to @target->req_lim.
1137 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1138 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1140 unsigned long flags;
1142 srp_unmap_data(scmnd, ch, req);
1144 spin_lock_irqsave(&ch->lock, flags);
1145 ch->req_lim += req_lim_delta;
1146 spin_unlock_irqrestore(&ch->lock, flags);
1149 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1150 struct scsi_device *sdev, int result)
1152 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1155 srp_free_req(ch, req, scmnd, 0);
1156 scmnd->result = result;
1157 scmnd->scsi_done(scmnd);
1161 static void srp_terminate_io(struct srp_rport *rport)
1163 struct srp_target_port *target = rport->lld_data;
1164 struct srp_rdma_ch *ch;
1165 struct Scsi_Host *shost = target->scsi_host;
1166 struct scsi_device *sdev;
1170 * Invoking srp_terminate_io() while srp_queuecommand() is running
1171 * is not safe. Hence the warning statement below.
1173 shost_for_each_device(sdev, shost)
1174 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1176 for (i = 0; i < target->ch_count; i++) {
1177 ch = &target->ch[i];
1179 for (j = 0; j < target->req_ring_size; ++j) {
1180 struct srp_request *req = &ch->req_ring[j];
1182 srp_finish_req(ch, req, NULL,
1183 DID_TRANSPORT_FAILFAST << 16);
1189 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1190 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1191 * srp_reset_device() or srp_reset_host() calls will occur while this function
1192 * is in progress. One way to realize that is not to call this function
1193 * directly but to call srp_reconnect_rport() instead since that last function
1194 * serializes calls of this function via rport->mutex and also blocks
1195 * srp_queuecommand() calls before invoking this function.
1197 static int srp_rport_reconnect(struct srp_rport *rport)
1199 struct srp_target_port *target = rport->lld_data;
1200 struct srp_rdma_ch *ch;
1202 bool multich = false;
1204 srp_disconnect_target(target);
1206 if (target->state == SRP_TARGET_SCANNING)
1210 * Now get a new local CM ID so that we avoid confusing the target in
1211 * case things are really fouled up. Doing so also ensures that all CM
1212 * callbacks will have finished before a new QP is allocated.
1214 for (i = 0; i < target->ch_count; i++) {
1215 ch = &target->ch[i];
1218 ret += srp_new_cm_id(ch);
1220 for (i = 0; i < target->ch_count; i++) {
1221 ch = &target->ch[i];
1224 for (j = 0; j < target->req_ring_size; ++j) {
1225 struct srp_request *req = &ch->req_ring[j];
1227 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1230 for (i = 0; i < target->ch_count; i++) {
1231 ch = &target->ch[i];
1235 * Whether or not creating a new CM ID succeeded, create a new
1236 * QP. This guarantees that all completion callback function
1237 * invocations have finished before request resetting starts.
1239 ret += srp_create_ch_ib(ch);
1241 INIT_LIST_HEAD(&ch->free_tx);
1242 for (j = 0; j < target->queue_size; ++j)
1243 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1245 for (i = 0; i < target->ch_count; i++) {
1246 ch = &target->ch[i];
1247 if (ret || !ch->target) {
1252 ret = srp_connect_ch(ch, multich);
1257 shost_printk(KERN_INFO, target->scsi_host,
1258 PFX "reconnect succeeded\n");
1263 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1264 unsigned int dma_len, u32 rkey)
1266 struct srp_direct_buf *desc = state->desc;
1268 desc->va = cpu_to_be64(dma_addr);
1269 desc->key = cpu_to_be32(rkey);
1270 desc->len = cpu_to_be32(dma_len);
1272 state->total_len += dma_len;
1277 static int srp_map_finish_fmr(struct srp_map_state *state,
1278 struct srp_rdma_ch *ch)
1280 struct ib_pool_fmr *fmr;
1283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1284 state->npages, io_addr);
1286 return PTR_ERR(fmr);
1288 *state->next_fmr++ = fmr;
1291 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
1296 static int srp_map_finish_fr(struct srp_map_state *state,
1297 struct srp_rdma_ch *ch)
1299 struct srp_target_port *target = ch->target;
1300 struct srp_device *dev = target->srp_host->srp_dev;
1301 struct ib_send_wr *bad_wr;
1302 struct ib_send_wr wr;
1303 struct srp_fr_desc *desc;
1306 desc = srp_fr_pool_get(ch->fr_pool);
1310 rkey = ib_inc_rkey(desc->mr->rkey);
1311 ib_update_fast_reg_key(desc->mr, rkey);
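/*
 * Refresh the low eight "key" bits of the rkey before reusing this MR so
 * that work requests still carrying a stale rkey from a previous
 * registration are rejected.
 */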
1313 memcpy(desc->frpl->page_list, state->pages,
1314 sizeof(state->pages[0]) * state->npages);
1316 memset(&wr, 0, sizeof(wr));
1317 wr.opcode = IB_WR_FAST_REG_MR;
1318 wr.wr_id = FAST_REG_WR_ID_MASK;
1319 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1320 wr.wr.fast_reg.page_list = desc->frpl;
1321 wr.wr.fast_reg.page_list_len = state->npages;
1322 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1323 wr.wr.fast_reg.length = state->dma_len;
1324 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1325 IB_ACCESS_REMOTE_READ |
1326 IB_ACCESS_REMOTE_WRITE);
1327 wr.wr.fast_reg.rkey = desc->mr->lkey;
1329 *state->next_fr++ = desc;
1332 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1335 return ib_post_send(ch->qp, &wr, &bad_wr);
1338 static int srp_finish_mapping(struct srp_map_state *state,
1339 struct srp_rdma_ch *ch)
1341 struct srp_target_port *target = ch->target;
1344 if (state->npages == 0)
1347 if (state->npages == 1 && !register_always)
1348 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1351 ret = target->srp_host->srp_dev->use_fast_reg ?
1352 srp_map_finish_fr(state, ch) :
1353 srp_map_finish_fmr(state, ch);
1363 static void srp_map_update_start(struct srp_map_state *state,
1364 struct scatterlist *sg, int sg_index,
1365 dma_addr_t dma_addr)
1367 state->unmapped_sg = sg;
1368 state->unmapped_index = sg_index;
1369 state->unmapped_addr = dma_addr;
1372 static int srp_map_sg_entry(struct srp_map_state *state,
1373 struct srp_rdma_ch *ch,
1374 struct scatterlist *sg, int sg_index,
1377 struct srp_target_port *target = ch->target;
1378 struct srp_device *dev = target->srp_host->srp_dev;
1379 struct ib_device *ibdev = dev->dev;
1380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1381 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1390 * Once we're in direct map mode for a request, we don't
1391 * go back to FMR or FR mode, so no need to update anything
1392 * other than the descriptor.
1394 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1399 * Since not all RDMA HW drivers support non-zero page offsets for
1400 * FMR, if we start at an offset into a page, don't merge into the
1401 * current FMR mapping. Finish it out, and use the kernel's MR for
1404 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1405 dma_len > dev->mr_max_size) {
1406 ret = srp_finish_mapping(state, ch);
1410 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1411 srp_map_update_start(state, NULL, 0, 0);
1416 * If this is the first sg that will be mapped via FMR or via FR, save
1417 * our position. We need to know the first unmapped entry, its index,
1418 * and the first unmapped address within that entry to be able to
1419 * restart mapping after an error.
1421 if (!state->unmapped_sg)
1422 srp_map_update_start(state, sg, sg_index, dma_addr);
1425 unsigned offset = dma_addr & ~dev->mr_page_mask;
1426 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1427 ret = srp_finish_mapping(state, ch);
1431 srp_map_update_start(state, sg, sg_index, dma_addr);
1434 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1437 state->base_dma_addr = dma_addr;
1438 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1439 state->dma_len += len;
1445 * If the last entry of the MR wasn't a full page, then we need to
1446 * close it out and start a new one -- we can only merge at page
1450 if (len != dev->mr_page_size) {
1451 ret = srp_finish_mapping(state, ch);
1453 srp_map_update_start(state, NULL, 0, 0);
1458 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1459 struct srp_request *req, struct scatterlist *scat,
1462 struct srp_target_port *target = ch->target;
1463 struct srp_device *dev = target->srp_host->srp_dev;
1464 struct ib_device *ibdev = dev->dev;
1465 struct scatterlist *sg;
1469 state->desc = req->indirect_desc;
1470 state->pages = req->map_page;
1471 if (dev->use_fast_reg) {
1472 state->next_fr = req->fr_list;
1473 use_mr = !!ch->fr_pool;
1475 state->next_fmr = req->fmr_list;
1476 use_mr = !!ch->fmr_pool;
1479 for_each_sg(scat, sg, count, i) {
1480 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1482 * Memory registration failed, so backtrack to the
1483 * first unmapped entry and continue on without using
1484 * memory registration.
1486 dma_addr_t dma_addr;
1487 unsigned int dma_len;
1490 sg = state->unmapped_sg;
1491 i = state->unmapped_index;
1493 dma_addr = ib_sg_dma_address(ibdev, sg);
1494 dma_len = ib_sg_dma_len(ibdev, sg);
1495 dma_len -= (state->unmapped_addr - dma_addr);
1496 dma_addr = state->unmapped_addr;
1498 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1502 if (use_mr && srp_finish_mapping(state, ch))
1505 req->nmdesc = state->nmdesc;
1510 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1511 struct srp_request *req)
1513 struct srp_target_port *target = ch->target;
1514 struct scatterlist *scat;
1515 struct srp_cmd *cmd = req->cmd->buf;
1516 int len, nents, count;
1517 struct srp_device *dev;
1518 struct ib_device *ibdev;
1519 struct srp_map_state state;
1520 struct srp_indirect_buf *indirect_hdr;
1524 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1525 return sizeof (struct srp_cmd);
1527 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1528 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1529 shost_printk(KERN_WARNING, target->scsi_host,
1530 PFX "Unhandled data direction %d\n",
1531 scmnd->sc_data_direction);
1535 nents = scsi_sg_count(scmnd);
1536 scat = scsi_sglist(scmnd);
1538 dev = target->srp_host->srp_dev;
1541 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1542 if (unlikely(count == 0))
1545 fmt = SRP_DATA_DESC_DIRECT;
1546 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1548 if (count == 1 && !register_always) {
1550 * The midlayer only generated a single gather/scatter
1551 * entry, or DMA mapping coalesced everything to a
1552 * single entry. So a direct descriptor along with
1553 * the DMA MR suffices.
1555 struct srp_direct_buf *buf = (void *) cmd->add_data;
1557 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1558 buf->key = cpu_to_be32(target->rkey);
1559 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1566 * We have more than one scatter/gather entry, so build our indirect
1567 * descriptor table, trying to merge as many entries as we can.
1569 indirect_hdr = (void *) cmd->add_data;
1571 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1572 target->indirect_size, DMA_TO_DEVICE);
1574 memset(&state, 0, sizeof(state));
1575 srp_map_sg(&state, ch, req, scat, count);
1577 /* We've mapped the request, now pull as much of the indirect
1578 * descriptor table as we can into the command buffer. If this
1579 * target is not using an external indirect table, we are
1580 * guaranteed to fit into the command, as the SCSI layer won't
1581 * give us more S/G entries than we allow.
1583 if (state.ndesc == 1) {
1585 * Memory registration collapsed the sg-list into one entry,
1586 * so use a direct descriptor.
1588 struct srp_direct_buf *buf = (void *) cmd->add_data;
1590 *buf = req->indirect_desc[0];
1594 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1595 !target->allow_ext_sg)) {
1596 shost_printk(KERN_ERR, target->scsi_host,
1597 "Could not fit S/G list into SRP_CMD\n");
1601 count = min(state.ndesc, target->cmd_sg_cnt);
1602 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1604 fmt = SRP_DATA_DESC_INDIRECT;
1605 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1606 len += count * sizeof (struct srp_direct_buf);
1608 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1609 count * sizeof (struct srp_direct_buf));
1611 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1612 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1613 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1614 indirect_hdr->len = cpu_to_be32(state.total_len);
1616 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1617 cmd->data_out_desc_cnt = count;
1619 cmd->data_in_desc_cnt = count;
1621 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1625 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1626 cmd->buf_fmt = fmt << 4;
1634 * Return an IU and possible credit to the free pool
1636 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1637 enum srp_iu_type iu_type)
1639 unsigned long flags;
1641 spin_lock_irqsave(&ch->lock, flags);
1642 list_add(&iu->list, &ch->free_tx);
1643 if (iu_type != SRP_IU_RSP)
1645 spin_unlock_irqrestore(&ch->lock, flags);
1649 * Must be called with ch->lock held to protect req_lim and free_tx.
1650 * If IU is not sent, it must be returned using srp_put_tx_iu().
1653 * An upper limit for the number of allocated information units for each
1655 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1656 * more than Scsi_Host.can_queue requests.
1657 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1658 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1659 * one unanswered SRP request to an initiator.
1661 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1662 enum srp_iu_type iu_type)
1664 struct srp_target_port *target = ch->target;
1665 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1668 srp_send_completion(ch->send_cq, ch);
1670 if (list_empty(&ch->free_tx))
1673 /* Initiator responses to target requests do not consume credits */
1674 if (iu_type != SRP_IU_RSP) {
1675 if (ch->req_lim <= rsv) {
1676 ++target->zero_req_lim;
1683 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1684 list_del(&iu->list);
1688 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1690 struct srp_target_port *target = ch->target;
1692 struct ib_send_wr wr, *bad_wr;
1694 list.addr = iu->dma;
1696 list.lkey = target->lkey;
1699 wr.wr_id = (uintptr_t) iu;
1702 wr.opcode = IB_WR_SEND;
1703 wr.send_flags = IB_SEND_SIGNALED;
1705 return ib_post_send(ch->qp, &wr, &bad_wr);
1708 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1710 struct srp_target_port *target = ch->target;
1711 struct ib_recv_wr wr, *bad_wr;
1714 list.addr = iu->dma;
1715 list.length = iu->size;
1716 list.lkey = target->lkey;
1719 wr.wr_id = (uintptr_t) iu;
1723 return ib_post_recv(ch->qp, &wr, &bad_wr);
1726 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1728 struct srp_target_port *target = ch->target;
1729 struct srp_request *req;
1730 struct scsi_cmnd *scmnd;
1731 unsigned long flags;
1733 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1734 spin_lock_irqsave(&ch->lock, flags);
1735 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1736 spin_unlock_irqrestore(&ch->lock, flags);
1738 ch->tsk_mgmt_status = -1;
1739 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1740 ch->tsk_mgmt_status = rsp->data[3];
1741 complete(&ch->tsk_mgmt_done);
1743 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1745 req = (void *)scmnd->host_scribble;
1746 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1749 shost_printk(KERN_ERR, target->scsi_host,
1750 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1751 rsp->tag, ch - target->ch, ch->qp->qp_num);
1753 spin_lock_irqsave(&ch->lock, flags);
1754 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1755 spin_unlock_irqrestore(&ch->lock, flags);
1759 scmnd->result = rsp->status;
1761 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1762 memcpy(scmnd->sense_buffer, rsp->data +
1763 be32_to_cpu(rsp->resp_data_len),
1764 min_t(int, be32_to_cpu(rsp->sense_data_len),
1765 SCSI_SENSE_BUFFERSIZE));
1768 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1769 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1770 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1771 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1773 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1775 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1777 srp_free_req(ch, req, scmnd,
1778 be32_to_cpu(rsp->req_lim_delta));
1780 scmnd->host_scribble = NULL;
1781 scmnd->scsi_done(scmnd);
1785 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1788 struct srp_target_port *target = ch->target;
1789 struct ib_device *dev = target->srp_host->srp_dev->dev;
1790 unsigned long flags;
1794 spin_lock_irqsave(&ch->lock, flags);
1795 ch->req_lim += req_delta;
1796 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1797 spin_unlock_irqrestore(&ch->lock, flags);
1800 shost_printk(KERN_ERR, target->scsi_host, PFX
1801 "no IU available to send response\n");
1805 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1806 memcpy(iu->buf, rsp, len);
1807 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1809 err = srp_post_send(ch, iu, len);
1811 shost_printk(KERN_ERR, target->scsi_host, PFX
1812 "unable to post response: %d\n", err);
1813 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1819 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1820 struct srp_cred_req *req)
1822 struct srp_cred_rsp rsp = {
1823 .opcode = SRP_CRED_RSP,
1826 s32 delta = be32_to_cpu(req->req_lim_delta);
1828 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1829 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1830 "problems processing SRP_CRED_REQ\n");
1833 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1834 struct srp_aer_req *req)
1836 struct srp_target_port *target = ch->target;
1837 struct srp_aer_rsp rsp = {
1838 .opcode = SRP_AER_RSP,
1841 s32 delta = be32_to_cpu(req->req_lim_delta);
1843 shost_printk(KERN_ERR, target->scsi_host, PFX
1844 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1846 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1847 shost_printk(KERN_ERR, target->scsi_host, PFX
1848 "problems processing SRP_AER_REQ\n");
1851 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1853 struct srp_target_port *target = ch->target;
1854 struct ib_device *dev = target->srp_host->srp_dev->dev;
1855 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1859 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1862 opcode = *(u8 *) iu->buf;
1865 shost_printk(KERN_ERR, target->scsi_host,
1866 PFX "recv completion, opcode 0x%02x\n", opcode);
1867 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1868 iu->buf, wc->byte_len, true);
1873 srp_process_rsp(ch, iu->buf);
1877 srp_process_cred_req(ch, iu->buf);
1881 srp_process_aer_req(ch, iu->buf);
1885 /* XXX Handle target logout */
1886 shost_printk(KERN_WARNING, target->scsi_host,
1887 PFX "Got target logout request\n");
1891 shost_printk(KERN_WARNING, target->scsi_host,
1892 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1896 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1899 res = srp_post_recv(ch, iu);
1901 shost_printk(KERN_ERR, target->scsi_host,
1902 PFX "Recv failed with error code %d\n", res);
1906 * srp_tl_err_work() - handle a transport layer error
1907 * @work: Work structure embedded in an SRP target port.
1909 * Note: This function may get invoked before the rport has been created,
1910 * hence the target->rport test.
1912 static void srp_tl_err_work(struct work_struct *work)
1914 struct srp_target_port *target;
1916 target = container_of(work, struct srp_target_port, tl_err_work);
1918 srp_start_tl_fail_timers(target->rport);
1921 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1922 bool send_err, struct srp_rdma_ch *ch)
1924 struct srp_target_port *target = ch->target;
1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1931 if (target->connected && !target->qp_in_error) {
1932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1933 shost_printk(KERN_ERR, target->scsi_host, PFX
1934 "LOCAL_INV failed with status %d\n",
1936 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1937 shost_printk(KERN_ERR, target->scsi_host, PFX
1938 "FAST_REG_MR failed status %d\n",
1941 shost_printk(KERN_ERR, target->scsi_host,
1942 PFX "failed %s status %d for iu %p\n",
1943 send_err ? "send" : "receive",
1944 wc_status, (void *)(uintptr_t)wr_id);
1946 queue_work(system_long_wq, &target->tl_err_work);
1948 target->qp_in_error = true;
1951 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1953 struct srp_rdma_ch *ch = ch_ptr;
1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
1958 if (likely(wc.status == IB_WC_SUCCESS)) {
1959 srp_handle_recv(ch, &wc);
1961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1966 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1968 struct srp_rdma_ch *ch = ch_ptr;
1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
1973 if (likely(wc.status == IB_WC_SUCCESS)) {
1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1975 list_add(&iu->list, &ch->free_tx);
1977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1982 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1984 struct srp_target_port *target = host_to_target(shost);
1985 struct srp_rport *rport = target->rport;
1986 struct srp_rdma_ch *ch;
1987 struct srp_request *req;
1989 struct srp_cmd *cmd;
1990 struct ib_device *dev;
1991 unsigned long flags;
1995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1998 * The SCSI EH thread is the only context from which srp_queuecommand()
1999 * can get invoked for blocked devices (SDEV_BLOCK /
2000 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2001 * locking the rport mutex if invoked from inside the SCSI EH.
2004 mutex_lock(&rport->mutex);
2006 scmnd->result = srp_chkready(target->rport);
2007 if (unlikely(scmnd->result))
2010 WARN_ON_ONCE(scmnd->request->tag < 0);
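/*
 * The block layer tag encodes both the hardware queue number, used below to
 * select the RDMA channel, and the per-queue tag, used as an index into that
 * channel's request ring.
 */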
2011 tag = blk_mq_unique_tag(scmnd->request);
2012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
2018 spin_lock_irqsave(&ch->lock, flags);
2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2020 spin_unlock_irqrestore(&ch->lock, flags);
2025 req = &ch->req_ring[idx];
2026 dev = target->srp_host->srp_dev->dev;
2027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2030 scmnd->host_scribble = (void *) req;
2033 memset(cmd, 0, sizeof *cmd);
2035 cmd->opcode = SRP_CMD;
2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
2038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2043 len = srp_map_data(scmnd, ch, req);
2045 shost_printk(KERN_ERR, target->scsi_host,
2046 PFX "Failed to map data (%d)\n", len);
2048 * If we ran out of memory descriptors (-ENOMEM) because an
2049 * application is queuing many requests with more than
2050 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2051 * to reduce queue depth temporarily.
2053 scmnd->result = len == -ENOMEM ?
2054 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2061 if (srp_post_send(ch, iu, len)) {
2062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2070 mutex_unlock(&rport->mutex);
2075 srp_unmap_data(scmnd, ch, req);
2078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2081 * Ensure that the loops that iterate over the request ring cannot
2082 * encounter a dangling SCSI command pointer.
2087 if (scmnd->result) {
2088 scmnd->scsi_done(scmnd);
2091 ret = SCSI_MLQUEUE_HOST_BUSY;
2098 * Note: the resources allocated in this function are freed in
2101 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2103 struct srp_target_port *target = ch->target;
2106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2115 for (i = 0; i < target->queue_size; ++i) {
2116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2118 GFP_KERNEL, DMA_FROM_DEVICE);
2119 if (!ch->rx_ring[i])
2123 for (i = 0; i < target->queue_size; ++i) {
2124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2126 GFP_KERNEL, DMA_TO_DEVICE);
2127 if (!ch->tx_ring[i])
2130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2136 for (i = 0; i < target->queue_size; ++i) {
2137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2151 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2153 uint64_t T_tr_ns, max_compl_time_ms;
2154 uint32_t rq_tmo_jiffies;
2157 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2158 * table 91), both the QP timeout and the retry count have to be set
2159 * for RC QP's during the RTR to RTS transition.
2161 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2162 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2165 * Set target->rq_tmo_jiffies to one second more than the largest time
2166 * it can take before an error completion is generated. See also
2167 * C9-140..142 in the IBTA spec for more information about how to
2168 * convert the QP Local ACK Timeout value to nanoseconds.
2170 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2171 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2172 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2173 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
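/*
 * Example with illustrative values: for qp_attr->timeout == 14 and
 * qp_attr->retry_cnt == 7, T_tr = 4096 * 2^14 ns ~= 67 ms, so the worst-case
 * completion time is 7 * 4 * 67 ms ~= 1.9 s and rq_tmo_jiffies corresponds
 * to roughly 2.9 seconds.
 */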
2175 return rq_tmo_jiffies;
2178 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2179 struct srp_login_rsp *lrsp,
2180 struct srp_rdma_ch *ch)
2182 struct srp_target_port *target = ch->target;
2183 struct ib_qp_attr *qp_attr = NULL;
2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
2189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2193 * Reserve credits for task management so we don't
2194 * bounce requests back to the SCSI mid-layer.
2196 target->scsi_host->can_queue
2197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2198 target->scsi_host->can_queue);
2199 target->scsi_host->cmd_per_lun
2200 = min_t(int, target->scsi_host->can_queue,
2201 target->scsi_host->cmd_per_lun);
2203 shost_printk(KERN_WARNING, target->scsi_host,
2204 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2210 ret = srp_alloc_iu_bufs(ch);
2216 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2220 qp_attr->qp_state = IB_QPS_RTR;
2221 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2229 for (i = 0; i < target->queue_size; i++) {
2230 struct srp_iu *iu = ch->rx_ring[i];
2232 ret = srp_post_recv(ch, iu);
2237 qp_attr->qp_state = IB_QPS_RTS;
2238 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2248 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2257 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2258 struct ib_cm_event *event,
2259 struct srp_rdma_ch *ch)
2261 struct srp_target_port *target = ch->target;
2262 struct Scsi_Host *shost = target->scsi_host;
2263 struct ib_class_port_info *cpi;
2266 switch (event->param.rej_rcvd.reason) {
2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2268 cpi = event->param.rej_rcvd.ari;
2269 ch->path.dlid = cpi->redirect_lid;
2270 ch->path.pkey = cpi->redirect_pkey;
2271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2274 ch->status = ch->path.dlid ?
2275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2278 case IB_CM_REJ_PORT_REDIRECT:
2279 if (srp_target_is_topspin(target)) {
2281 * Topspin/Cisco SRP gateways incorrectly send
2282 * reject reason code 25 when they mean 24
2285 memcpy(ch->path.dgid.raw,
2286 event->param.rej_rcvd.ari, 16);
2288 shost_printk(KERN_DEBUG, shost,
2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2291 be64_to_cpu(ch->path.dgid.global.interface_id));
2293 ch->status = SRP_PORT_REDIRECT;
2295 shost_printk(KERN_WARNING, shost,
2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2297 ch->status = -ECONNRESET;
2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2302 shost_printk(KERN_WARNING, shost,
2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2304 ch->status = -ECONNRESET;
2307 case IB_CM_REJ_CONSUMER_DEFINED:
2308 opcode = *(u8 *) event->private_data;
2309 if (opcode == SRP_LOGIN_REJ) {
2310 struct srp_login_rej *rej = event->private_data;
2311 u32 reason = be32_to_cpu(rej->reason);
2313 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2314 shost_printk(KERN_WARNING, shost,
2315 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2317 shost_printk(KERN_WARNING, shost, PFX
2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2320 target->orig_dgid.raw, reason);
2322 shost_printk(KERN_WARNING, shost,
2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2324 " opcode 0x%02x\n", opcode);
2325 ch->status = -ECONNRESET;
2328 case IB_CM_REJ_STALE_CONN:
2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2330 ch->status = SRP_STALE_CONN;
2334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2335 event->param.rej_rcvd.reason);
2336 ch->status = -ECONNRESET;
2340 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
2346 switch (event->event) {
2347 case IB_CM_REQ_ERROR:
2348 shost_printk(KERN_DEBUG, target->scsi_host,
2349 PFX "Sending CM REQ failed\n");
2351 ch->status = -ECONNRESET;
2354 case IB_CM_REP_RECEIVED:
2356 srp_cm_rep_handler(cm_id, event->private_data, ch);
2359 case IB_CM_REJ_RECEIVED:
2360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2363 srp_cm_rej_handler(cm_id, event, ch);
2366 case IB_CM_DREQ_RECEIVED:
2367 shost_printk(KERN_WARNING, target->scsi_host,
2368 PFX "DREQ received - connection closed\n");
2369 srp_change_conn_state(target, false);
2370 if (ib_send_cm_drep(cm_id, NULL, 0))
2371 shost_printk(KERN_ERR, target->scsi_host,
2372 PFX "Sending CM DREP failed\n");
2373 queue_work(system_long_wq, &target->tl_err_work);
2376 case IB_CM_TIMEWAIT_EXIT:
2377 shost_printk(KERN_ERR, target->scsi_host,
2378 PFX "connection closed\n");
2384 case IB_CM_MRA_RECEIVED:
2385 case IB_CM_DREQ_ERROR:
2386 case IB_CM_DREP_RECEIVED:
2390 shost_printk(KERN_WARNING, target->scsi_host,
2391 PFX "Unhandled CM event %d\n", event->event);
2396 complete(&ch->done);
2402 * srp_change_queue_depth - set the device queue depth
2403 * @sdev: scsi device struct
2404 * @qdepth: requested queue depth
2406 * Returns queue depth.
2409 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2411 if (!sdev->tagged_supported)
2413 return scsi_change_queue_depth(sdev, qdepth);
2416 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2417 unsigned int lun, u8 func)
2419 struct srp_target_port *target = ch->target;
2420 struct srp_rport *rport = target->rport;
2421 struct ib_device *dev = target->srp_host->srp_dev->dev;
2423 struct srp_tsk_mgmt *tsk_mgmt;
2425 if (!target->connected || target->qp_in_error)
2428 init_completion(&ch->tsk_mgmt_done);
2431 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2432 * invoked while a task management function is being sent.
2434 mutex_lock(&rport->mutex);
2435 spin_lock_irq(&ch->lock);
2436 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2437 spin_unlock_irq(&ch->lock);
2440 mutex_unlock(&rport->mutex);
2445 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2448 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2450 tsk_mgmt->opcode = SRP_TSK_MGMT;
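/* Single-level LUN format: the LUN occupies the most significant 16 bits
 * of the 64-bit LUN field, which become the first two bytes on the wire
 * after the byte swap below. */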
2451 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2452 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2453 tsk_mgmt->tsk_mgmt_func = func;
2454 tsk_mgmt->task_tag = req_tag;
2456 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2458 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2459 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2460 mutex_unlock(&rport->mutex);
2464 mutex_unlock(&rport->mutex);
2466 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2467 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2473 static int srp_abort(struct scsi_cmnd *scmnd)
2475 struct srp_target_port *target = host_to_target(scmnd->device->host);
2476 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2479 struct srp_rdma_ch *ch;
2482 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
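/* The block layer encodes the hardware queue index in the unique tag;
 * that index selects the RDMA channel the command was issued on. */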
2486 tag = blk_mq_unique_tag(scmnd->request);
2487 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2488 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2490 ch = &target->ch[ch_idx];
2491 if (!srp_claim_req(ch, req, NULL, scmnd))
2493 shost_printk(KERN_ERR, target->scsi_host,
2494 "Sending SRP abort for tag %#x\n", tag);
2495 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2496 SRP_TSK_ABORT_TASK) == 0)
2498 else if (target->rport->state == SRP_RPORT_LOST)
2502 srp_free_req(ch, req, scmnd, 0);
2503 scmnd->result = DID_ABORT << 16;
2504 scmnd->scsi_done(scmnd);
2509 static int srp_reset_device(struct scsi_cmnd *scmnd)
2511 struct srp_target_port *target = host_to_target(scmnd->device->host);
2512 struct srp_rdma_ch *ch;
int i, j;
2515 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2517 ch = &target->ch[0];
2518 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2521 if (ch->tsk_mgmt_status)
2524 for (i = 0; i < target->ch_count; i++) {
2525 ch = &target->ch[i];
2526 for (j = 0; j < target->req_ring_size; ++j) {
2527 struct srp_request *req = &ch->req_ring[j];
2529 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2536 static int srp_reset_host(struct scsi_cmnd *scmnd)
2538 struct srp_target_port *target = host_to_target(scmnd->device->host);
2540 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2542 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2545 static int srp_slave_configure(struct scsi_device *sdev)
2547 struct Scsi_Host *shost = sdev->host;
2548 struct srp_target_port *target = host_to_target(shost);
2549 struct request_queue *q = sdev->request_queue;
2550 unsigned long timeout;
2552 if (sdev->type == TYPE_DISK) {
2553 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2554 blk_queue_rq_timeout(q, timeout);
2560 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2563 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2565 return sprintf(buf, "0x%016llx\n",
2566 (unsigned long long) be64_to_cpu(target->id_ext));
2569 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2572 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2574 return sprintf(buf, "0x%016llx\n",
2575 (unsigned long long) be64_to_cpu(target->ioc_guid));
2578 static ssize_t show_service_id(struct device *dev,
2579 struct device_attribute *attr, char *buf)
2581 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2583 return sprintf(buf, "0x%016llx\n",
2584 (unsigned long long) be64_to_cpu(target->service_id));
2587 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2595 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2600 return sprintf(buf, "%pI6\n", target->sgid.raw);
2603 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2607 struct srp_rdma_ch *ch = &target->ch[0];
2609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2612 static ssize_t show_orig_dgid(struct device *dev,
2613 struct device_attribute *attr, char *buf)
2615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2620 static ssize_t show_req_lim(struct device *dev,
2621 struct device_attribute *attr, char *buf)
2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
2627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2631 return sprintf(buf, "%d\n", req_lim);
2634 static ssize_t show_zero_req_lim(struct device *dev,
2635 struct device_attribute *attr, char *buf)
2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2639 return sprintf(buf, "%d\n", target->zero_req_lim);
2642 static ssize_t show_local_ib_port(struct device *dev,
2643 struct device_attribute *attr, char *buf)
2645 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2647 return sprintf(buf, "%d\n", target->srp_host->port);
2650 static ssize_t show_local_ib_device(struct device *dev,
2651 struct device_attribute *attr, char *buf)
2653 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2658 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2663 return sprintf(buf, "%d\n", target->ch_count);
2666 static ssize_t show_comp_vector(struct device *dev,
2667 struct device_attribute *attr, char *buf)
2669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2671 return sprintf(buf, "%d\n", target->comp_vector);
2674 static ssize_t show_tl_retry_count(struct device *dev,
2675 struct device_attribute *attr, char *buf)
2677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2679 return sprintf(buf, "%d\n", target->tl_retry_count);
2682 static ssize_t show_cmd_sg_entries(struct device *dev,
2683 struct device_attribute *attr, char *buf)
2685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2687 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2690 static ssize_t show_allow_ext_sg(struct device *dev,
2691 struct device_attribute *attr, char *buf)
2693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2695 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2698 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2699 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2700 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2701 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2702 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2703 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2704 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2705 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2706 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2707 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2708 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2709 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2710 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2711 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2712 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2713 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2715 static struct device_attribute *srp_host_attrs[] = {
2718 &dev_attr_service_id,
2722 &dev_attr_orig_dgid,
2724 &dev_attr_zero_req_lim,
2725 &dev_attr_local_ib_port,
2726 &dev_attr_local_ib_device,
2728 &dev_attr_comp_vector,
2729 &dev_attr_tl_retry_count,
2730 &dev_attr_cmd_sg_entries,
2731 &dev_attr_allow_ext_sg,
2735 static struct scsi_host_template srp_template = {
2736 .module = THIS_MODULE,
2737 .name = "InfiniBand SRP initiator",
2738 .proc_name = DRV_NAME,
2739 .slave_configure = srp_slave_configure,
2740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand,
2742 .change_queue_depth = srp_change_queue_depth,
2743 .eh_abort_handler = srp_abort,
2744 .eh_device_reset_handler = srp_reset_device,
2745 .eh_host_reset_handler = srp_reset_host,
2746 .skip_settle_delay = true,
2747 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2748 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2750 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2751 .use_clustering = ENABLE_CLUSTERING,
2752 .shost_attrs = srp_host_attrs,
2754 .track_queue_depth = 1,
2757 static int srp_sdev_count(struct Scsi_Host *host)
2759 struct scsi_device *sdev;
2762 shost_for_each_device(sdev, host)
2768 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2770 struct srp_rport_identifiers ids;
2771 struct srp_rport *rport;
2773 target->state = SRP_TARGET_SCANNING;
2774 sprintf(target->target_name, "SRP.T10:%016llX",
2775 (unsigned long long) be64_to_cpu(target->id_ext));
2777 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2780 memcpy(ids.port_id, &target->id_ext, 8);
2781 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2782 ids.roles = SRP_RPORT_ROLE_TARGET;
2783 rport = srp_rport_add(target->scsi_host, &ids);
2784 if (IS_ERR(rport)) {
2785 scsi_remove_host(target->scsi_host);
2786 return PTR_ERR(rport);
2789 rport->lld_data = target;
2790 target->rport = rport;
2792 spin_lock(&host->target_lock);
2793 list_add_tail(&target->list, &host->target_list);
2794 spin_unlock(&host->target_lock);
2796 scsi_scan_target(&target->scsi_host->shost_gendev,
2797 0, target->scsi_id, SCAN_WILD_CARD, 0);
2799 if (!target->connected || target->qp_in_error) {
2800 shost_printk(KERN_INFO, target->scsi_host,
2801 PFX "SCSI scan failed - removing SCSI host\n");
2802 srp_queue_remove_work(target);
2806 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2807 dev_name(&target->scsi_host->shost_gendev),
2808 srp_sdev_count(target->scsi_host));
2810 spin_lock_irq(&target->lock);
2811 if (target->state == SRP_TARGET_SCANNING)
2812 target->state = SRP_TARGET_LIVE;
2813 spin_unlock_irq(&target->lock);
2819 static void srp_release_dev(struct device *dev)
2821 struct srp_host *host =
2822 container_of(dev, struct srp_host, dev);
2824 complete(&host->released);
2827 static struct class srp_class = {
2828 .name = "infiniband_srp",
2829 .dev_release = srp_release_dev
2833 * srp_conn_unique() - check whether the connection to a target is unique
2835 * @target: SRP target port.
2837 static bool srp_conn_unique(struct srp_host *host,
2838 struct srp_target_port *target)
2840 struct srp_target_port *t;
2843 if (target->state == SRP_TARGET_REMOVED)
2848 spin_lock(&host->target_lock);
2849 list_for_each_entry(t, &host->target_list, list) {
2851 target->id_ext == t->id_ext &&
2852 target->ioc_guid == t->ioc_guid &&
2853 target->initiator_ext == t->initiator_ext) {
2858 spin_unlock(&host->target_lock);
2865 * Target ports are added by writing
2867 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2868 * pkey=<P_Key>,service_id=<service ID>
2870 * to the add_target sysfs attribute.
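* A minimal usage sketch (all values hypothetical; the HCA name and port
* are assumed to be mlx4_0 and 1):
*
*   echo "id_ext=200100a0b8100044,ioc_guid=00a0b8100044,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=200100a0b8100044" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target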
2874 SRP_OPT_ID_EXT = 1 << 0,
2875 SRP_OPT_IOC_GUID = 1 << 1,
2876 SRP_OPT_DGID = 1 << 2,
2877 SRP_OPT_PKEY = 1 << 3,
2878 SRP_OPT_SERVICE_ID = 1 << 4,
2879 SRP_OPT_MAX_SECT = 1 << 5,
2880 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2881 SRP_OPT_IO_CLASS = 1 << 7,
2882 SRP_OPT_INITIATOR_EXT = 1 << 8,
2883 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2884 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2885 SRP_OPT_SG_TABLESIZE = 1 << 11,
2886 SRP_OPT_COMP_VECTOR = 1 << 12,
2887 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2888 SRP_OPT_QUEUE_SIZE = 1 << 14,
2889 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2893 SRP_OPT_SERVICE_ID),
2896 static const match_table_t srp_opt_tokens = {
2897 { SRP_OPT_ID_EXT, "id_ext=%s" },
2898 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2899 { SRP_OPT_DGID, "dgid=%s" },
2900 { SRP_OPT_PKEY, "pkey=%x" },
2901 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2902 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2903 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2904 { SRP_OPT_IO_CLASS, "io_class=%x" },
2905 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2906 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2907 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2908 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2909 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2910 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2911 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2912 { SRP_OPT_ERR, NULL }
2915 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2917 char *options, *sep_opt;
2920 substring_t args[MAX_OPT_ARGS];
2926 options = kstrdup(buf, GFP_KERNEL);
2931 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2935 token = match_token(p, srp_opt_tokens, args);
2939 case SRP_OPT_ID_EXT:
2940 p = match_strdup(args);
2945 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2949 case SRP_OPT_IOC_GUID:
2950 p = match_strdup(args);
2955 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2960 p = match_strdup(args);
2965 if (strlen(p) != 32) {
2966 pr_warn("bad dest GID parameter '%s'\n", p);
2971 for (i = 0; i < 16; ++i) {
2972 strlcpy(dgid, p + i * 2, sizeof(dgid));
2973 if (sscanf(dgid, "%hhx",
2974 &target->orig_dgid.raw[i]) < 1) {
2984 if (match_hex(args, &token)) {
2985 pr_warn("bad P_Key parameter '%s'\n", p);
2988 target->pkey = cpu_to_be16(token);
2991 case SRP_OPT_SERVICE_ID:
2992 p = match_strdup(args);
2997 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3001 case SRP_OPT_MAX_SECT:
3002 if (match_int(args, &token)) {
3003 pr_warn("bad max sect parameter '%s'\n", p);
3006 target->scsi_host->max_sectors = token;
3009 case SRP_OPT_QUEUE_SIZE:
3010 if (match_int(args, &token) || token < 1) {
3011 pr_warn("bad queue_size parameter '%s'\n", p);
3014 target->scsi_host->can_queue = token;
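/* The queue size covers the requested number of commands plus extra
 * slots for SRP responses and task management requests. */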
3015 target->queue_size = token + SRP_RSP_SQ_SIZE +
3016 SRP_TSK_MGMT_SQ_SIZE;
3017 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3018 target->scsi_host->cmd_per_lun = token;
3021 case SRP_OPT_MAX_CMD_PER_LUN:
3022 if (match_int(args, &token) || token < 1) {
3023 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3027 target->scsi_host->cmd_per_lun = token;
3030 case SRP_OPT_IO_CLASS:
3031 if (match_hex(args, &token)) {
3032 pr_warn("bad IO class parameter '%s'\n", p);
3035 if (token != SRP_REV10_IB_IO_CLASS &&
3036 token != SRP_REV16A_IB_IO_CLASS) {
3037 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3038 token, SRP_REV10_IB_IO_CLASS,
3039 SRP_REV16A_IB_IO_CLASS);
3042 target->io_class = token;
3045 case SRP_OPT_INITIATOR_EXT:
3046 p = match_strdup(args);
3051 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3055 case SRP_OPT_CMD_SG_ENTRIES:
3056 if (match_int(args, &token) || token < 1 || token > 255) {
3057 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3061 target->cmd_sg_cnt = token;
3064 case SRP_OPT_ALLOW_EXT_SG:
3065 if (match_int(args, &token)) {
3066 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3069 target->allow_ext_sg = !!token;
3072 case SRP_OPT_SG_TABLESIZE:
3073 if (match_int(args, &token) || token < 1 ||
3074 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3075 pr_warn("bad max sg_tablesize parameter '%s'\n",
3079 target->sg_tablesize = token;
3082 case SRP_OPT_COMP_VECTOR:
3083 if (match_int(args, &token) || token < 0) {
3084 pr_warn("bad comp_vector parameter '%s'\n", p);
3087 target->comp_vector = token;
3090 case SRP_OPT_TL_RETRY_COUNT:
3091 if (match_int(args, &token) || token < 2 || token > 7) {
3092 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3096 target->tl_retry_count = token;
3100 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3106 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3109 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3110 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3111 !(srp_opt_tokens[i].token & opt_mask))
3112 pr_warn("target creation request is missing parameter '%s'\n",
3113 srp_opt_tokens[i].pattern);
3115 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3116 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3117 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3118 target->scsi_host->cmd_per_lun,
3119 target->scsi_host->can_queue);
3126 static ssize_t srp_create_target(struct device *dev,
3127 struct device_attribute *attr,
3128 const char *buf, size_t count)
3130 struct srp_host *host =
3131 container_of(dev, struct srp_host, dev);
3132 struct Scsi_Host *target_host;
3133 struct srp_target_port *target;
3134 struct srp_rdma_ch *ch;
3135 struct srp_device *srp_dev = host->srp_dev;
3136 struct ib_device *ibdev = srp_dev->dev;
3137 int ret, node_idx, node, cpu, i;
3138 bool multich = false;
3140 target_host = scsi_host_alloc(&srp_template,
3141 sizeof (struct srp_target_port));
3145 target_host->transportt = ib_srp_transport_template;
3146 target_host->max_channel = 0;
3147 target_host->max_id = 1;
3148 target_host->max_lun = SRP_MAX_LUN;
3149 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3151 target = host_to_target(target_host);
3153 target->io_class = SRP_REV16A_IB_IO_CLASS;
3154 target->scsi_host = target_host;
3155 target->srp_host = host;
3156 target->lkey = host->srp_dev->mr->lkey;
3157 target->rkey = host->srp_dev->mr->rkey;
3158 target->cmd_sg_cnt = cmd_sg_entries;
3159 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3160 target->allow_ext_sg = allow_ext_sg;
3161 target->tl_retry_count = 7;
3162 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3165 * Prevent the SCSI host from being removed by srp_remove_target()
3166 * before this function returns.
3168 scsi_host_get(target->scsi_host);
3170 mutex_lock(&host->add_target_mutex);
3172 ret = srp_parse_options(buf, target);
3176 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3180 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3182 if (!srp_conn_unique(target->srp_host, target)) {
3183 shost_printk(KERN_INFO, target->scsi_host,
3184 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3185 be64_to_cpu(target->id_ext),
3186 be64_to_cpu(target->ioc_guid),
3187 be64_to_cpu(target->initiator_ext));
3192 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3193 target->cmd_sg_cnt < target->sg_tablesize) {
3194 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3195 target->sg_tablesize = target->cmd_sg_cnt;
3198 target_host->sg_tablesize = target->sg_tablesize;
3199 target->indirect_size = target->sg_tablesize *
3200 sizeof (struct srp_direct_buf);
3201 target->max_iu_len = sizeof (struct srp_cmd) +
3202 sizeof (struct srp_indirect_buf) +
3203 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
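/* With the default cmd_sg_cnt of 12 and the usual SRP descriptor sizes
 * (48-byte SRP_CMD, 20-byte indirect header, 16 bytes per direct
 * descriptor) this amounts to 48 + 20 + 12 * 16 = 260 bytes. */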
3205 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3206 INIT_WORK(&target->remove_work, srp_remove_work);
3207 spin_lock_init(&target->lock);
3208 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3213 target->ch_count = max_t(unsigned, num_online_nodes(),
3215 min(4 * num_online_nodes(),
3216 ibdev->num_comp_vectors),
3217 num_online_cpus()));
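/* In outline: aim for min(4 * nodes, completion vectors, online CPUs)
 * channels, but never fewer than one channel per online NUMA node. */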
3218 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3224 for_each_online_node(node) {
3225 const int ch_start = (node_idx * target->ch_count /
3226 num_online_nodes());
3227 const int ch_end = ((node_idx + 1) * target->ch_count /
3228 num_online_nodes());
3229 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3230 num_online_nodes() + target->comp_vector)
3231 % ibdev->num_comp_vectors;
3232 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3233 num_online_nodes() + target->comp_vector)
3234 % ibdev->num_comp_vectors;
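/* Each online node gets a contiguous slice of the channels and of the
 * completion vectors. Example (hypothetical numbers): with two nodes,
 * ch_count = 4, eight vectors and comp_vector = 0, node 0 gets channels
 * 0-1 starting at vector 0 and node 1 gets channels 2-3 starting at
 * vector 4. */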
3237 for_each_online_cpu(cpu) {
3238 if (cpu_to_node(cpu) != node)
3240 if (ch_start + cpu_idx >= ch_end)
3242 ch = &target->ch[ch_start + cpu_idx];
3243 ch->target = target;
3244 ch->comp_vector = cv_start == cv_end ? cv_start :
3245 cv_start + cpu_idx % (cv_end - cv_start);
3246 spin_lock_init(&ch->lock);
3247 INIT_LIST_HEAD(&ch->free_tx);
3248 ret = srp_new_cm_id(ch);
3250 goto err_disconnect;
3252 ret = srp_create_ch_ib(ch);
3254 goto err_disconnect;
3256 ret = srp_alloc_req_data(ch);
3258 goto err_disconnect;
3260 ret = srp_connect_ch(ch, multich);
3262 shost_printk(KERN_ERR, target->scsi_host,
3263 PFX "Connection %d/%d failed\n",
3266 if (node_idx == 0 && cpu_idx == 0) {
3267 goto err_disconnect;
3269 srp_free_ch_ib(target, ch);
3270 srp_free_req_data(target, ch);
3271 target->ch_count = ch - target->ch;
3282 target->scsi_host->nr_hw_queues = target->ch_count;
3284 ret = srp_add_target(host, target);
3286 goto err_disconnect;
3288 if (target->state != SRP_TARGET_REMOVED) {
3289 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3290 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3291 be64_to_cpu(target->id_ext),
3292 be64_to_cpu(target->ioc_guid),
3293 be16_to_cpu(target->pkey),
3294 be64_to_cpu(target->service_id),
3295 target->sgid.raw, target->orig_dgid.raw);
3301 mutex_unlock(&host->add_target_mutex);
3303 scsi_host_put(target->scsi_host);
3308 srp_disconnect_target(target);
3310 for (i = 0; i < target->ch_count; i++) {
3311 ch = &target->ch[i];
3312 srp_free_ch_ib(target, ch);
3313 srp_free_req_data(target, ch);
3319 scsi_host_put(target_host);
3323 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3325 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3328 struct srp_host *host = container_of(dev, struct srp_host, dev);
3330 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3333 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3335 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3338 struct srp_host *host = container_of(dev, struct srp_host, dev);
3340 return sprintf(buf, "%d\n", host->port);
3343 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3345 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3347 struct srp_host *host;
3349 host = kzalloc(sizeof *host, GFP_KERNEL);
3353 INIT_LIST_HEAD(&host->target_list);
3354 spin_lock_init(&host->target_lock);
3355 init_completion(&host->released);
3356 mutex_init(&host->add_target_mutex);
3357 host->srp_dev = device;
3360 host->dev.class = &srp_class;
3361 host->dev.parent = device->dev->dma_device;
3362 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3364 if (device_register(&host->dev))
3366 if (device_create_file(&host->dev, &dev_attr_add_target))
3368 if (device_create_file(&host->dev, &dev_attr_ibdev))
3370 if (device_create_file(&host->dev, &dev_attr_port))
3376 device_unregister(&host->dev);
3384 static void srp_add_one(struct ib_device *device)
3386 struct srp_device *srp_dev;
3387 struct ib_device_attr *dev_attr;
3388 struct srp_host *host;
3389 int mr_page_shift, s, e, p;
3390 u64 max_pages_per_mr;
3392 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3396 if (ib_query_device(device, dev_attr)) {
3397 pr_warn("Query device failed for %s\n", device->name);
3401 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3405 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3406 device->map_phys_fmr && device->unmap_fmr);
3407 srp_dev->has_fr = (dev_attr->device_cap_flags &
3408 IB_DEVICE_MEM_MGT_EXTENSIONS);
3409 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3410 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3412 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3413 (!srp_dev->has_fmr || prefer_fr));
3416 * Use the smallest page size supported by the HCA, down to a
3417 * minimum of 4096 bytes. We're unlikely to build large sglists
3418 * out of smaller entries.
3420 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3421 srp_dev->mr_page_size = 1 << mr_page_shift;
3422 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
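/* E.g. if the smallest page size the HCA supports is 4 KiB then
 * mr_page_shift = 12, mr_page_size = 4096 and mr_page_mask = ~0xfffULL. */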
3423 max_pages_per_mr = dev_attr->max_mr_size;
3424 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3425 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3427 if (srp_dev->use_fast_reg) {
3428 srp_dev->max_pages_per_mr =
3429 min_t(u32, srp_dev->max_pages_per_mr,
3430 dev_attr->max_fast_reg_page_list_len);
3432 srp_dev->mr_max_size = srp_dev->mr_page_size *
3433 srp_dev->max_pages_per_mr;
3434 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3435 device->name, mr_page_shift, dev_attr->max_mr_size,
3436 dev_attr->max_fast_reg_page_list_len,
3437 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3439 INIT_LIST_HEAD(&srp_dev->dev_list);
3441 srp_dev->dev = device;
3442 srp_dev->pd = ib_alloc_pd(device);
3443 if (IS_ERR(srp_dev->pd))
3446 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3447 IB_ACCESS_LOCAL_WRITE |
3448 IB_ACCESS_REMOTE_READ |
3449 IB_ACCESS_REMOTE_WRITE);
3450 if (IS_ERR(srp_dev->mr))
3453 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3458 e = device->phys_port_cnt;
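/* A switch exposes only its management port (0); HCAs number their
 * physical ports 1..phys_port_cnt. */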
3461 for (p = s; p <= e; ++p) {
3462 host = srp_add_port(srp_dev, p);
3464 list_add_tail(&host->list, &srp_dev->dev_list);
3467 ib_set_client_data(device, &srp_client, srp_dev);
3472 ib_dealloc_pd(srp_dev->pd);
3481 static void srp_remove_one(struct ib_device *device)
3483 struct srp_device *srp_dev;
3484 struct srp_host *host, *tmp_host;
3485 struct srp_target_port *target;
3487 srp_dev = ib_get_client_data(device, &srp_client);
3491 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3492 device_unregister(&host->dev);
3494 * Wait for the sysfs entry to go away, so that no new
3495 * target ports can be created.
3497 wait_for_completion(&host->released);
3500 * Remove all target ports.
3502 spin_lock(&host->target_lock);
3503 list_for_each_entry(target, &host->target_list, list)
3504 srp_queue_remove_work(target);
3505 spin_unlock(&host->target_lock);
3508 * Wait for tl_err and target port removal tasks.
3510 flush_workqueue(system_long_wq);
3511 flush_workqueue(srp_remove_wq);
3516 ib_dereg_mr(srp_dev->mr);
3517 ib_dealloc_pd(srp_dev->pd);
3522 static struct srp_function_template ib_srp_transport_functions = {
3523 .has_rport_state = true,
3524 .reset_timer_if_blocked = true,
3525 .reconnect_delay = &srp_reconnect_delay,
3526 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3527 .dev_loss_tmo = &srp_dev_loss_tmo,
3528 .reconnect = srp_rport_reconnect,
3529 .rport_delete = srp_rport_delete,
3530 .terminate_rport_io = srp_terminate_io,
3533 static int __init srp_init_module(void)
3537 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3539 if (srp_sg_tablesize) {
3540 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3541 if (!cmd_sg_entries)
3542 cmd_sg_entries = srp_sg_tablesize;
3545 if (!cmd_sg_entries)
3546 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3548 if (cmd_sg_entries > 255) {
3549 pr_warn("Clamping cmd_sg_entries to 255\n");
3550 cmd_sg_entries = 255;
3553 if (!indirect_sg_entries)
3554 indirect_sg_entries = cmd_sg_entries;
3555 else if (indirect_sg_entries < cmd_sg_entries) {
3556 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3558 indirect_sg_entries = cmd_sg_entries;
3561 srp_remove_wq = create_workqueue("srp_remove");
3562 if (!srp_remove_wq) {
3568 ib_srp_transport_template =
3569 srp_attach_transport(&ib_srp_transport_functions);
3570 if (!ib_srp_transport_template)
3573 ret = class_register(&srp_class);
3575 pr_err("couldn't register class infiniband_srp\n");
3579 ib_sa_register_client(&srp_sa_client);
3581 ret = ib_register_client(&srp_client);
3583 pr_err("couldn't register IB client\n");
3591 ib_sa_unregister_client(&srp_sa_client);
3592 class_unregister(&srp_class);
3595 srp_release_transport(ib_srp_transport_template);
3598 destroy_workqueue(srp_remove_wq);
3602 static void __exit srp_cleanup_module(void)
3604 ib_unregister_client(&srp_client);
3605 ib_sa_unregister_client(&srp_sa_client);
3606 class_unregister(&srp_class);
3607 srp_release_transport(ib_srp_transport_template);
3608 destroy_workqueue(srp_remove_wq);
3611 module_init(srp_init_module);
3612 module_exit(srp_cleanup_module);