/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>

#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
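
/*
 * Illustration (hypothetical GUID): an ioc_guid of 0x0005ad0000001234 is
 * stored big-endian in memory as 00:05:ad:..., so its first three bytes
 * match topspin_oui above and the workarounds are applied.
 */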
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS | IB_QP_PORT);
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}
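
/*
 * Sizing illustration (hypothetical value): with scsi_host->can_queue == 64
 * the pool holds 64 FMRs and starts flushing dirty mappings once 16 of them
 * (pool_size / 4) are waiting to be invalidated.
 */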
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}
/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
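
/*
 * Typical descriptor lifecycle (sketch): srp_map_finish_fr() takes a
 * descriptor via srp_fr_pool_get() and posts a fast registration work
 * request for it; srp_unmap_data() later posts an IB_WR_LOCAL_INV work
 * request for desc->mr->rkey and returns the descriptor with
 * srp_fr_pool_put().
 */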
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}
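
/*
 * The flush of the marker receive work request above is what signals that
 * the QP has been drained: once the QP is in the error state, all posted
 * work requests complete with IB_WC_WR_FLUSH_ERR, so seeing SRP_LAST_WR_ID
 * in the receive completion path (see srp_handle_qp_err()) means no earlier
 * receive completion can still be pending.
 */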
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed. The SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	       = &ch->path;
	req->param.alternate_path      = NULL;
	req->param.service_id	       = target->service_id;
	req->param.qp_num	       = ch->qp->qp_num;
	req->param.qp_type	       = ch->qp->qp_type;
	req->param.private_data	       = &req->priv;
	req->param.private_data_len    = sizeof req->priv;
	req->param.flow_control	       = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist before
 * this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

	return 0;
}
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}
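
/*
 * Incrementing the 8-bit key portion of the rkey before every fast
 * registration (ib_inc_rkey() above) guards against stale requests: a
 * delayed RDMA operation that still carries the previous key value is
 * rejected by the HCA instead of hitting a remapped buffer.
 */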
static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	int ret = 0;

	WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && target->global_mr)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->global_mr->rkey);
	else
		ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size)
		ret = srp_finish_mapping(state, ch);
	return ret;
}
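
/*
 * Merging illustration (hypothetical addresses, 4 KB mr_page_size): an S/G
 * entry starting at DMA address 0x10000 can be appended to the MR being
 * built, while one starting at 0x10200 has a non-zero offset into its page,
 * so the MR under construction is closed first and the entry starts a new
 * one.
 */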
static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i, ret;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->fr.next = req->fr_list;
		state->fr.end = req->fr_list + target->cmd_sg_cnt;
	} else if (dev->use_fmr) {
		state->fmr.next = req->fmr_list;
		state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
	}

	if (dev->use_fast_reg || dev->use_fmr) {
		for_each_sg(scat, sg, count, i) {
			ret = srp_map_sg_entry(state, ch, sg, i);
			if (ret)
				goto out;
		}
		ret = srp_finish_mapping(state, ch);
		if (ret)
			goto out;
	} else {
		for_each_sg(scat, sg, count, i) {
			srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
				     ib_sg_dma_len(dev->dev, sg),
				     target->global_mr->rkey);
		}
	}

	req->nmdesc = state->nmdesc;
	ret = 0;

out:
	return ret;
}
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory block.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.pages = idb_pages;
	state.pages[0] = (req->indirect_dma_addr &
			  dev->mr_page_mask);
	state.npages = 1;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;
	ret = srp_finish_mapping(&state, ch);
	if (ret < 0)
		goto out;

	*idb_rkey = idb_desc.key;

out:
	return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && target->global_mr) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->global_mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_mr) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_mr->rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	return ret;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %s (%d) for iu %p\n",
				     send_err ? "send" : "receive",
				     ib_wc_status_msg(wc_status), wc_status,
				     (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
}
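
/*
 * Note on the completion model: the receive CQ is re-armed with
 * ib_req_notify_cq() before it is drained, while the send CQ is polled
 * opportunistically here and from __srp_get_tx_iu(), so send completions
 * are reaped without interrupts whenever a new IU is requested.
 */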
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Prevent the loops that iterate over the request ring from
	 * encountering a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
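
/*
 * Worked example (hypothetical QP attributes): with qp_attr->timeout == 14
 * the local ACK timeout is T_tr = 4096 ns * 2^14 ~= 67 ms, and with
 * qp_attr->retry_cnt == 7 the worst case before an error completion is
 * 7 * 4 * 67 ms ~= 1.9 s, so rq_tmo_jiffies corresponds to roughly 2.9 s.
 */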
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}
2343 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2345 struct srp_rdma_ch *ch = cm_id->context;
2346 struct srp_target_port *target = ch->target;
2349 switch (event->event) {
2350 case IB_CM_REQ_ERROR:
2351 shost_printk(KERN_DEBUG, target->scsi_host,
2352 PFX "Sending CM REQ failed\n");
2354 ch->status = -ECONNRESET;
2357 case IB_CM_REP_RECEIVED:
2359 srp_cm_rep_handler(cm_id, event->private_data, ch);
2362 case IB_CM_REJ_RECEIVED:
2363 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2366 srp_cm_rej_handler(cm_id, event, ch);
2369 case IB_CM_DREQ_RECEIVED:
2370 shost_printk(KERN_WARNING, target->scsi_host,
2371 PFX "DREQ received - connection closed\n");
2372 ch->connected = false;
2373 if (ib_send_cm_drep(cm_id, NULL, 0))
2374 shost_printk(KERN_ERR, target->scsi_host,
2375 PFX "Sending CM DREP failed\n");
2376 queue_work(system_long_wq, &target->tl_err_work);
2379 case IB_CM_TIMEWAIT_EXIT:
2380 shost_printk(KERN_ERR, target->scsi_host,
2381 PFX "connection closed\n");
2387 case IB_CM_MRA_RECEIVED:
2388 case IB_CM_DREQ_ERROR:
2389 case IB_CM_DREP_RECEIVED:
	break;

default:
	shost_printk(KERN_WARNING, target->scsi_host,
		     PFX "Unhandled CM event %d\n", event->event);
	break;
2399 complete(&ch->done);
* srp_change_queue_depth - set the device queue depth
2406 * @sdev: scsi device struct
2407 * @qdepth: requested queue depth
2409 * Returns queue depth.
2412 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
if (!sdev->tagged_supported)
	qdepth = 1;
return scsi_change_queue_depth(sdev, qdepth);
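/*
 * This callback is invoked by the SCSI midlayer, e.g. when the queue
 * depth is changed through sysfs (the path depends on the actual
 * H:C:T:L tuple of the device):
 *
 *   echo 32 > /sys/bus/scsi/devices/2:0:0:0/queue_depth
 *
 * For untagged devices the requested depth is forced to 1 before
 * scsi_change_queue_depth() is applied.
 */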
2419 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2422 struct srp_target_port *target = ch->target;
2423 struct srp_rport *rport = target->rport;
2424 struct ib_device *dev = target->srp_host->srp_dev->dev;
2426 struct srp_tsk_mgmt *tsk_mgmt;
if (!ch->connected || target->qp_in_error)
	return -1;

init_completion(&ch->tsk_mgmt_done);
* Lock the rport mutex to prevent srp_create_ch_ib() from being
* invoked while a task management function is being sent.
2437 mutex_lock(&rport->mutex);
2438 spin_lock_irq(&ch->lock);
2439 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2440 spin_unlock_irq(&ch->lock);
if (!iu) {
	mutex_unlock(&rport->mutex);
	return -1;
}
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
			   DMA_TO_DEVICE);
tsk_mgmt = iu->buf;
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2453 tsk_mgmt->opcode = SRP_TSK_MGMT;
2454 int_to_scsilun(lun, &tsk_mgmt->lun);
2455 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2456 tsk_mgmt->tsk_mgmt_func = func;
2457 tsk_mgmt->task_tag = req_tag;
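/*
 * Sketch of the information unit built above, per the SRP
 * specification (shown for a hypothetical ABORT TASK request):
 *
 *   opcode        = SRP_TSK_MGMT (0x01)
 *   tag           = req_tag | SRP_TAG_TSK_MGMT, so the response can
 *                   be told apart from normal command responses
 *   lun           = LUN in SCSI format via int_to_scsilun()
 *   tsk_mgmt_func = SRP_TSK_ABORT_TASK
 *   task_tag      = tag of the command to abort
 */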
ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
			      DMA_TO_DEVICE);
2461 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2462 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2463 mutex_unlock(&rport->mutex);
2467 mutex_unlock(&rport->mutex);
if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
				 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
	return -1;

return 0;
2476 static int srp_abort(struct scsi_cmnd *scmnd)
2478 struct srp_target_port *target = host_to_target(scmnd->device->host);
2479 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2482 struct srp_rdma_ch *ch;
2485 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2489 tag = blk_mq_unique_tag(scmnd->request);
2490 ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
	return SUCCESS;
ch = &target->ch[ch_idx];
if (!srp_claim_req(ch, req, NULL, scmnd))
	return SUCCESS;
2496 shost_printk(KERN_ERR, target->scsi_host,
2497 "Sending SRP abort for tag %#x\n", tag);
if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
		      SRP_TSK_ABORT_TASK) == 0)
	ret = SUCCESS;
else if (target->rport->state == SRP_RPORT_LOST)
	ret = FAST_IO_FAIL;
else
	ret = FAILED;
srp_free_req(ch, req, scmnd, 0);
2506 scmnd->result = DID_ABORT << 16;
2507 scmnd->scsi_done(scmnd);
2512 static int srp_reset_device(struct scsi_cmnd *scmnd)
2514 struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
int i, j;
2518 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2520 ch = &target->ch[0];
if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
		      SRP_TSK_LUN_RESET))
	return FAILED;
if (ch->tsk_mgmt_status)
	return FAILED;
2527 for (i = 0; i < target->ch_count; i++) {
2528 ch = &target->ch[i];
	for (j = 0; j < target->req_ring_size; ++j) {
		struct srp_request *req = &ch->req_ring[j];
2532 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2539 static int srp_reset_host(struct scsi_cmnd *scmnd)
2541 struct srp_target_port *target = host_to_target(scmnd->device->host);
2543 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2545 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2548 static int srp_slave_configure(struct scsi_device *sdev)
2550 struct Scsi_Host *shost = sdev->host;
2551 struct srp_target_port *target = host_to_target(shost);
2552 struct request_queue *q = sdev->request_queue;
2553 unsigned long timeout;
2555 if (sdev->type == TYPE_DISK) {
2556 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2557 blk_queue_rq_timeout(q, timeout);
2563 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2566 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2568 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2571 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2574 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2576 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2579 static ssize_t show_service_id(struct device *dev,
2580 struct device_attribute *attr, char *buf)
2582 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2584 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2587 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2595 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2600 return sprintf(buf, "%pI6\n", target->sgid.raw);
2603 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2607 struct srp_rdma_ch *ch = &target->ch[0];
2609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2612 static ssize_t show_orig_dgid(struct device *dev,
2613 struct device_attribute *attr, char *buf)
2615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2620 static ssize_t show_req_lim(struct device *dev,
2621 struct device_attribute *attr, char *buf)
2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
2627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2631 return sprintf(buf, "%d\n", req_lim);
2634 static ssize_t show_zero_req_lim(struct device *dev,
2635 struct device_attribute *attr, char *buf)
2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2639 return sprintf(buf, "%d\n", target->zero_req_lim);
2642 static ssize_t show_local_ib_port(struct device *dev,
2643 struct device_attribute *attr, char *buf)
2645 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2647 return sprintf(buf, "%d\n", target->srp_host->port);
2650 static ssize_t show_local_ib_device(struct device *dev,
2651 struct device_attribute *attr, char *buf)
2653 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2658 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2663 return sprintf(buf, "%d\n", target->ch_count);
2666 static ssize_t show_comp_vector(struct device *dev,
2667 struct device_attribute *attr, char *buf)
2669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2671 return sprintf(buf, "%d\n", target->comp_vector);
2674 static ssize_t show_tl_retry_count(struct device *dev,
2675 struct device_attribute *attr, char *buf)
2677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2679 return sprintf(buf, "%d\n", target->tl_retry_count);
2682 static ssize_t show_cmd_sg_entries(struct device *dev,
2683 struct device_attribute *attr, char *buf)
2685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2687 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2690 static ssize_t show_allow_ext_sg(struct device *dev,
2691 struct device_attribute *attr, char *buf)
2693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2695 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2698 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2699 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2700 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2701 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2702 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2703 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2704 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2705 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2706 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2707 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2708 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2709 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2710 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2711 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2712 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2713 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
2735 static struct scsi_host_template srp_template = {
2736 .module = THIS_MODULE,
2737 .name = "InfiniBand SRP initiator",
2738 .proc_name = DRV_NAME,
2739 .slave_configure = srp_slave_configure,
2740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand,
2742 .change_queue_depth = srp_change_queue_depth,
2743 .eh_abort_handler = srp_abort,
2744 .eh_device_reset_handler = srp_reset_device,
2745 .eh_host_reset_handler = srp_reset_host,
2746 .skip_settle_delay = true,
2747 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2751 .use_clustering = ENABLE_CLUSTERING,
2752 .shost_attrs = srp_host_attrs,
2754 .track_queue_depth = 1,
2757 static int srp_sdev_count(struct Scsi_Host *host)
struct scsi_device *sdev;
int c = 0;

shost_for_each_device(sdev, host)
	c++;

return c;
2770 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2771 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2772 * removal has been scheduled.
2773 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2775 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2777 struct srp_rport_identifiers ids;
2778 struct srp_rport *rport;
2780 target->state = SRP_TARGET_SCANNING;
2781 sprintf(target->target_name, "SRP.T10:%016llX",
2782 be64_to_cpu(target->id_ext));
if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
	return -ENODEV;
2787 memcpy(ids.port_id, &target->id_ext, 8);
2788 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2789 ids.roles = SRP_RPORT_ROLE_TARGET;
2790 rport = srp_rport_add(target->scsi_host, &ids);
2791 if (IS_ERR(rport)) {
2792 scsi_remove_host(target->scsi_host);
2793 return PTR_ERR(rport);
2796 rport->lld_data = target;
2797 target->rport = rport;
2799 spin_lock(&host->target_lock);
2800 list_add_tail(&target->list, &host->target_list);
2801 spin_unlock(&host->target_lock);
2803 scsi_scan_target(&target->scsi_host->shost_gendev,
2804 0, target->scsi_id, SCAN_WILD_CARD, 0);
2806 if (srp_connected_ch(target) < target->ch_count ||
2807 target->qp_in_error) {
2808 shost_printk(KERN_INFO, target->scsi_host,
2809 PFX "SCSI scan failed - removing SCSI host\n");
2810 srp_queue_remove_work(target);
2814 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2815 dev_name(&target->scsi_host->shost_gendev),
2816 srp_sdev_count(target->scsi_host));
2818 spin_lock_irq(&target->lock);
2819 if (target->state == SRP_TARGET_SCANNING)
2820 target->state = SRP_TARGET_LIVE;
2821 spin_unlock_irq(&target->lock);
2827 static void srp_release_dev(struct device *dev)
2829 struct srp_host *host =
2830 container_of(dev, struct srp_host, dev);
2832 complete(&host->released);
2835 static struct class srp_class = {
2836 .name = "infiniband_srp",
2837 .dev_release = srp_release_dev
2841 * srp_conn_unique() - check whether the connection to a target is unique
* @host:   SRP host.
* @target: SRP target port.
2845 static bool srp_conn_unique(struct srp_host *host,
2846 struct srp_target_port *target)
2848 struct srp_target_port *t;
2851 if (target->state == SRP_TARGET_REMOVED)
2856 spin_lock(&host->target_lock);
2857 list_for_each_entry(t, &host->target_list, list) {
if (t != target &&
    target->id_ext == t->id_ext &&
2860 target->ioc_guid == t->ioc_guid &&
2861 target->initiator_ext == t->initiator_ext) {
2866 spin_unlock(&host->target_lock);
2873 * Target ports are added by writing
2875 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2876 * pkey=<P_Key>,service_id=<service ID>
2878 * to the add_target sysfs attribute.
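/*
 * For example, with made-up identifiers and a made-up host device
 * name, a target port could be created from user space with:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=00066a0098000000,\
 *         dgid=fe800000000000000002c9030005ad31,pkey=ffff,\
 *         service_id=0000494353535250" > \
 *       /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The accepted option names are listed in srp_opt_tokens[] below.
 */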
2882 SRP_OPT_ID_EXT = 1 << 0,
2883 SRP_OPT_IOC_GUID = 1 << 1,
2884 SRP_OPT_DGID = 1 << 2,
2885 SRP_OPT_PKEY = 1 << 3,
2886 SRP_OPT_SERVICE_ID = 1 << 4,
2887 SRP_OPT_MAX_SECT = 1 << 5,
2888 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2889 SRP_OPT_IO_CLASS = 1 << 7,
2890 SRP_OPT_INITIATOR_EXT = 1 << 8,
2891 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2892 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2893 SRP_OPT_SG_TABLESIZE = 1 << 11,
2894 SRP_OPT_COMP_VECTOR = 1 << 12,
2895 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2896 SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
	       SRP_OPT_IOC_GUID |
	       SRP_OPT_DGID |
	       SRP_OPT_PKEY |
	       SRP_OPT_SERVICE_ID),
2904 static const match_table_t srp_opt_tokens = {
2905 { SRP_OPT_ID_EXT, "id_ext=%s" },
2906 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2907 { SRP_OPT_DGID, "dgid=%s" },
2908 { SRP_OPT_PKEY, "pkey=%x" },
2909 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2910 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2911 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2912 { SRP_OPT_IO_CLASS, "io_class=%x" },
2913 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2914 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2915 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2916 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2917 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2918 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2919 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2920 { SRP_OPT_ERR, NULL }
2923 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2925 char *options, *sep_opt;
2928 substring_t args[MAX_OPT_ARGS];
2934 options = kstrdup(buf, GFP_KERNEL);
2939 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2943 token = match_token(p, srp_opt_tokens, args);
2947 case SRP_OPT_ID_EXT:
2948 p = match_strdup(args);
2953 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2957 case SRP_OPT_IOC_GUID:
2958 p = match_strdup(args);
2963 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
case SRP_OPT_DGID:
	p = match_strdup(args);
2973 if (strlen(p) != 32) {
2974 pr_warn("bad dest GID parameter '%s'\n", p);
2979 for (i = 0; i < 16; ++i) {
2980 strlcpy(dgid, p + i * 2, sizeof(dgid));
2981 if (sscanf(dgid, "%hhx",
2982 &target->orig_dgid.raw[i]) < 1) {
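/*
 * The destination GID is given as 32 hex characters, two per byte;
 * e.g. the (hypothetical) option
 *
 *   dgid=fe800000000000000002c9030005ad31
 *
 * yields orig_dgid.raw[0] = 0xfe, raw[1] = 0x80, ..., raw[15] = 0x31.
 */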
case SRP_OPT_PKEY:
	if (match_hex(args, &token)) {
2993 pr_warn("bad P_Key parameter '%s'\n", p);
2996 target->pkey = cpu_to_be16(token);
2999 case SRP_OPT_SERVICE_ID:
3000 p = match_strdup(args);
3005 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3009 case SRP_OPT_MAX_SECT:
3010 if (match_int(args, &token)) {
3011 pr_warn("bad max sect parameter '%s'\n", p);
3014 target->scsi_host->max_sectors = token;
3017 case SRP_OPT_QUEUE_SIZE:
3018 if (match_int(args, &token) || token < 1) {
3019 pr_warn("bad queue_size parameter '%s'\n", p);
3022 target->scsi_host->can_queue = token;
3023 target->queue_size = token + SRP_RSP_SQ_SIZE +
3024 SRP_TSK_MGMT_SQ_SIZE;
3025 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3026 target->scsi_host->cmd_per_lun = token;
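/*
 * Worked example: queue_size=64 sets can_queue (and, unless
 * overridden, cmd_per_lun) to 64 while the internal queue is sized
 * to 64 + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE entries, so that
 * responses and task management requests do not compete with
 * regular commands for queue slots.
 */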
3029 case SRP_OPT_MAX_CMD_PER_LUN:
3030 if (match_int(args, &token) || token < 1) {
3031 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3035 target->scsi_host->cmd_per_lun = token;
3038 case SRP_OPT_IO_CLASS:
3039 if (match_hex(args, &token)) {
3040 pr_warn("bad IO class parameter '%s'\n", p);
3043 if (token != SRP_REV10_IB_IO_CLASS &&
3044 token != SRP_REV16A_IB_IO_CLASS) {
3045 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3046 token, SRP_REV10_IB_IO_CLASS,
3047 SRP_REV16A_IB_IO_CLASS);
3050 target->io_class = token;
3053 case SRP_OPT_INITIATOR_EXT:
3054 p = match_strdup(args);
3059 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3063 case SRP_OPT_CMD_SG_ENTRIES:
3064 if (match_int(args, &token) || token < 1 || token > 255) {
3065 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3069 target->cmd_sg_cnt = token;
3072 case SRP_OPT_ALLOW_EXT_SG:
3073 if (match_int(args, &token)) {
3074 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3077 target->allow_ext_sg = !!token;
3080 case SRP_OPT_SG_TABLESIZE:
3081 if (match_int(args, &token) || token < 1 ||
3082 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3083 pr_warn("bad max sg_tablesize parameter '%s'\n",
3087 target->sg_tablesize = token;
3090 case SRP_OPT_COMP_VECTOR:
3091 if (match_int(args, &token) || token < 0) {
3092 pr_warn("bad comp_vector parameter '%s'\n", p);
3095 target->comp_vector = token;
3098 case SRP_OPT_TL_RETRY_COUNT:
3099 if (match_int(args, &token) || token < 2 || token > 7) {
3100 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3104 target->tl_retry_count = token;
3108 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
	ret = 0;
else
	for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3118 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3119 !(srp_opt_tokens[i].token & opt_mask))
3120 pr_warn("target creation request is missing parameter '%s'\n",
3121 srp_opt_tokens[i].pattern);
3123 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3124 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3125 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3126 target->scsi_host->cmd_per_lun,
3127 target->scsi_host->can_queue);
3134 static ssize_t srp_create_target(struct device *dev,
3135 struct device_attribute *attr,
3136 const char *buf, size_t count)
3138 struct srp_host *host =
3139 container_of(dev, struct srp_host, dev);
3140 struct Scsi_Host *target_host;
3141 struct srp_target_port *target;
3142 struct srp_rdma_ch *ch;
3143 struct srp_device *srp_dev = host->srp_dev;
3144 struct ib_device *ibdev = srp_dev->dev;
3145 int ret, node_idx, node, cpu, i;
3146 bool multich = false;
3148 target_host = scsi_host_alloc(&srp_template,
3149 sizeof (struct srp_target_port));
if (!target_host)
	return -ENOMEM;

target_host->transportt = ib_srp_transport_template;
3154 target_host->max_channel = 0;
3155 target_host->max_id = 1;
3156 target_host->max_lun = -1LL;
3157 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3159 target = host_to_target(target_host);
3161 target->io_class = SRP_REV16A_IB_IO_CLASS;
3162 target->scsi_host = target_host;
3163 target->srp_host = host;
3164 target->lkey = host->srp_dev->pd->local_dma_lkey;
3165 target->global_mr = host->srp_dev->global_mr;
3166 target->cmd_sg_cnt = cmd_sg_entries;
3167 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3168 target->allow_ext_sg = allow_ext_sg;
3169 target->tl_retry_count = 7;
3170 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
* Prevent the SCSI host from being removed by srp_remove_target()
* before this function returns.
3176 scsi_host_get(target->scsi_host);
3178 mutex_lock(&host->add_target_mutex);
3180 ret = srp_parse_options(buf, target);
if (ret)
	goto out;

ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
if (ret)
	goto out;
3188 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3190 if (!srp_conn_unique(target->srp_host, target)) {
3191 shost_printk(KERN_INFO, target->scsi_host,
3192 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3193 be64_to_cpu(target->id_ext),
3194 be64_to_cpu(target->ioc_guid),
3195 be64_to_cpu(target->initiator_ext));
3200 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3201 target->cmd_sg_cnt < target->sg_tablesize) {
3202 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3203 target->sg_tablesize = target->cmd_sg_cnt;
3206 target_host->sg_tablesize = target->sg_tablesize;
3207 target->indirect_size = target->sg_tablesize *
3208 sizeof (struct srp_direct_buf);
3209 target->max_iu_len = sizeof (struct srp_cmd) +
3210 sizeof (struct srp_indirect_buf) +
3211 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
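/*
 * Worked example, assuming the wire sizes from the SRP spec
 * (struct srp_cmd 48 bytes, struct srp_indirect_buf header 20 bytes,
 * struct srp_direct_buf 16 bytes): with the default cmd_sg_cnt of 12,
 * max_iu_len = 48 + 20 + 12 * 16 = 260 bytes.
 */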
3213 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3214 INIT_WORK(&target->remove_work, srp_remove_work);
3215 spin_lock_init(&target->lock);
ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
if (ret)
	goto out;
target->ch_count = max_t(unsigned, num_online_nodes(),
			 min(min(4 * num_online_nodes(),
				 ibdev->num_comp_vectors),
			     num_online_cpus()));
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
		     GFP_KERNEL);
if (!target->ch)
	goto out;
3232 for_each_online_node(node) {
3233 const int ch_start = (node_idx * target->ch_count /
3234 num_online_nodes());
3235 const int ch_end = ((node_idx + 1) * target->ch_count /
3236 num_online_nodes());
3237 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3238 num_online_nodes() + target->comp_vector)
3239 % ibdev->num_comp_vectors;
3240 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3241 num_online_nodes() + target->comp_vector)
3242 % ibdev->num_comp_vectors;
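/*
 * Example of the spreading computed above, for a hypothetical system
 * with 2 online NUMA nodes, 8 completion vectors, ch_count = 4 and
 * comp_vector = 0: node 0 is assigned channels [0, 2) with completion
 * vectors starting at 0, node 1 channels [2, 4) with vectors starting
 * at 4, so both channels and interrupt load are spread across nodes.
 */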
3245 for_each_online_cpu(cpu) {
if (cpu_to_node(cpu) != node)
	continue;
if (ch_start + cpu_idx >= ch_end)
	break;
3250 ch = &target->ch[ch_start + cpu_idx];
3251 ch->target = target;
3252 ch->comp_vector = cv_start == cv_end ? cv_start :
3253 cv_start + cpu_idx % (cv_end - cv_start);
3254 spin_lock_init(&ch->lock);
3255 INIT_LIST_HEAD(&ch->free_tx);
3256 ret = srp_new_cm_id(ch);
if (ret)
	goto err_disconnect;
3260 ret = srp_create_ch_ib(ch);
if (ret)
	goto err_disconnect;
3264 ret = srp_alloc_req_data(ch);
if (ret)
	goto err_disconnect;
3268 ret = srp_connect_ch(ch, multich);
if (ret) {
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "Connection %d/%d failed\n",
		     ch_start + cpu_idx,
		     target->ch_count);
	if (node_idx == 0 && cpu_idx == 0) {
		goto err_disconnect;
	} else {
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
		target->ch_count = ch - target->ch;
		goto connected;
	}
}
connected:
	target->scsi_host->nr_hw_queues = target->ch_count;
3293 ret = srp_add_target(host, target);
if (ret)
	goto err_disconnect;
3297 if (target->state != SRP_TARGET_REMOVED) {
3298 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3299 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3300 be64_to_cpu(target->id_ext),
3301 be64_to_cpu(target->ioc_guid),
3302 be16_to_cpu(target->pkey),
3303 be64_to_cpu(target->service_id),
3304 target->sgid.raw, target->orig_dgid.raw);
out:
	mutex_unlock(&host->add_target_mutex);
3312 scsi_host_put(target->scsi_host);
if (ret < 0)
	scsi_host_put(target->scsi_host);

return ret;
err_disconnect:
	srp_disconnect_target(target);
3321 for (i = 0; i < target->ch_count; i++) {
3322 ch = &target->ch[i];
3323 srp_free_ch_ib(target, ch);
	srp_free_req_data(target, ch);
}

kfree(target->ch);
target->ch = NULL;

goto out;
3331 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3333 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3336 struct srp_host *host = container_of(dev, struct srp_host, dev);
3338 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3341 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3343 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3346 struct srp_host *host = container_of(dev, struct srp_host, dev);
3348 return sprintf(buf, "%d\n", host->port);
3351 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3353 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3355 struct srp_host *host;
host = kzalloc(sizeof *host, GFP_KERNEL);
if (!host)
	return NULL;
3361 INIT_LIST_HEAD(&host->target_list);
3362 spin_lock_init(&host->target_lock);
3363 init_completion(&host->released);
3364 mutex_init(&host->add_target_mutex);
host->srp_dev = device;
host->port = port;
3368 host->dev.class = &srp_class;
3369 host->dev.parent = device->dev->dma_device;
3370 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
if (device_register(&host->dev))
	goto free_host;
if (device_create_file(&host->dev, &dev_attr_add_target))
	goto err_class;
if (device_create_file(&host->dev, &dev_attr_ibdev))
	goto err_class;
if (device_create_file(&host->dev, &dev_attr_port))
	goto err_class;

return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
3392 static void srp_add_one(struct ib_device *device)
3394 struct srp_device *srp_dev;
3395 struct ib_device_attr *dev_attr;
3396 struct srp_host *host;
3397 int mr_page_shift, p;
3398 u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (!dev_attr)
	return;

if (ib_query_device(device, dev_attr)) {
	pr_warn("Query device failed for %s\n", device->name);
	goto free_attr;
}

srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
if (!srp_dev)
	goto free_attr;
3413 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3414 device->map_phys_fmr && device->unmap_fmr);
3415 srp_dev->has_fr = (dev_attr->device_cap_flags &
3416 IB_DEVICE_MEM_MGT_EXTENSIONS);
3417 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3418 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3420 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3421 (!srp_dev->has_fmr || prefer_fr));
3422 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3425 * Use the smallest page size supported by the HCA, down to a
3426 * minimum of 4096 bytes. We're unlikely to build large sglists
3427 * out of smaller entries.
3429 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3430 srp_dev->mr_page_size = 1 << mr_page_shift;
3431 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
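/*
 * Worked example: if page_size_cap advertises 4 KiB as the smallest
 * supported page size (bit 12 set), ffs(page_size_cap) - 1 = 12, so
 * mr_page_size = 4096 and mr_page_mask = ~0xfffULL. A hypothetical
 * max_mr_size of 16 MiB then yields, via the division below,
 * 16 MiB / 4 KiB = 4096 pages per MR before clamping.
 */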
3432 max_pages_per_mr = dev_attr->max_mr_size;
3433 do_div(max_pages_per_mr, srp_dev->mr_page_size);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
				  max_pages_per_mr);
3436 if (srp_dev->use_fast_reg) {
3437 srp_dev->max_pages_per_mr =
3438 min_t(u32, srp_dev->max_pages_per_mr,
3439 dev_attr->max_fast_reg_page_list_len);
3441 srp_dev->mr_max_size = srp_dev->mr_page_size *
3442 srp_dev->max_pages_per_mr;
3443 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3444 device->name, mr_page_shift, dev_attr->max_mr_size,
3445 dev_attr->max_fast_reg_page_list_len,
3446 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3448 INIT_LIST_HEAD(&srp_dev->dev_list);
3450 srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device);
if (IS_ERR(srp_dev->pd))
	goto free_dev;
3455 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3456 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3457 IB_ACCESS_LOCAL_WRITE |
3458 IB_ACCESS_REMOTE_READ |
3459 IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->global_mr))
		goto err_pd;
} else {
	srp_dev->global_mr = NULL;
}
3466 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3467 host = srp_add_port(srp_dev, p);
	if (host)
		list_add_tail(&host->list, &srp_dev->dev_list);
ib_set_client_data(device, &srp_client, srp_dev);

goto free_attr;
err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
3486 static void srp_remove_one(struct ib_device *device, void *client_data)
3488 struct srp_device *srp_dev;
3489 struct srp_host *host, *tmp_host;
3490 struct srp_target_port *target;
3492 srp_dev = client_data;
3496 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3497 device_unregister(&host->dev);
3499 * Wait for the sysfs entry to go away, so that no new
3500 * target ports can be created.
3502 wait_for_completion(&host->released);
3505 * Remove all target ports.
3507 spin_lock(&host->target_lock);
3508 list_for_each_entry(target, &host->target_list, list)
3509 srp_queue_remove_work(target);
3510 spin_unlock(&host->target_lock);
3513 * Wait for tl_err and target port removal tasks.
3515 flush_workqueue(system_long_wq);
	flush_workqueue(srp_remove_wq);

	kfree(host);
}
3521 if (srp_dev->global_mr)
3522 ib_dereg_mr(srp_dev->global_mr);
3523 ib_dealloc_pd(srp_dev->pd);
3528 static struct srp_function_template ib_srp_transport_functions = {
3529 .has_rport_state = true,
3530 .reset_timer_if_blocked = true,
3531 .reconnect_delay = &srp_reconnect_delay,
3532 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3533 .dev_loss_tmo = &srp_dev_loss_tmo,
3534 .reconnect = srp_rport_reconnect,
3535 .rport_delete = srp_rport_delete,
3536 .terminate_rport_io = srp_terminate_io,
3539 static int __init srp_init_module(void)
3543 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3545 if (srp_sg_tablesize) {
3546 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3547 if (!cmd_sg_entries)
3548 cmd_sg_entries = srp_sg_tablesize;
3551 if (!cmd_sg_entries)
3552 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3554 if (cmd_sg_entries > 255) {
3555 pr_warn("Clamping cmd_sg_entries to 255\n");
3556 cmd_sg_entries = 255;
3559 if (!indirect_sg_entries)
3560 indirect_sg_entries = cmd_sg_entries;
3561 else if (indirect_sg_entries < cmd_sg_entries) {
	pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
		cmd_sg_entries);
	indirect_sg_entries = cmd_sg_entries;
}
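/*
 * Example module load with made-up values:
 *
 *   modprobe ib_srp cmd_sg_entries=16 indirect_sg_entries=8
 *
 * triggers the warning above and bumps indirect_sg_entries to 16,
 * since the indirect descriptor table must cover at least as many
 * S/G entries as a single command can carry.
 */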
3567 srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
	ret = -ENOMEM;
	goto out;
}
3574 ib_srp_transport_template =
3575 srp_attach_transport(&ib_srp_transport_functions);
if (!ib_srp_transport_template)
	goto destroy_wq;
3579 ret = class_register(&srp_class);
if (ret) {
	pr_err("couldn't register class infiniband_srp\n");
	goto release_tr;
}
3585 ib_sa_register_client(&srp_sa_client);
3587 ret = ib_register_client(&srp_client);
if (ret) {
	pr_err("couldn't register IB client\n");
	goto unreg_sa;
}
out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
3608 static void __exit srp_cleanup_module(void)
3610 ib_unregister_client(&srp_client);
3611 ib_sa_unregister_client(&srp_sa_client);
3612 class_unregister(&srp_class);
3613 srp_release_transport(ib_srp_transport_template);
3614 destroy_workqueue(srp_remove_wq);
3617 module_init(srp_init_module);
3618 module_exit(srp_cleanup_module);