2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
47 #include <linux/atomic.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsi_transport_srp.h>
58 #define DRV_NAME "ib_srp"
59 #define PFX DRV_NAME ": "
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
65 #if !defined(CONFIG_DYNAMIC_DEBUG)
66 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool prefer_fr = true;
75 static bool register_always = true;
76 static bool never_register;
77 static int topspin_workarounds = 1;
79 module_param(srp_sg_tablesize, uint, 0444);
80 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
82 module_param(cmd_sg_entries, uint, 0444);
83 MODULE_PARM_DESC(cmd_sg_entries,
84 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
86 module_param(indirect_sg_entries, uint, 0444);
87 MODULE_PARM_DESC(indirect_sg_entries,
88 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
90 module_param(allow_ext_sg, bool, 0444);
91 MODULE_PARM_DESC(allow_ext_sg,
92 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
94 module_param(topspin_workarounds, int, 0444);
95 MODULE_PARM_DESC(topspin_workarounds,
96 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
98 module_param(prefer_fr, bool, 0444);
99 MODULE_PARM_DESC(prefer_fr,
100 "Whether to use fast registration if both FMR and fast registration are supported");
102 module_param(register_always, bool, 0444);
103 MODULE_PARM_DESC(register_always,
104 "Use memory registration even for contiguous memory regions");
106 module_param(never_register, bool, 0444);
107 MODULE_PARM_DESC(never_register, "Never register memory");
109 static const struct kernel_param_ops srp_tmo_ops;
111 static int srp_reconnect_delay = 10;
112 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
114 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
116 static int srp_fast_io_fail_tmo = 15;
117 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
119 MODULE_PARM_DESC(fast_io_fail_tmo,
120 "Number of seconds between the observation of a transport"
121 " layer error and failing all I/O. \"off\" means that this"
122 " functionality is disabled.");
124 static int srp_dev_loss_tmo = 600;
125 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
127 MODULE_PARM_DESC(dev_loss_tmo,
128 "Maximum number of seconds that the SRP transport should"
129 " insulate transport layer errors. After this time has been"
130 " exceeded the SCSI host is removed. Should be"
131 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
132 " if fast_io_fail_tmo has not been set. \"off\" means that"
133 " this functionality is disabled.");
135 static bool srp_use_imm_data = true;
136 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
137 MODULE_PARM_DESC(use_imm_data,
138 "Whether or not to request permission to use immediate data during SRP login.");
140 static unsigned int srp_max_imm_data = 8 * 1024;
141 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
142 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
144 static unsigned ch_count;
145 module_param(ch_count, uint, 0444);
146 MODULE_PARM_DESC(ch_count,
147 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
149 static void srp_add_one(struct ib_device *device);
150 static void srp_remove_one(struct ib_device *device, void *client_data);
151 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
152 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
154 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
155 const struct ib_cm_event *event);
156 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
157 struct rdma_cm_event *event);
159 static struct scsi_transport_template *ib_srp_transport_template;
160 static struct workqueue_struct *srp_remove_wq;
162 static struct ib_client srp_client = {
165 .remove = srp_remove_one
168 static struct ib_sa_client srp_sa_client;
170 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
172 int tmo = *(int *)kp->arg;
175 return sprintf(buffer, "%d", tmo);
177 return sprintf(buffer, "off");
180 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
184 res = srp_parse_tmo(&tmo, val);
188 if (kp->arg == &srp_reconnect_delay)
189 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
191 else if (kp->arg == &srp_fast_io_fail_tmo)
192 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
194 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
198 *(int *)kp->arg = tmo;
204 static const struct kernel_param_ops srp_tmo_ops = {
209 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
211 return (struct srp_target_port *) host->hostdata;
214 static const char *srp_target_info(struct Scsi_Host *host)
216 return host_to_target(host)->target_name;
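/*
 * The three most significant bytes of the IOC GUID contain the vendor OUI;
 * matching them against the Topspin (00:05:ad) and Cisco (00:1b:0d) OUIs
 * identifies targets that need the initiator port ID workaround applied in
 * srp_send_req().
 */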
219 static int srp_target_is_topspin(struct srp_target_port *target)
221 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
222 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
224 return topspin_workarounds &&
225 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
226 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
229 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
231 enum dma_data_direction direction)
235 iu = kmalloc(sizeof *iu, gfp_mask);
239 iu->buf = kzalloc(size, gfp_mask);
243 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
245 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
249 iu->direction = direction;
261 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
266 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
272 static void srp_qp_event(struct ib_event *event, void *context)
274 pr_debug("QP event %s (%d)\n",
275 ib_event_msg(event->event), event->event);
278 static int srp_init_ib_qp(struct srp_target_port *target,
281 struct ib_qp_attr *attr;
284 attr = kmalloc(sizeof *attr, GFP_KERNEL);
288 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
289 target->srp_host->port,
290 be16_to_cpu(target->ib_cm.pkey),
295 attr->qp_state = IB_QPS_INIT;
296 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
297 IB_ACCESS_REMOTE_WRITE);
298 attr->port_num = target->srp_host->port;
300 ret = ib_modify_qp(qp, attr,
311 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
313 struct srp_target_port *target = ch->target;
314 struct ib_cm_id *new_cm_id;
316 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
317 srp_ib_cm_handler, ch);
318 if (IS_ERR(new_cm_id))
319 return PTR_ERR(new_cm_id);
322 ib_destroy_cm_id(ch->ib_cm.cm_id);
323 ch->ib_cm.cm_id = new_cm_id;
324 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
325 target->srp_host->port))
326 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
328 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
329 ch->ib_cm.path.sgid = target->sgid;
330 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
331 ch->ib_cm.path.pkey = target->ib_cm.pkey;
332 ch->ib_cm.path.service_id = target->ib_cm.service_id;
337 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
339 struct srp_target_port *target = ch->target;
340 struct rdma_cm_id *new_cm_id;
343 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
344 RDMA_PS_TCP, IB_QPT_RC);
345 if (IS_ERR(new_cm_id)) {
346 ret = PTR_ERR(new_cm_id);
351 init_completion(&ch->done);
352 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
353 (struct sockaddr *)&target->rdma_cm.src : NULL,
354 (struct sockaddr *)&target->rdma_cm.dst,
355 SRP_PATH_REC_TIMEOUT_MS);
357 pr_err("No route available from %pIS to %pIS (%d)\n",
358 &target->rdma_cm.src, &target->rdma_cm.dst, ret);
361 ret = wait_for_completion_interruptible(&ch->done);
367 pr_err("Resolving address %pIS failed (%d)\n",
368 &target->rdma_cm.dst, ret);
372 swap(ch->rdma_cm.cm_id, new_cm_id);
376 rdma_destroy_id(new_cm_id);
381 static int srp_new_cm_id(struct srp_rdma_ch *ch)
383 struct srp_target_port *target = ch->target;
385 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
386 srp_new_ib_cm_id(ch);
389 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
391 struct srp_device *dev = target->srp_host->srp_dev;
392 struct ib_fmr_pool_param fmr_param;
394 memset(&fmr_param, 0, sizeof(fmr_param));
395 fmr_param.pool_size = target->mr_pool_size;
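	/*
	 * Let the FMR pool's flush worker unmap FMRs in batches once a
	 * quarter of the pool is waiting to be invalidated.
	 */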
396 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
398 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
399 fmr_param.page_shift = ilog2(dev->mr_page_size);
400 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
401 IB_ACCESS_REMOTE_WRITE |
402 IB_ACCESS_REMOTE_READ);
404 return ib_create_fmr_pool(dev->pd, &fmr_param);
408 * srp_destroy_fr_pool() - free the resources owned by a pool
409 * @pool: Fast registration pool to be destroyed.
411 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
414 struct srp_fr_desc *d;
419 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
427 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
428 * @device: IB device to allocate fast registration descriptors for.
429 * @pd: Protection domain associated with the FR descriptors.
430 * @pool_size: Number of descriptors to allocate.
431 * @max_page_list_len: Maximum fast registration work request page list length.
433 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
434 struct ib_pd *pd, int pool_size,
435 int max_page_list_len)
437 struct srp_fr_pool *pool;
438 struct srp_fr_desc *d;
440 int i, ret = -EINVAL;
441 enum ib_mr_type mr_type;
446 pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
449 pool->size = pool_size;
450 pool->max_page_list_len = max_page_list_len;
451 spin_lock_init(&pool->lock);
452 INIT_LIST_HEAD(&pool->free_list);
454 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
455 mr_type = IB_MR_TYPE_SG_GAPS;
457 mr_type = IB_MR_TYPE_MEM_REG;
459 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
460 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
464 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
465 dev_name(&device->dev));
469 list_add_tail(&d->entry, &pool->free_list);
476 srp_destroy_fr_pool(pool);
484 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
485 * @pool: Pool to obtain descriptor from.
487 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
489 struct srp_fr_desc *d = NULL;
492 spin_lock_irqsave(&pool->lock, flags);
493 if (!list_empty(&pool->free_list)) {
494 d = list_first_entry(&pool->free_list, typeof(*d), entry);
497 spin_unlock_irqrestore(&pool->lock, flags);
503 * srp_fr_pool_put() - put an FR descriptor back in the free list
504 * @pool: Pool the descriptor was allocated from.
505 * @desc: Pointer to an array of fast registration descriptor pointers.
506 * @n: Number of descriptors to put back.
508 * Note: The caller must already have queued an invalidation request for
509 * desc->mr->rkey before calling this function.
511 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
517 spin_lock_irqsave(&pool->lock, flags);
518 for (i = 0; i < n; i++)
519 list_add(&desc[i]->entry, &pool->free_list);
520 spin_unlock_irqrestore(&pool->lock, flags);
523 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
525 struct srp_device *dev = target->srp_host->srp_dev;
527 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
528 dev->max_pages_per_mr);
532 * srp_destroy_qp() - destroy an RDMA queue pair
533 * @ch: SRP RDMA channel.
535 * Drain the QP before destroying it so that the receive completion
536 * handler cannot access the queue pair while it is being destroyed.
539 static void srp_destroy_qp(struct srp_rdma_ch *ch)
541 spin_lock_irq(&ch->lock);
542 ib_process_cq_direct(ch->send_cq, -1);
543 spin_unlock_irq(&ch->lock);
546 ib_destroy_qp(ch->qp);
549 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
551 struct srp_target_port *target = ch->target;
552 struct srp_device *dev = target->srp_host->srp_dev;
553 struct ib_qp_init_attr *init_attr;
554 struct ib_cq *recv_cq, *send_cq;
556 struct ib_fmr_pool *fmr_pool = NULL;
557 struct srp_fr_pool *fr_pool = NULL;
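	/*
	 * Send queue sizing factor: one WR for the SRP IU itself plus, when
	 * fast registration is used, one IB_WR_REG_MR and one IB_WR_LOCAL_INV
	 * work request per memory region that a single command may consume.
	 */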
558 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
561 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
565 /* queue_size + 1 for ib_drain_rq() */
566 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
567 ch->comp_vector, IB_POLL_SOFTIRQ);
568 if (IS_ERR(recv_cq)) {
569 ret = PTR_ERR(recv_cq);
573 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
574 ch->comp_vector, IB_POLL_DIRECT);
575 if (IS_ERR(send_cq)) {
576 ret = PTR_ERR(send_cq);
580 init_attr->event_handler = srp_qp_event;
581 init_attr->cap.max_send_wr = m * target->queue_size;
582 init_attr->cap.max_recv_wr = target->queue_size + 1;
583 init_attr->cap.max_recv_sge = 1;
584 init_attr->cap.max_send_sge = SRP_MAX_SGE;
585 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
586 init_attr->qp_type = IB_QPT_RC;
587 init_attr->send_cq = send_cq;
588 init_attr->recv_cq = recv_cq;
590 if (target->using_rdma_cm) {
591 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
592 qp = ch->rdma_cm.cm_id->qp;
594 qp = ib_create_qp(dev->pd, init_attr);
596 ret = srp_init_ib_qp(target, qp);
604 pr_err("QP creation failed for dev %s: %d\n",
605 dev_name(&dev->dev->dev), ret);
609 if (dev->use_fast_reg) {
610 fr_pool = srp_alloc_fr_pool(target);
611 if (IS_ERR(fr_pool)) {
612 ret = PTR_ERR(fr_pool);
613 shost_printk(KERN_WARNING, target->scsi_host, PFX
614 "FR pool allocation failed (%d)\n", ret);
617 } else if (dev->use_fmr) {
618 fmr_pool = srp_alloc_fmr_pool(target);
619 if (IS_ERR(fmr_pool)) {
620 ret = PTR_ERR(fmr_pool);
621 shost_printk(KERN_WARNING, target->scsi_host, PFX
622 "FMR pool allocation failed (%d)\n", ret);
630 ib_free_cq(ch->recv_cq);
632 ib_free_cq(ch->send_cq);
635 ch->recv_cq = recv_cq;
636 ch->send_cq = send_cq;
638 if (dev->use_fast_reg) {
640 srp_destroy_fr_pool(ch->fr_pool);
641 ch->fr_pool = fr_pool;
642 } else if (dev->use_fmr) {
644 ib_destroy_fmr_pool(ch->fmr_pool);
645 ch->fmr_pool = fmr_pool;
652 if (target->using_rdma_cm)
653 rdma_destroy_qp(ch->rdma_cm.cm_id);
669 * Note: this function may be called without srp_alloc_iu_bufs() having been
670 * invoked. Hence the ch->[rt]x_ring checks.
672 static void srp_free_ch_ib(struct srp_target_port *target,
673 struct srp_rdma_ch *ch)
675 struct srp_device *dev = target->srp_host->srp_dev;
681 if (target->using_rdma_cm) {
682 if (ch->rdma_cm.cm_id) {
683 rdma_destroy_id(ch->rdma_cm.cm_id);
684 ch->rdma_cm.cm_id = NULL;
687 if (ch->ib_cm.cm_id) {
688 ib_destroy_cm_id(ch->ib_cm.cm_id);
689 ch->ib_cm.cm_id = NULL;
693 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
697 if (dev->use_fast_reg) {
699 srp_destroy_fr_pool(ch->fr_pool);
700 } else if (dev->use_fmr) {
702 ib_destroy_fmr_pool(ch->fmr_pool);
706 ib_free_cq(ch->send_cq);
707 ib_free_cq(ch->recv_cq);
710 * Prevent the SCSI error handler from using this channel after it has
711 * been freed; the error handler may continue trying to perform recovery
712 * actions after scsi_remove_host() has returned.
718 ch->send_cq = ch->recv_cq = NULL;
721 for (i = 0; i < target->queue_size; ++i)
722 srp_free_iu(target->srp_host, ch->rx_ring[i]);
727 for (i = 0; i < target->queue_size; ++i)
728 srp_free_iu(target->srp_host, ch->tx_ring[i]);
734 static void srp_path_rec_completion(int status,
735 struct sa_path_rec *pathrec,
738 struct srp_rdma_ch *ch = ch_ptr;
739 struct srp_target_port *target = ch->target;
743 shost_printk(KERN_ERR, target->scsi_host,
744 PFX "Got failed path rec status %d\n", status);
746 ch->ib_cm.path = *pathrec;
750 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
752 struct srp_target_port *target = ch->target;
755 ch->ib_cm.path.numb_path = 1;
757 init_completion(&ch->done);
759 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
760 target->srp_host->srp_dev->dev,
761 target->srp_host->port,
763 IB_SA_PATH_REC_SERVICE_ID |
764 IB_SA_PATH_REC_DGID |
765 IB_SA_PATH_REC_SGID |
766 IB_SA_PATH_REC_NUMB_PATH |
768 SRP_PATH_REC_TIMEOUT_MS,
770 srp_path_rec_completion,
771 ch, &ch->ib_cm.path_query);
772 if (ch->ib_cm.path_query_id < 0)
773 return ch->ib_cm.path_query_id;
775 ret = wait_for_completion_interruptible(&ch->done);
780 shost_printk(KERN_WARNING, target->scsi_host,
781 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
782 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
783 be16_to_cpu(target->ib_cm.pkey),
784 be64_to_cpu(target->ib_cm.service_id));
789 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
791 struct srp_target_port *target = ch->target;
794 init_completion(&ch->done);
796 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
800 wait_for_completion_interruptible(&ch->done);
803 shost_printk(KERN_WARNING, target->scsi_host,
804 PFX "Path resolution failed\n");
809 static int srp_lookup_path(struct srp_rdma_ch *ch)
811 struct srp_target_port *target = ch->target;
813 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
814 srp_ib_lookup_path(ch);
817 static u8 srp_get_subnet_timeout(struct srp_host *host)
819 struct ib_port_attr attr;
821 u8 subnet_timeout = 18;
823 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
825 subnet_timeout = attr.subnet_timeout;
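	/*
	 * srp_send_req() sets the CM response timeouts to subnet_timeout + 2,
	 * so an unusually small subnet timeout can make the CM give up before
	 * the target has a chance to answer the login request.
	 */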
827 if (unlikely(subnet_timeout < 15))
828 pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
829 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
831 return subnet_timeout;
834 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
837 struct srp_target_port *target = ch->target;
839 struct rdma_conn_param rdma_param;
840 struct srp_login_req_rdma rdma_req;
841 struct ib_cm_req_param ib_param;
842 struct srp_login_req ib_req;
847 req = kzalloc(sizeof *req, GFP_KERNEL);
851 req->ib_param.flow_control = 1;
852 req->ib_param.retry_count = target->tl_retry_count;
855 * Pick some arbitrary defaults here; we could make these
856 * module parameters if anyone cared about setting them.
858 req->ib_param.responder_resources = 4;
859 req->ib_param.rnr_retry_count = 7;
860 req->ib_param.max_cm_retries = 15;
862 req->ib_req.opcode = SRP_LOGIN_REQ;
864 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
865 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
866 SRP_BUF_FORMAT_INDIRECT);
867 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
868 SRP_MULTICHAN_SINGLE);
869 if (srp_use_imm_data) {
870 req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
871 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
874 if (target->using_rdma_cm) {
875 req->rdma_param.flow_control = req->ib_param.flow_control;
876 req->rdma_param.responder_resources =
877 req->ib_param.responder_resources;
878 req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
879 req->rdma_param.retry_count = req->ib_param.retry_count;
880 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
881 req->rdma_param.private_data = &req->rdma_req;
882 req->rdma_param.private_data_len = sizeof(req->rdma_req);
884 req->rdma_req.opcode = req->ib_req.opcode;
885 req->rdma_req.tag = req->ib_req.tag;
886 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
887 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
888 req->rdma_req.req_flags = req->ib_req.req_flags;
889 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
891 ipi = req->rdma_req.initiator_port_id;
892 tpi = req->rdma_req.target_port_id;
896 subnet_timeout = srp_get_subnet_timeout(target->srp_host);
898 req->ib_param.primary_path = &ch->ib_cm.path;
899 req->ib_param.alternate_path = NULL;
900 req->ib_param.service_id = target->ib_cm.service_id;
901 get_random_bytes(&req->ib_param.starting_psn, 4);
902 req->ib_param.starting_psn &= 0xffffff;
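	/* The starting PSN is a 24-bit field, hence the mask. */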
903 req->ib_param.qp_num = ch->qp->qp_num;
904 req->ib_param.qp_type = ch->qp->qp_type;
905 req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
906 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
907 req->ib_param.private_data = &req->ib_req;
908 req->ib_param.private_data_len = sizeof(req->ib_req);
910 ipi = req->ib_req.initiator_port_id;
911 tpi = req->ib_req.target_port_id;
915 * In the published SRP specification (draft rev. 16a), the
916 * port identifier format is 8 bytes of ID extension followed
917 * by 8 bytes of GUID. Older drafts put the two halves in the
918 * opposite order, so that the GUID comes first.
920 * Targets conforming to these obsolete drafts can be
921 * recognized by the I/O Class they report.
923 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
924 memcpy(ipi, &target->sgid.global.interface_id, 8);
925 memcpy(ipi + 8, &target->initiator_ext, 8);
926 memcpy(tpi, &target->ioc_guid, 8);
927 memcpy(tpi + 8, &target->id_ext, 8);
929 memcpy(ipi, &target->initiator_ext, 8);
930 memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
931 memcpy(tpi, &target->id_ext, 8);
932 memcpy(tpi + 8, &target->ioc_guid, 8);
936 * Topspin/Cisco SRP targets will reject our login unless we
937 * zero out the first 8 bytes of our initiator port ID and set
938 * the second 8 bytes to the local node GUID.
940 if (srp_target_is_topspin(target)) {
941 shost_printk(KERN_DEBUG, target->scsi_host,
942 PFX "Topspin/Cisco initiator port ID workaround "
943 "activated for target GUID %016llx\n",
944 be64_to_cpu(target->ioc_guid));
946 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
949 if (target->using_rdma_cm)
950 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
952 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
959 static bool srp_queue_remove_work(struct srp_target_port *target)
961 bool changed = false;
963 spin_lock_irq(&target->lock);
964 if (target->state != SRP_TARGET_REMOVED) {
965 target->state = SRP_TARGET_REMOVED;
968 spin_unlock_irq(&target->lock);
971 queue_work(srp_remove_wq, &target->remove_work);
976 static void srp_disconnect_target(struct srp_target_port *target)
978 struct srp_rdma_ch *ch;
981 /* XXX should send SRP_I_LOGOUT request */
983 for (i = 0; i < target->ch_count; i++) {
985 ch->connected = false;
987 if (target->using_rdma_cm) {
988 if (ch->rdma_cm.cm_id)
989 rdma_disconnect(ch->rdma_cm.cm_id);
992 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
996 shost_printk(KERN_DEBUG, target->scsi_host,
997 PFX "Sending CM DREQ failed\n");
1002 static void srp_free_req_data(struct srp_target_port *target,
1003 struct srp_rdma_ch *ch)
1005 struct srp_device *dev = target->srp_host->srp_dev;
1006 struct ib_device *ibdev = dev->dev;
1007 struct srp_request *req;
1013 for (i = 0; i < target->req_ring_size; ++i) {
1014 req = &ch->req_ring[i];
1015 if (dev->use_fast_reg) {
1016 kfree(req->fr_list);
1018 kfree(req->fmr_list);
1019 kfree(req->map_page);
1021 if (req->indirect_dma_addr) {
1022 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
1023 target->indirect_size,
1026 kfree(req->indirect_desc);
1029 kfree(ch->req_ring);
1030 ch->req_ring = NULL;
1033 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
1035 struct srp_target_port *target = ch->target;
1036 struct srp_device *srp_dev = target->srp_host->srp_dev;
1037 struct ib_device *ibdev = srp_dev->dev;
1038 struct srp_request *req;
1040 dma_addr_t dma_addr;
1041 int i, ret = -ENOMEM;
1043 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
1048 for (i = 0; i < target->req_ring_size; ++i) {
1049 req = &ch->req_ring[i];
1050 mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
1054 if (srp_dev->use_fast_reg) {
1055 req->fr_list = mr_list;
1057 req->fmr_list = mr_list;
1058 req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
1064 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1065 if (!req->indirect_desc)
1068 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1069 target->indirect_size,
1071 if (ib_dma_mapping_error(ibdev, dma_addr))
1074 req->indirect_dma_addr = dma_addr;
1083 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1084 * @shost: SCSI host whose attributes to remove from sysfs.
1086 * Note: Any attributes defined in the host template that did not exist
1087 * before this function was invoked will be ignored.
1089 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1091 struct device_attribute **attr;
1093 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1094 device_remove_file(&shost->shost_dev, *attr);
1097 static void srp_remove_target(struct srp_target_port *target)
1099 struct srp_rdma_ch *ch;
1102 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1104 srp_del_scsi_host_attr(target->scsi_host);
1105 srp_rport_get(target->rport);
1106 srp_remove_host(target->scsi_host);
1107 scsi_remove_host(target->scsi_host);
1108 srp_stop_rport_timers(target->rport);
1109 srp_disconnect_target(target);
1110 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1111 for (i = 0; i < target->ch_count; i++) {
1112 ch = &target->ch[i];
1113 srp_free_ch_ib(target, ch);
1115 cancel_work_sync(&target->tl_err_work);
1116 srp_rport_put(target->rport);
1117 for (i = 0; i < target->ch_count; i++) {
1118 ch = &target->ch[i];
1119 srp_free_req_data(target, ch);
1124 spin_lock(&target->srp_host->target_lock);
1125 list_del(&target->list);
1126 spin_unlock(&target->srp_host->target_lock);
1128 scsi_host_put(target->scsi_host);
1131 static void srp_remove_work(struct work_struct *work)
1133 struct srp_target_port *target =
1134 container_of(work, struct srp_target_port, remove_work);
1136 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1138 srp_remove_target(target);
1141 static void srp_rport_delete(struct srp_rport *rport)
1143 struct srp_target_port *target = rport->lld_data;
1145 srp_queue_remove_work(target);
1149 * srp_connected_ch() - number of connected channels
1150 * @target: SRP target port.
1152 static int srp_connected_ch(struct srp_target_port *target)
1156 for (i = 0; i < target->ch_count; i++)
1157 c += target->ch[i].connected;
1162 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1165 struct srp_target_port *target = ch->target;
1168 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1170 ret = srp_lookup_path(ch);
1175 init_completion(&ch->done);
1176 ret = srp_send_req(ch, max_iu_len, multich);
1179 ret = wait_for_completion_interruptible(&ch->done);
1184 * The CM event handling code will set status to
1185 * SRP_PORT_REDIRECT if we get a port redirect REJ
1186 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1187 * redirect REJ back.
1192 ch->connected = true;
1195 case SRP_PORT_REDIRECT:
1196 ret = srp_lookup_path(ch);
1201 case SRP_DLID_REDIRECT:
1204 case SRP_STALE_CONN:
1205 shost_printk(KERN_ERR, target->scsi_host, PFX
1206 "giving up on stale connection\n");
1216 return ret <= 0 ? ret : -ENODEV;
1219 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1221 srp_handle_qp_err(cq, wc, "INV RKEY");
1224 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1227 struct ib_send_wr wr = {
1228 .opcode = IB_WR_LOCAL_INV,
1232 .ex.invalidate_rkey = rkey,
1235 wr.wr_cqe = &req->reg_cqe;
1236 req->reg_cqe.done = srp_inv_rkey_err_done;
1237 return ib_post_send(ch->qp, &wr, NULL);
1240 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1241 struct srp_rdma_ch *ch,
1242 struct srp_request *req)
1244 struct srp_target_port *target = ch->target;
1245 struct srp_device *dev = target->srp_host->srp_dev;
1246 struct ib_device *ibdev = dev->dev;
1249 if (!scsi_sglist(scmnd) ||
1250 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1251 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1254 if (dev->use_fast_reg) {
1255 struct srp_fr_desc **pfr;
1257 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1258 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1260 shost_printk(KERN_ERR, target->scsi_host, PFX
1261 "Queueing INV WR for rkey %#x failed (%d)\n",
1262 (*pfr)->mr->rkey, res);
1263 queue_work(system_long_wq,
1264 &target->tl_err_work);
1268 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1270 } else if (dev->use_fmr) {
1271 struct ib_pool_fmr **pfmr;
1273 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1274 ib_fmr_pool_unmap(*pfmr);
1277 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1278 scmnd->sc_data_direction);
1282 * srp_claim_req - Take ownership of the scmnd associated with a request.
1283 * @ch: SRP RDMA channel.
1284 * @req: SRP request.
1285 * @sdev: If not NULL, only take ownership for this SCSI device.
1286 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1287 * ownership of @req->scmnd if it equals @scmnd.
1290 * Either NULL or a pointer to the SCSI command the caller became owner of.
1292 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1293 struct srp_request *req,
1294 struct scsi_device *sdev,
1295 struct scsi_cmnd *scmnd)
1297 unsigned long flags;
1299 spin_lock_irqsave(&ch->lock, flags);
1301 (!sdev || req->scmnd->device == sdev) &&
1302 (!scmnd || req->scmnd == scmnd)) {
1308 spin_unlock_irqrestore(&ch->lock, flags);
1314 * srp_free_req() - Unmap data and adjust ch->req_lim.
1315 * @ch: SRP RDMA channel.
1316 * @req: Request to be freed.
1317 * @scmnd: SCSI command associated with @req.
1318 * @req_lim_delta: Amount to be added to @target->req_lim.
1320 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1321 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1323 unsigned long flags;
1325 srp_unmap_data(scmnd, ch, req);
1327 spin_lock_irqsave(&ch->lock, flags);
1328 ch->req_lim += req_lim_delta;
1329 spin_unlock_irqrestore(&ch->lock, flags);
1332 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1333 struct scsi_device *sdev, int result)
1335 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1338 srp_free_req(ch, req, scmnd, 0);
1339 scmnd->result = result;
1340 scmnd->scsi_done(scmnd);
1344 static void srp_terminate_io(struct srp_rport *rport)
1346 struct srp_target_port *target = rport->lld_data;
1347 struct srp_rdma_ch *ch;
1350 for (i = 0; i < target->ch_count; i++) {
1351 ch = &target->ch[i];
1353 for (j = 0; j < target->req_ring_size; ++j) {
1354 struct srp_request *req = &ch->req_ring[j];
1356 srp_finish_req(ch, req, NULL,
1357 DID_TRANSPORT_FAILFAST << 16);
1362 /* Calculate maximum initiator to target information unit length. */
1363 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
1365 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1366 sizeof(struct srp_indirect_buf) +
1367 cmd_sg_cnt * sizeof(struct srp_direct_buf);
1370 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1377 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1378 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1379 * srp_reset_device() or srp_reset_host() calls occur while this function
1380 * is in progress. One way to achieve this is not to call this function
1381 * directly but to call srp_reconnect_rport() instead, since that function
1382 * serializes calls of this function via rport->mutex and also blocks
1383 * srp_queuecommand() calls before invoking this function.
1385 static int srp_rport_reconnect(struct srp_rport *rport)
1387 struct srp_target_port *target = rport->lld_data;
1388 struct srp_rdma_ch *ch;
1389 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1392 bool multich = false;
1394 srp_disconnect_target(target);
1396 if (target->state == SRP_TARGET_SCANNING)
1400 * Now get a new local CM ID so that we avoid confusing the target in
1401 * case things are really fouled up. Doing so also ensures that all CM
1402 * callbacks will have finished before a new QP is allocated.
1404 for (i = 0; i < target->ch_count; i++) {
1405 ch = &target->ch[i];
1406 ret += srp_new_cm_id(ch);
1408 for (i = 0; i < target->ch_count; i++) {
1409 ch = &target->ch[i];
1410 for (j = 0; j < target->req_ring_size; ++j) {
1411 struct srp_request *req = &ch->req_ring[j];
1413 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1416 for (i = 0; i < target->ch_count; i++) {
1417 ch = &target->ch[i];
1419 * Whether or not creating a new CM ID succeeded, create a new
1420 * QP. This guarantees that all completion callback function
1421 * invocations have finished before request resetting starts.
1423 ret += srp_create_ch_ib(ch);
1425 INIT_LIST_HEAD(&ch->free_tx);
1426 for (j = 0; j < target->queue_size; ++j)
1427 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1430 target->qp_in_error = false;
1432 for (i = 0; i < target->ch_count; i++) {
1433 ch = &target->ch[i];
1436 ret = srp_connect_ch(ch, max_iu_len, multich);
1441 shost_printk(KERN_INFO, target->scsi_host,
1442 PFX "reconnect succeeded\n");
1447 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1448 unsigned int dma_len, u32 rkey)
1450 struct srp_direct_buf *desc = state->desc;
1452 WARN_ON_ONCE(!dma_len);
1454 desc->va = cpu_to_be64(dma_addr);
1455 desc->key = cpu_to_be32(rkey);
1456 desc->len = cpu_to_be32(dma_len);
1458 state->total_len += dma_len;
1463 static int srp_map_finish_fmr(struct srp_map_state *state,
1464 struct srp_rdma_ch *ch)
1466 struct srp_target_port *target = ch->target;
1467 struct srp_device *dev = target->srp_host->srp_dev;
1468 struct ib_pool_fmr *fmr;
1471 if (state->fmr.next >= state->fmr.end) {
1472 shost_printk(KERN_ERR, ch->target->scsi_host,
1473 PFX "Out of MRs (mr_per_cmd = %d)\n",
1474 ch->target->mr_per_cmd);
1478 WARN_ON_ONCE(!dev->use_fmr);
1480 if (state->npages == 0)
1483 if (state->npages == 1 && target->global_rkey) {
1484 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1485 target->global_rkey);
1489 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1490 state->npages, io_addr);
1492 return PTR_ERR(fmr);
1494 *state->fmr.next++ = fmr;
1497 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1498 state->dma_len, fmr->fmr->rkey);
1507 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1509 srp_handle_qp_err(cq, wc, "FAST REG");
1513 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1514 * where to start in the first element. If sg_offset_p != NULL then
1515 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1516 * byte that has not yet been mapped.
1518 static int srp_map_finish_fr(struct srp_map_state *state,
1519 struct srp_request *req,
1520 struct srp_rdma_ch *ch, int sg_nents,
1521 unsigned int *sg_offset_p)
1523 struct srp_target_port *target = ch->target;
1524 struct srp_device *dev = target->srp_host->srp_dev;
1525 struct ib_reg_wr wr;
1526 struct srp_fr_desc *desc;
1530 if (state->fr.next >= state->fr.end) {
1531 shost_printk(KERN_ERR, ch->target->scsi_host,
1532 PFX "Out of MRs (mr_per_cmd = %d)\n",
1533 ch->target->mr_per_cmd);
1537 WARN_ON_ONCE(!dev->use_fast_reg);
1539 if (sg_nents == 1 && target->global_rkey) {
1540 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1542 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1543 sg_dma_len(state->sg) - sg_offset,
1544 target->global_rkey);
1550 desc = srp_fr_pool_get(ch->fr_pool);
1554 rkey = ib_inc_rkey(desc->mr->rkey);
1555 ib_update_fast_reg_key(desc->mr, rkey);
1557 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1559 if (unlikely(n < 0)) {
1560 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1561 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1562 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1563 sg_offset_p ? *sg_offset_p : -1, n);
1567 WARN_ON_ONCE(desc->mr->length == 0);
1569 req->reg_cqe.done = srp_reg_mr_err_done;
1572 wr.wr.opcode = IB_WR_REG_MR;
1573 wr.wr.wr_cqe = &req->reg_cqe;
1575 wr.wr.send_flags = 0;
1577 wr.key = desc->mr->rkey;
1578 wr.access = (IB_ACCESS_LOCAL_WRITE |
1579 IB_ACCESS_REMOTE_READ |
1580 IB_ACCESS_REMOTE_WRITE);
1582 *state->fr.next++ = desc;
1585 srp_map_desc(state, desc->mr->iova,
1586 desc->mr->length, desc->mr->rkey);
1588 err = ib_post_send(ch->qp, &wr.wr, NULL);
1589 if (unlikely(err)) {
1590 WARN_ON_ONCE(err == -ENOMEM);
1597 static int srp_map_sg_entry(struct srp_map_state *state,
1598 struct srp_rdma_ch *ch,
1599 struct scatterlist *sg)
1601 struct srp_target_port *target = ch->target;
1602 struct srp_device *dev = target->srp_host->srp_dev;
1603 dma_addr_t dma_addr = sg_dma_address(sg);
1604 unsigned int dma_len = sg_dma_len(sg);
1605 unsigned int len = 0;
1608 WARN_ON_ONCE(!dma_len);
1611 unsigned offset = dma_addr & ~dev->mr_page_mask;
1613 if (state->npages == dev->max_pages_per_mr ||
1614 (state->npages > 0 && offset != 0)) {
1615 ret = srp_map_finish_fmr(state, ch);
1620 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1623 state->base_dma_addr = dma_addr;
1624 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1625 state->dma_len += len;
1631 * If the end of the MR is not on a page boundary then we need to
1632 * close it out and start a new one -- we can only merge at page
1636 if ((dma_addr & ~dev->mr_page_mask) != 0)
1637 ret = srp_map_finish_fmr(state, ch);
1641 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1642 struct srp_request *req, struct scatterlist *scat,
1645 struct scatterlist *sg;
1648 state->pages = req->map_page;
1649 state->fmr.next = req->fmr_list;
1650 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
1652 for_each_sg(scat, sg, count, i) {
1653 ret = srp_map_sg_entry(state, ch, sg);
1658 ret = srp_map_finish_fmr(state, ch);
1665 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1666 struct srp_request *req, struct scatterlist *scat,
1669 unsigned int sg_offset = 0;
1671 state->fr.next = req->fr_list;
1672 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1681 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1682 if (unlikely(n < 0))
1686 for (i = 0; i < n; i++)
1687 state->sg = sg_next(state->sg);
1693 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1694 struct srp_request *req, struct scatterlist *scat,
1697 struct srp_target_port *target = ch->target;
1698 struct scatterlist *sg;
1701 for_each_sg(scat, sg, count, i) {
1702 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1703 target->global_rkey);
1710 * Register the indirect data buffer descriptor with the HCA.
1712 * Note: since the indirect data buffer descriptor has been allocated with
1713 * kmalloc() it is guaranteed that this buffer is a physically contiguous memory buffer.
1716 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1717 void **next_mr, void **end_mr, u32 idb_len,
1720 struct srp_target_port *target = ch->target;
1721 struct srp_device *dev = target->srp_host->srp_dev;
1722 struct srp_map_state state;
1723 struct srp_direct_buf idb_desc;
1725 struct scatterlist idb_sg[1];
1728 memset(&state, 0, sizeof(state));
1729 memset(&idb_desc, 0, sizeof(idb_desc));
1730 state.gen.next = next_mr;
1731 state.gen.end = end_mr;
1732 state.desc = &idb_desc;
1733 state.base_dma_addr = req->indirect_dma_addr;
1734 state.dma_len = idb_len;
1736 if (dev->use_fast_reg) {
1738 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1739 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1740 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1741 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1743 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1746 WARN_ON_ONCE(ret < 1);
1747 } else if (dev->use_fmr) {
1748 state.pages = idb_pages;
1749 state.pages[0] = (req->indirect_dma_addr &
1752 ret = srp_map_finish_fmr(&state, ch);
1759 *idb_rkey = idb_desc.key;
1764 static void srp_check_mapping(struct srp_map_state *state,
1765 struct srp_rdma_ch *ch, struct srp_request *req,
1766 struct scatterlist *scat, int count)
1768 struct srp_device *dev = ch->target->srp_host->srp_dev;
1769 struct srp_fr_desc **pfr;
1770 u64 desc_len = 0, mr_len = 0;
1773 for (i = 0; i < state->ndesc; i++)
1774 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1775 if (dev->use_fast_reg)
1776 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1777 mr_len += (*pfr)->mr->length;
1778 else if (dev->use_fmr)
1779 for (i = 0; i < state->nmdesc; i++)
1780 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1781 if (desc_len != scsi_bufflen(req->scmnd) ||
1782 mr_len > scsi_bufflen(req->scmnd))
1783 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1784 scsi_bufflen(req->scmnd), desc_len, mr_len,
1785 state->ndesc, state->nmdesc);
1789 * srp_map_data() - map SCSI data buffer onto an SRP request
1790 * @scmnd: SCSI command to map
1791 * @ch: SRP RDMA channel
1794 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1795 * mapping failed. The size of any immediate data is not included in the
1798 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1799 struct srp_request *req)
1801 struct srp_target_port *target = ch->target;
1802 struct scatterlist *scat, *sg;
1803 struct srp_cmd *cmd = req->cmd->buf;
1804 int i, len, nents, count, ret;
1805 struct srp_device *dev;
1806 struct ib_device *ibdev;
1807 struct srp_map_state state;
1808 struct srp_indirect_buf *indirect_hdr;
1810 u32 idb_len, table_len;
1814 req->cmd->num_sge = 1;
1816 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1817 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1819 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1820 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1821 shost_printk(KERN_WARNING, target->scsi_host,
1822 PFX "Unhandled data direction %d\n",
1823 scmnd->sc_data_direction);
1827 nents = scsi_sg_count(scmnd);
1828 scat = scsi_sglist(scmnd);
1829 data_len = scsi_bufflen(scmnd);
1831 dev = target->srp_host->srp_dev;
1834 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1835 if (unlikely(count == 0))
1838 if (ch->use_imm_data &&
1839 count <= SRP_MAX_IMM_SGE &&
1840 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1841 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1842 struct srp_imm_buf *buf;
1843 struct ib_sge *sge = &req->cmd->sge[1];
1845 fmt = SRP_DATA_DESC_IMM;
1846 len = SRP_IMM_DATA_OFFSET;
1848 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1849 buf->len = cpu_to_be32(data_len);
1850 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1851 for_each_sg(scat, sg, count, i) {
1852 sge[i].addr = sg_dma_address(sg);
1853 sge[i].length = sg_dma_len(sg);
1854 sge[i].lkey = target->lkey;
1856 req->cmd->num_sge += count;
1860 fmt = SRP_DATA_DESC_DIRECT;
1861 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1862 sizeof(struct srp_direct_buf);
1864 if (count == 1 && target->global_rkey) {
1866 * The midlayer only generated a single gather/scatter
1867 * entry, or DMA mapping coalesced everything to a
1868 * single entry. So a direct descriptor along with
1869 * the DMA MR suffices.
1871 struct srp_direct_buf *buf;
1873 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1874 buf->va = cpu_to_be64(sg_dma_address(scat));
1875 buf->key = cpu_to_be32(target->global_rkey);
1876 buf->len = cpu_to_be32(sg_dma_len(scat));
1883 * We have more than one scatter/gather entry, so build our indirect
1884 * descriptor table, trying to merge as many entries as we can.
1886 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1888 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1889 target->indirect_size, DMA_TO_DEVICE);
1891 memset(&state, 0, sizeof(state));
1892 state.desc = req->indirect_desc;
1893 if (dev->use_fast_reg)
1894 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1895 else if (dev->use_fmr)
1896 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
1898 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1899 req->nmdesc = state.nmdesc;
1904 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1905 "Memory mapping consistency check");
1906 if (DYNAMIC_DEBUG_BRANCH(ddm))
1907 srp_check_mapping(&state, ch, req, scat, count);
1910 /* We've mapped the request, now pull as much of the indirect
1911 * descriptor table as we can into the command buffer. If this
1912 * target is not using an external indirect table, we are
1913 * guaranteed to fit into the command, as the SCSI layer won't
1914 * give us more S/G entries than we allow.
1916 if (state.ndesc == 1) {
1918 * Memory registration collapsed the sg-list into one entry,
1919 * so use a direct descriptor.
1921 struct srp_direct_buf *buf;
1923 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1924 *buf = req->indirect_desc[0];
1928 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1929 !target->allow_ext_sg)) {
1930 shost_printk(KERN_ERR, target->scsi_host,
1931 "Could not fit S/G list into SRP_CMD\n");
1936 count = min(state.ndesc, target->cmd_sg_cnt);
1937 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1938 idb_len = sizeof(struct srp_indirect_buf) + table_len;
1940 fmt = SRP_DATA_DESC_INDIRECT;
1941 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1942 sizeof(struct srp_indirect_buf);
1943 len += count * sizeof (struct srp_direct_buf);
1945 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1946 count * sizeof (struct srp_direct_buf));
1948 if (!target->global_rkey) {
1949 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1950 idb_len, &idb_rkey);
1955 idb_rkey = cpu_to_be32(target->global_rkey);
1958 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1959 indirect_hdr->table_desc.key = idb_rkey;
1960 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1961 indirect_hdr->len = cpu_to_be32(state.total_len);
1963 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1964 cmd->data_out_desc_cnt = count;
1966 cmd->data_in_desc_cnt = count;
1968 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1972 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1973 cmd->buf_fmt = fmt << 4;
1980 srp_unmap_data(scmnd, ch, req);
1981 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1987 * Return an IU and possible credit to the free pool
1989 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1990 enum srp_iu_type iu_type)
1992 unsigned long flags;
1994 spin_lock_irqsave(&ch->lock, flags);
1995 list_add(&iu->list, &ch->free_tx);
1996 if (iu_type != SRP_IU_RSP)
1998 spin_unlock_irqrestore(&ch->lock, flags);
2002 * Must be called with ch->lock held to protect req_lim and free_tx.
2003 * If IU is not sent, it must be returned using srp_put_tx_iu().
2006 * An upper limit for the number of allocated information units for each request type is:
2008 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
2009 * more than Scsi_Host.can_queue requests.
2010 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
2011 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
2012 * one unanswered SRP request to an initiator.
2014 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
2015 enum srp_iu_type iu_type)
2017 struct srp_target_port *target = ch->target;
2018 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
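	/*
	 * Keep SRP_TSK_MGMT_SQ_SIZE request-limit credits in reserve for all
	 * IU types except SRP_IU_TSK_MGMT so that a task management request
	 * can still be sent after regular commands have consumed the credits.
	 */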
2021 lockdep_assert_held(&ch->lock);
2023 ib_process_cq_direct(ch->send_cq, -1);
2025 if (list_empty(&ch->free_tx))
2028 /* Initiator responses to target requests do not consume credits */
2029 if (iu_type != SRP_IU_RSP) {
2030 if (ch->req_lim <= rsv) {
2031 ++target->zero_req_lim;
2038 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
2039 list_del(&iu->list);
2044 * Note: if this function is called from inside ib_drain_sq() then it will
2045 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
2046 * with status IB_WC_SUCCESS then that's a bug.
2048 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
2050 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2051 struct srp_rdma_ch *ch = cq->cq_context;
2053 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2054 srp_handle_qp_err(cq, wc, "SEND");
2058 lockdep_assert_held(&ch->lock);
2060 list_add(&iu->list, &ch->free_tx);
2064 * srp_post_send() - send an SRP information unit
2065 * @ch: RDMA channel over which to send the information unit.
2066 * @iu: Information unit to send.
2067 * @len: Length of the information unit excluding immediate data.
2069 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
2071 struct srp_target_port *target = ch->target;
2072 struct ib_send_wr wr;
2074 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
2077 iu->sge[0].addr = iu->dma;
2078 iu->sge[0].length = len;
2079 iu->sge[0].lkey = target->lkey;
2081 iu->cqe.done = srp_send_done;
2084 wr.wr_cqe = &iu->cqe;
2085 wr.sg_list = &iu->sge[0];
2086 wr.num_sge = iu->num_sge;
2087 wr.opcode = IB_WR_SEND;
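	/*
	 * Request a completion for every IU send: the send CQ uses
	 * IB_POLL_DIRECT and is polled from __srp_get_tx_iu(), and the
	 * completion returns the IU to ch->free_tx via srp_send_done().
	 */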
2088 wr.send_flags = IB_SEND_SIGNALED;
2090 return ib_post_send(ch->qp, &wr, NULL);
2093 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
2095 struct srp_target_port *target = ch->target;
2096 struct ib_recv_wr wr;
2099 list.addr = iu->dma;
2100 list.length = iu->size;
2101 list.lkey = target->lkey;
2103 iu->cqe.done = srp_recv_done;
2106 wr.wr_cqe = &iu->cqe;
2110 return ib_post_recv(ch->qp, &wr, NULL);
2113 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
2115 struct srp_target_port *target = ch->target;
2116 struct srp_request *req;
2117 struct scsi_cmnd *scmnd;
2118 unsigned long flags;
2120 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
2121 spin_lock_irqsave(&ch->lock, flags);
2122 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
2123 if (rsp->tag == ch->tsk_mgmt_tag) {
2124 ch->tsk_mgmt_status = -1;
2125 if (be32_to_cpu(rsp->resp_data_len) >= 4)
2126 ch->tsk_mgmt_status = rsp->data[3];
2127 complete(&ch->tsk_mgmt_done);
2129 shost_printk(KERN_ERR, target->scsi_host,
2130 "Received tsk mgmt response too late for tag %#llx\n",
2133 spin_unlock_irqrestore(&ch->lock, flags);
2135 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
2136 if (scmnd && scmnd->host_scribble) {
2137 req = (void *)scmnd->host_scribble;
2138 scmnd = srp_claim_req(ch, req, NULL, scmnd);
2143 shost_printk(KERN_ERR, target->scsi_host,
2144 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
2145 rsp->tag, ch - target->ch, ch->qp->qp_num);
2147 spin_lock_irqsave(&ch->lock, flags);
2148 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
2149 spin_unlock_irqrestore(&ch->lock, flags);
2153 scmnd->result = rsp->status;
2155 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
2156 memcpy(scmnd->sense_buffer, rsp->data +
2157 be32_to_cpu(rsp->resp_data_len),
2158 min_t(int, be32_to_cpu(rsp->sense_data_len),
2159 SCSI_SENSE_BUFFERSIZE));
2162 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
2163 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
2164 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
2165 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
2166 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
2167 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
2168 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
2169 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
2171 srp_free_req(ch, req, scmnd,
2172 be32_to_cpu(rsp->req_lim_delta));
2174 scmnd->host_scribble = NULL;
2175 scmnd->scsi_done(scmnd);
2179 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2182 struct srp_target_port *target = ch->target;
2183 struct ib_device *dev = target->srp_host->srp_dev->dev;
2184 unsigned long flags;
2188 spin_lock_irqsave(&ch->lock, flags);
2189 ch->req_lim += req_delta;
2190 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2191 spin_unlock_irqrestore(&ch->lock, flags);
2194 shost_printk(KERN_ERR, target->scsi_host, PFX
2195 "no IU available to send response\n");
2200 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2201 memcpy(iu->buf, rsp, len);
2202 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2204 err = srp_post_send(ch, iu, len);
2206 shost_printk(KERN_ERR, target->scsi_host, PFX
2207 "unable to post response: %d\n", err);
2208 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2214 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2215 struct srp_cred_req *req)
2217 struct srp_cred_rsp rsp = {
2218 .opcode = SRP_CRED_RSP,
2221 s32 delta = be32_to_cpu(req->req_lim_delta);
2223 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2224 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2225 "problems processing SRP_CRED_REQ\n");
2228 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2229 struct srp_aer_req *req)
2231 struct srp_target_port *target = ch->target;
2232 struct srp_aer_rsp rsp = {
2233 .opcode = SRP_AER_RSP,
2236 s32 delta = be32_to_cpu(req->req_lim_delta);
2238 shost_printk(KERN_ERR, target->scsi_host, PFX
2239 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2241 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2242 shost_printk(KERN_ERR, target->scsi_host, PFX
2243 "problems processing SRP_AER_REQ\n");
2246 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2248 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2249 struct srp_rdma_ch *ch = cq->cq_context;
2250 struct srp_target_port *target = ch->target;
2251 struct ib_device *dev = target->srp_host->srp_dev->dev;
2255 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2256 srp_handle_qp_err(cq, wc, "RECV");
2260 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2263 opcode = *(u8 *) iu->buf;
2266 shost_printk(KERN_ERR, target->scsi_host,
2267 PFX "recv completion, opcode 0x%02x\n", opcode);
2268 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2269 iu->buf, wc->byte_len, true);
2274 srp_process_rsp(ch, iu->buf);
2278 srp_process_cred_req(ch, iu->buf);
2282 srp_process_aer_req(ch, iu->buf);
2286 /* XXX Handle target logout */
2287 shost_printk(KERN_WARNING, target->scsi_host,
2288 PFX "Got target logout request\n");
2292 shost_printk(KERN_WARNING, target->scsi_host,
2293 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2297 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2300 res = srp_post_recv(ch, iu);
2302 shost_printk(KERN_ERR, target->scsi_host,
2303 PFX "Recv failed with error code %d\n", res);
2307 * srp_tl_err_work() - handle a transport layer error
2308 * @work: Work structure embedded in an SRP target port.
2310 * Note: This function may get invoked before the rport has been created,
2311 * hence the target->rport test.
2313 static void srp_tl_err_work(struct work_struct *work)
2315 struct srp_target_port *target;
2317 target = container_of(work, struct srp_target_port, tl_err_work);
2319 srp_start_tl_fail_timers(target->rport);
2322 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2325 struct srp_rdma_ch *ch = cq->cq_context;
2326 struct srp_target_port *target = ch->target;
2328 if (ch->connected && !target->qp_in_error) {
2329 shost_printk(KERN_ERR, target->scsi_host,
2330 PFX "failed %s status %s (%d) for CQE %p\n",
2331 opname, ib_wc_status_msg(wc->status), wc->status,
2333 queue_work(system_long_wq, &target->tl_err_work);
2335 target->qp_in_error = true;
2338 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2340 struct srp_target_port *target = host_to_target(shost);
2341 struct srp_rport *rport = target->rport;
2342 struct srp_rdma_ch *ch;
2343 struct srp_request *req;
2345 struct srp_cmd *cmd;
2346 struct ib_device *dev;
2347 unsigned long flags;
2351 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2354 * The SCSI EH thread is the only context from which srp_queuecommand()
2355 * can get invoked for blocked devices (SDEV_BLOCK /
2356 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2357 * locking the rport mutex if invoked from inside the SCSI EH.
2360 mutex_lock(&rport->mutex);
2362 scmnd->result = srp_chkready(target->rport);
2363 if (unlikely(scmnd->result))
2366 WARN_ON_ONCE(scmnd->request->tag < 0);
2367 tag = blk_mq_unique_tag(scmnd->request);
2368 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2369 idx = blk_mq_unique_tag_to_tag(tag);
2370 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2371 dev_name(&shost->shost_gendev), tag, idx,
2372 target->req_ring_size);
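/*
 * The unique tag decoded above packs the hardware queue index in its
 * upper bits and the per-queue tag in its lower 16 bits, so the hwq
 * part selects the RDMA channel and the tag part indexes that
 * channel's request ring.
 */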
2374 spin_lock_irqsave(&ch->lock, flags);
2375 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2376 spin_unlock_irqrestore(&ch->lock, flags);
2381 req = &ch->req_ring[idx];
2382 dev = target->srp_host->srp_dev->dev;
2383 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2386 scmnd->host_scribble = (void *) req;
2389 memset(cmd, 0, sizeof *cmd);
2391 cmd->opcode = SRP_CMD;
2392 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2394 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2395 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2396 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2398 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2405 len = srp_map_data(scmnd, ch, req);
2407 shost_printk(KERN_ERR, target->scsi_host,
2408 PFX "Failed to map data (%d)\n", len);
2410 * If we ran out of memory descriptors (-ENOMEM) because an
2411 * application is queuing many requests with more than
2412 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2413 * to reduce queue depth temporarily.
2415 scmnd->result = len == -ENOMEM ?
2416 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2420 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2423 if (srp_post_send(ch, iu, len)) {
2424 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2425 scmnd->result = DID_ERROR << 16;
2433 mutex_unlock(&rport->mutex);
2438 srp_unmap_data(scmnd, ch, req);
2441 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2444 * Ensure that the loops that iterate over the request ring cannot
2445 * encounter a dangling SCSI command pointer.
2450 if (scmnd->result) {
2451 scmnd->scsi_done(scmnd);
2454 ret = SCSI_MLQUEUE_HOST_BUSY;
2461 * Note: the resources allocated in this function are freed in
2464 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2466 struct srp_target_port *target = ch->target;
2469 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2473 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2478 for (i = 0; i < target->queue_size; ++i) {
2479 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2481 GFP_KERNEL, DMA_FROM_DEVICE);
2482 if (!ch->rx_ring[i])
2486 for (i = 0; i < target->queue_size; ++i) {
2487 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2489 GFP_KERNEL, DMA_TO_DEVICE);
2490 if (!ch->tx_ring[i])
2493 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2499 for (i = 0; i < target->queue_size; ++i) {
2500 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2501 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2514 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2516 uint64_t T_tr_ns, max_compl_time_ms;
2517 uint32_t rq_tmo_jiffies;
2520 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2521 * table 91), both the QP timeout and the retry count have to be set
2522 * for RC QP's during the RTR to RTS transition.
2524 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2525 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2528 * Set target->rq_tmo_jiffies to one second more than the largest time
2529 * it can take before an error completion is generated. See also
2530 * C9-140..142 in the IBTA spec for more information about how to
2531 * convert the QP Local ACK Timeout value to nanoseconds.
2533 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2534 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2535 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2536 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2538 return rq_tmo_jiffies;
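/*
 * Worked example (illustrative values): with a Local ACK Timeout of 20
 * and retry_cnt = 7, T_tr_ns = 4096 * 2^20 ns ~= 4.3 s, so
 * max_compl_time_ms = 7 * 4 * T_tr_ns / NSEC_PER_MSEC ~= 120259 ms and
 * the resulting rq_tmo_jiffies corresponds to roughly 121 seconds.
 */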
2541 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2542 const struct srp_login_rsp *lrsp,
2543 struct srp_rdma_ch *ch)
2545 struct srp_target_port *target = ch->target;
2546 struct ib_qp_attr *qp_attr = NULL;
2551 if (lrsp->opcode == SRP_LOGIN_RSP) {
2552 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2553 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2554 ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
2555 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2557 WARN_ON_ONCE(ch->max_it_iu_len >
2558 be32_to_cpu(lrsp->max_it_iu_len));
2560 if (ch->use_imm_data)
2561 shost_printk(KERN_DEBUG, target->scsi_host,
2562 PFX "using immediate data\n");
2565 * Reserve credits for task management so we don't
2566 * bounce requests back to the SCSI mid-layer.
2568 target->scsi_host->can_queue
2569 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2570 target->scsi_host->can_queue);
2571 target->scsi_host->cmd_per_lun
2572 = min_t(int, target->scsi_host->can_queue,
2573 target->scsi_host->cmd_per_lun);
2575 shost_printk(KERN_WARNING, target->scsi_host,
2576 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2582 ret = srp_alloc_iu_bufs(ch);
2587 for (i = 0; i < target->queue_size; i++) {
2588 struct srp_iu *iu = ch->rx_ring[i];
2590 ret = srp_post_recv(ch, iu);
2595 if (!target->using_rdma_cm) {
2597 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2601 qp_attr->qp_state = IB_QPS_RTR;
2602 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2606 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2610 qp_attr->qp_state = IB_QPS_RTS;
2611 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2615 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2617 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2621 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2631 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2632 const struct ib_cm_event *event,
2633 struct srp_rdma_ch *ch)
2635 struct srp_target_port *target = ch->target;
2636 struct Scsi_Host *shost = target->scsi_host;
2637 struct ib_class_port_info *cpi;
2641 switch (event->param.rej_rcvd.reason) {
2642 case IB_CM_REJ_PORT_CM_REDIRECT:
2643 cpi = event->param.rej_rcvd.ari;
2644 dlid = be16_to_cpu(cpi->redirect_lid);
2645 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2646 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2647 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2648 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2650 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2653 case IB_CM_REJ_PORT_REDIRECT:
2654 if (srp_target_is_topspin(target)) {
2655 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2658 * Topspin/Cisco SRP gateways incorrectly send
2659 * reject reason code 25 when they mean 24
2662 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2664 shost_printk(KERN_DEBUG, shost,
2665 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2666 be64_to_cpu(dgid->global.subnet_prefix),
2667 be64_to_cpu(dgid->global.interface_id));
2669 ch->status = SRP_PORT_REDIRECT;
2671 shost_printk(KERN_WARNING, shost,
2672 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2673 ch->status = -ECONNRESET;
2677 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2678 shost_printk(KERN_WARNING, shost,
2679 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2680 ch->status = -ECONNRESET;
2683 case IB_CM_REJ_CONSUMER_DEFINED:
2684 opcode = *(u8 *) event->private_data;
2685 if (opcode == SRP_LOGIN_REJ) {
2686 struct srp_login_rej *rej = event->private_data;
2687 u32 reason = be32_to_cpu(rej->reason);
2689 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2690 shost_printk(KERN_WARNING, shost,
2691 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2693 shost_printk(KERN_WARNING, shost, PFX
2694 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2696 target->ib_cm.orig_dgid.raw,
2699 shost_printk(KERN_WARNING, shost,
2700 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2701 " opcode 0x%02x\n", opcode);
2702 ch->status = -ECONNRESET;
2705 case IB_CM_REJ_STALE_CONN:
2706 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2707 ch->status = SRP_STALE_CONN;
2711 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2712 event->param.rej_rcvd.reason);
2713 ch->status = -ECONNRESET;
2717 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2718 const struct ib_cm_event *event)
2720 struct srp_rdma_ch *ch = cm_id->context;
2721 struct srp_target_port *target = ch->target;
2724 switch (event->event) {
2725 case IB_CM_REQ_ERROR:
2726 shost_printk(KERN_DEBUG, target->scsi_host,
2727 PFX "Sending CM REQ failed\n");
2729 ch->status = -ECONNRESET;
2732 case IB_CM_REP_RECEIVED:
2734 srp_cm_rep_handler(cm_id, event->private_data, ch);
2737 case IB_CM_REJ_RECEIVED:
2738 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2741 srp_ib_cm_rej_handler(cm_id, event, ch);
2744 case IB_CM_DREQ_RECEIVED:
2745 shost_printk(KERN_WARNING, target->scsi_host,
2746 PFX "DREQ received - connection closed\n");
2747 ch->connected = false;
2748 if (ib_send_cm_drep(cm_id, NULL, 0))
2749 shost_printk(KERN_ERR, target->scsi_host,
2750 PFX "Sending CM DREP failed\n");
2751 queue_work(system_long_wq, &target->tl_err_work);
2754 case IB_CM_TIMEWAIT_EXIT:
2755 shost_printk(KERN_ERR, target->scsi_host,
2756 PFX "connection closed\n");
2762 case IB_CM_MRA_RECEIVED:
2763 case IB_CM_DREQ_ERROR:
2764 case IB_CM_DREP_RECEIVED:
2768 shost_printk(KERN_WARNING, target->scsi_host,
2769 PFX "Unhandled CM event %d\n", event->event);
2774 complete(&ch->done);
2779 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2780 struct rdma_cm_event *event)
2782 struct srp_target_port *target = ch->target;
2783 struct Scsi_Host *shost = target->scsi_host;
2786 switch (event->status) {
2787 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2788 shost_printk(KERN_WARNING, shost,
2789 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2790 ch->status = -ECONNRESET;
2793 case IB_CM_REJ_CONSUMER_DEFINED:
2794 opcode = *(u8 *) event->param.conn.private_data;
2795 if (opcode == SRP_LOGIN_REJ) {
2796 struct srp_login_rej *rej =
2797 (struct srp_login_rej *)
2798 event->param.conn.private_data;
2799 u32 reason = be32_to_cpu(rej->reason);
2801 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2802 shost_printk(KERN_WARNING, shost,
2803 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2805 shost_printk(KERN_WARNING, shost,
2806 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2808 shost_printk(KERN_WARNING, shost,
2809 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2812 ch->status = -ECONNRESET;
2815 case IB_CM_REJ_STALE_CONN:
2816 shost_printk(KERN_WARNING, shost,
2817 " REJ reason: stale connection\n");
2818 ch->status = SRP_STALE_CONN;
2822 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2824 ch->status = -ECONNRESET;
2829 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2830 struct rdma_cm_event *event)
2832 struct srp_rdma_ch *ch = cm_id->context;
2833 struct srp_target_port *target = ch->target;
2836 switch (event->event) {
2837 case RDMA_CM_EVENT_ADDR_RESOLVED:
2842 case RDMA_CM_EVENT_ADDR_ERROR:
2843 ch->status = -ENXIO;
2847 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2852 case RDMA_CM_EVENT_ROUTE_ERROR:
2853 case RDMA_CM_EVENT_UNREACHABLE:
2854 ch->status = -EHOSTUNREACH;
2858 case RDMA_CM_EVENT_CONNECT_ERROR:
2859 shost_printk(KERN_DEBUG, target->scsi_host,
2860 PFX "Sending CM REQ failed\n");
2862 ch->status = -ECONNRESET;
2865 case RDMA_CM_EVENT_ESTABLISHED:
2867 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2870 case RDMA_CM_EVENT_REJECTED:
2871 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2874 srp_rdma_cm_rej_handler(ch, event);
2877 case RDMA_CM_EVENT_DISCONNECTED:
2878 if (ch->connected) {
2879 shost_printk(KERN_WARNING, target->scsi_host,
2880 PFX "received DREQ\n");
2881 rdma_disconnect(ch->rdma_cm.cm_id);
2884 queue_work(system_long_wq, &target->tl_err_work);
2888 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2889 shost_printk(KERN_ERR, target->scsi_host,
2890 PFX "connection closed\n");
2897 shost_printk(KERN_WARNING, target->scsi_host,
2898 PFX "Unhandled CM event %d\n", event->event);
2903 complete(&ch->done);
2909 * srp_change_queue_depth - set the device queue depth
2910 * @sdev: SCSI device struct
2911 * @qdepth: requested queue depth
2913 * Returns the new queue depth.
2916 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2918 if (!sdev->tagged_supported)
2920 return scsi_change_queue_depth(sdev, qdepth);
2923 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2924 u8 func, u8 *status)
2926 struct srp_target_port *target = ch->target;
2927 struct srp_rport *rport = target->rport;
2928 struct ib_device *dev = target->srp_host->srp_dev->dev;
2930 struct srp_tsk_mgmt *tsk_mgmt;
2933 if (!ch->connected || target->qp_in_error)
2937 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2938 * invoked while a task management function is being sent.
2940 mutex_lock(&rport->mutex);
2941 spin_lock_irq(&ch->lock);
2942 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2943 spin_unlock_irq(&ch->lock);
2946 mutex_unlock(&rport->mutex);
2953 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2956 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2958 tsk_mgmt->opcode = SRP_TSK_MGMT;
2959 int_to_scsilun(lun, &tsk_mgmt->lun);
2960 tsk_mgmt->tsk_mgmt_func = func;
2961 tsk_mgmt->task_tag = req_tag;
2963 spin_lock_irq(&ch->lock);
2964 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2965 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2966 spin_unlock_irq(&ch->lock);
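/*
 * The tag assigned above carries the SRP_TAG_TSK_MGMT marker so that
 * the response path can distinguish task-management responses from
 * SCSI command responses; see ib_srp.h for the exact tag layout.
 */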
2968 init_completion(&ch->tsk_mgmt_done);
2970 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2972 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2973 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2974 mutex_unlock(&rport->mutex);
2978 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2979 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2980 if (res > 0 && status)
2981 *status = ch->tsk_mgmt_status;
2982 mutex_unlock(&rport->mutex);
2984 WARN_ON_ONCE(res < 0);
2986 return res > 0 ? 0 : -1;
2989 static int srp_abort(struct scsi_cmnd *scmnd)
2991 struct srp_target_port *target = host_to_target(scmnd->device->host);
2992 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2995 struct srp_rdma_ch *ch;
2998 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
3002 tag = blk_mq_unique_tag(scmnd->request);
3003 ch_idx = blk_mq_unique_tag_to_hwq(tag);
3004 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
3006 ch = &target->ch[ch_idx];
3007 if (!srp_claim_req(ch, req, NULL, scmnd))
3009 shost_printk(KERN_ERR, target->scsi_host,
3010 "Sending SRP abort for tag %#x\n", tag);
3011 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
3012 SRP_TSK_ABORT_TASK, NULL) == 0)
3014 else if (target->rport->state == SRP_RPORT_LOST)
3018 if (ret == SUCCESS) {
3019 srp_free_req(ch, req, scmnd, 0);
3020 scmnd->result = DID_ABORT << 16;
3021 scmnd->scsi_done(scmnd);
3027 static int srp_reset_device(struct scsi_cmnd *scmnd)
3029 struct srp_target_port *target = host_to_target(scmnd->device->host);
3030 struct srp_rdma_ch *ch;
3034 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
3036 ch = &target->ch[0];
3037 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
3038 SRP_TSK_LUN_RESET, &status))
3043 for (i = 0; i < target->ch_count; i++) {
3044 ch = &target->ch[i];
3045 for (j = 0; j < target->req_ring_size; ++j) {
3046 struct srp_request *req = &ch->req_ring[j];
3048 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
3055 static int srp_reset_host(struct scsi_cmnd *scmnd)
3057 struct srp_target_port *target = host_to_target(scmnd->device->host);
3059 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
3061 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
3064 static int srp_target_alloc(struct scsi_target *starget)
3066 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3067 struct srp_target_port *target = host_to_target(shost);
3069 if (target->target_can_queue)
3070 starget->can_queue = target->target_can_queue;
3074 static int srp_slave_alloc(struct scsi_device *sdev)
3076 struct Scsi_Host *shost = sdev->host;
3077 struct srp_target_port *target = host_to_target(shost);
3078 struct srp_device *srp_dev = target->srp_host->srp_dev;
3079 struct ib_device *ibdev = srp_dev->dev;
3081 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3082 blk_queue_virt_boundary(sdev->request_queue,
3083 ~srp_dev->mr_page_mask);
3088 static int srp_slave_configure(struct scsi_device *sdev)
3090 struct Scsi_Host *shost = sdev->host;
3091 struct srp_target_port *target = host_to_target(shost);
3092 struct request_queue *q = sdev->request_queue;
3093 unsigned long timeout;
3095 if (sdev->type == TYPE_DISK) {
3096 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
3097 blk_queue_rq_timeout(q, timeout);
3103 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
3106 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3108 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
3111 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
3114 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3116 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
3119 static ssize_t show_service_id(struct device *dev,
3120 struct device_attribute *attr, char *buf)
3122 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3124 if (target->using_rdma_cm)
3126 return sprintf(buf, "0x%016llx\n",
3127 be64_to_cpu(target->ib_cm.service_id));
3130 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
3133 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3135 if (target->using_rdma_cm)
3137 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
3140 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
3143 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3145 return sprintf(buf, "%pI6\n", target->sgid.raw);
3148 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
3151 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3152 struct srp_rdma_ch *ch = &target->ch[0];
3154 if (target->using_rdma_cm)
3156 return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
3159 static ssize_t show_orig_dgid(struct device *dev,
3160 struct device_attribute *attr, char *buf)
3162 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3164 if (target->using_rdma_cm)
3166 return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
3169 static ssize_t show_req_lim(struct device *dev,
3170 struct device_attribute *attr, char *buf)
3172 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3173 struct srp_rdma_ch *ch;
3174 int i, req_lim = INT_MAX;
3176 for (i = 0; i < target->ch_count; i++) {
3177 ch = &target->ch[i];
3178 req_lim = min(req_lim, ch->req_lim);
3180 return sprintf(buf, "%d\n", req_lim);
3183 static ssize_t show_zero_req_lim(struct device *dev,
3184 struct device_attribute *attr, char *buf)
3186 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3188 return sprintf(buf, "%d\n", target->zero_req_lim);
3191 static ssize_t show_local_ib_port(struct device *dev,
3192 struct device_attribute *attr, char *buf)
3194 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3196 return sprintf(buf, "%d\n", target->srp_host->port);
3199 static ssize_t show_local_ib_device(struct device *dev,
3200 struct device_attribute *attr, char *buf)
3202 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3204 return sprintf(buf, "%s\n",
3205 dev_name(&target->srp_host->srp_dev->dev->dev));
3208 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
3211 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3213 return sprintf(buf, "%d\n", target->ch_count);
3216 static ssize_t show_comp_vector(struct device *dev,
3217 struct device_attribute *attr, char *buf)
3219 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3221 return sprintf(buf, "%d\n", target->comp_vector);
3224 static ssize_t show_tl_retry_count(struct device *dev,
3225 struct device_attribute *attr, char *buf)
3227 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3229 return sprintf(buf, "%d\n", target->tl_retry_count);
3232 static ssize_t show_cmd_sg_entries(struct device *dev,
3233 struct device_attribute *attr, char *buf)
3235 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3237 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
3240 static ssize_t show_allow_ext_sg(struct device *dev,
3241 struct device_attribute *attr, char *buf)
3243 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3245 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3248 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
3249 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
3250 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
3251 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
3252 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
3253 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
3254 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
3255 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
3256 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
3257 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
3258 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
3259 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
3260 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
3261 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
3262 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
3263 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
3265 static struct device_attribute *srp_host_attrs[] = {
3268 &dev_attr_service_id,
3272 &dev_attr_orig_dgid,
3274 &dev_attr_zero_req_lim,
3275 &dev_attr_local_ib_port,
3276 &dev_attr_local_ib_device,
3278 &dev_attr_comp_vector,
3279 &dev_attr_tl_retry_count,
3280 &dev_attr_cmd_sg_entries,
3281 &dev_attr_allow_ext_sg,
3285 static struct scsi_host_template srp_template = {
3286 .module = THIS_MODULE,
3287 .name = "InfiniBand SRP initiator",
3288 .proc_name = DRV_NAME,
3289 .target_alloc = srp_target_alloc,
3290 .slave_alloc = srp_slave_alloc,
3291 .slave_configure = srp_slave_configure,
3292 .info = srp_target_info,
3293 .queuecommand = srp_queuecommand,
3294 .change_queue_depth = srp_change_queue_depth,
3295 .eh_timed_out = srp_timed_out,
3296 .eh_abort_handler = srp_abort,
3297 .eh_device_reset_handler = srp_reset_device,
3298 .eh_host_reset_handler = srp_reset_host,
3299 .skip_settle_delay = true,
3300 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
3301 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
3303 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
3304 .shost_attrs = srp_host_attrs,
3305 .track_queue_depth = 1,
3308 static int srp_sdev_count(struct Scsi_Host *host)
3310 struct scsi_device *sdev;
3313 shost_for_each_device(sdev, host)
3321 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3322 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3323 * removal has been scheduled.
3324 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3326 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3328 struct srp_rport_identifiers ids;
3329 struct srp_rport *rport;
3331 target->state = SRP_TARGET_SCANNING;
3332 sprintf(target->target_name, "SRP.T10:%016llX",
3333 be64_to_cpu(target->id_ext));
3335 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3338 memcpy(ids.port_id, &target->id_ext, 8);
3339 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3340 ids.roles = SRP_RPORT_ROLE_TARGET;
3341 rport = srp_rport_add(target->scsi_host, &ids);
3342 if (IS_ERR(rport)) {
3343 scsi_remove_host(target->scsi_host);
3344 return PTR_ERR(rport);
3347 rport->lld_data = target;
3348 target->rport = rport;
3350 spin_lock(&host->target_lock);
3351 list_add_tail(&target->list, &host->target_list);
3352 spin_unlock(&host->target_lock);
3354 scsi_scan_target(&target->scsi_host->shost_gendev,
3355 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3357 if (srp_connected_ch(target) < target->ch_count ||
3358 target->qp_in_error) {
3359 shost_printk(KERN_INFO, target->scsi_host,
3360 PFX "SCSI scan failed - removing SCSI host\n");
3361 srp_queue_remove_work(target);
3365 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3366 dev_name(&target->scsi_host->shost_gendev),
3367 srp_sdev_count(target->scsi_host));
3369 spin_lock_irq(&target->lock);
3370 if (target->state == SRP_TARGET_SCANNING)
3371 target->state = SRP_TARGET_LIVE;
3372 spin_unlock_irq(&target->lock);
3378 static void srp_release_dev(struct device *dev)
3380 struct srp_host *host =
3381 container_of(dev, struct srp_host, dev);
3383 complete(&host->released);
3386 static struct class srp_class = {
3387 .name = "infiniband_srp",
3388 .dev_release = srp_release_dev
3392 * srp_conn_unique() - check whether the connection to a target is unique
3394 * @target: SRP target port.
3396 static bool srp_conn_unique(struct srp_host *host,
3397 struct srp_target_port *target)
3399 struct srp_target_port *t;
3402 if (target->state == SRP_TARGET_REMOVED)
3407 spin_lock(&host->target_lock);
3408 list_for_each_entry(t, &host->target_list, list) {
3410 target->id_ext == t->id_ext &&
3411 target->ioc_guid == t->ioc_guid &&
3412 target->initiator_ext == t->initiator_ext) {
3417 spin_unlock(&host->target_lock);
3424 * Target ports are added by writing
3426 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3427 * pkey=<P_Key>,service_id=<service ID>
3429 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3430 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3432 * to the add_target sysfs attribute.
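 *
 * For example (illustrative placeholders, IB CM syntax):
 *
 *   echo id_ext=<16 hex digits>,ioc_guid=<16 hex digits>,\
 *        dgid=<32 hex digits>,pkey=ffff,service_id=<16 hex digits> \
 *        > /sys/class/infiniband_srp/srp-<hca>-<port>/add_target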
3436 SRP_OPT_ID_EXT = 1 << 0,
3437 SRP_OPT_IOC_GUID = 1 << 1,
3438 SRP_OPT_DGID = 1 << 2,
3439 SRP_OPT_PKEY = 1 << 3,
3440 SRP_OPT_SERVICE_ID = 1 << 4,
3441 SRP_OPT_MAX_SECT = 1 << 5,
3442 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3443 SRP_OPT_IO_CLASS = 1 << 7,
3444 SRP_OPT_INITIATOR_EXT = 1 << 8,
3445 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3446 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3447 SRP_OPT_SG_TABLESIZE = 1 << 11,
3448 SRP_OPT_COMP_VECTOR = 1 << 12,
3449 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3450 SRP_OPT_QUEUE_SIZE = 1 << 14,
3451 SRP_OPT_IP_SRC = 1 << 15,
3452 SRP_OPT_IP_DEST = 1 << 16,
3453 SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
3456 static unsigned int srp_opt_mandatory[] = {
3467 static const match_table_t srp_opt_tokens = {
3468 { SRP_OPT_ID_EXT, "id_ext=%s" },
3469 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3470 { SRP_OPT_DGID, "dgid=%s" },
3471 { SRP_OPT_PKEY, "pkey=%x" },
3472 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3473 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3474 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3475 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
3476 { SRP_OPT_IO_CLASS, "io_class=%x" },
3477 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3478 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3479 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3480 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3481 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3482 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3483 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3484 { SRP_OPT_IP_SRC, "src=%s" },
3485 { SRP_OPT_IP_DEST, "dest=%s" },
3486 { SRP_OPT_ERR, NULL }
3490 * srp_parse_in - parse an IP address and port number combination
3491 * @net: [in] Network namespace.
3492 * @sa: [out] Address family, IP address and port number.
3493 * @addr_port_str: [in] IP address and port number.
3495 * Parse the following address formats:
3496 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3497 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3499 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3500 const char *addr_port_str)
3502 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3508 port_str = strrchr(addr, ':');
3512 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3513 if (ret && addr[0]) {
3514 addr_end = addr + strlen(addr) - 1;
3515 if (addr[0] == '[' && *addr_end == ']') {
3517 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3522 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3526 static int srp_parse_options(struct net *net, const char *buf,
3527 struct srp_target_port *target)
3529 char *options, *sep_opt;
3531 substring_t args[MAX_OPT_ARGS];
3532 unsigned long long ull;
3538 options = kstrdup(buf, GFP_KERNEL);
3543 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3547 token = match_token(p, srp_opt_tokens, args);
3551 case SRP_OPT_ID_EXT:
3552 p = match_strdup(args);
3557 ret = kstrtoull(p, 16, &ull);
3559 pr_warn("invalid id_ext parameter '%s'\n", p);
3563 target->id_ext = cpu_to_be64(ull);
3567 case SRP_OPT_IOC_GUID:
3568 p = match_strdup(args);
3573 ret = kstrtoull(p, 16, &ull);
3575 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3579 target->ioc_guid = cpu_to_be64(ull);
3584 p = match_strdup(args);
3589 if (strlen(p) != 32) {
3590 pr_warn("bad dest GID parameter '%s'\n", p);
3595 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3602 if (match_hex(args, &token)) {
3603 pr_warn("bad P_Key parameter '%s'\n", p);
3606 target->ib_cm.pkey = cpu_to_be16(token);
3609 case SRP_OPT_SERVICE_ID:
3610 p = match_strdup(args);
3615 ret = kstrtoull(p, 16, &ull);
3617 pr_warn("bad service_id parameter '%s'\n", p);
3621 target->ib_cm.service_id = cpu_to_be64(ull);
3625 case SRP_OPT_IP_SRC:
3626 p = match_strdup(args);
3631 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
3633 pr_warn("bad source parameter '%s'\n", p);
3637 target->rdma_cm.src_specified = true;
3641 case SRP_OPT_IP_DEST:
3642 p = match_strdup(args);
3647 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
3649 pr_warn("bad dest parameter '%s'\n", p);
3653 target->using_rdma_cm = true;
3657 case SRP_OPT_MAX_SECT:
3658 if (match_int(args, &token)) {
3659 pr_warn("bad max sect parameter '%s'\n", p);
3662 target->scsi_host->max_sectors = token;
3665 case SRP_OPT_QUEUE_SIZE:
3666 if (match_int(args, &token) || token < 1) {
3667 pr_warn("bad queue_size parameter '%s'\n", p);
3670 target->scsi_host->can_queue = token;
3671 target->queue_size = token + SRP_RSP_SQ_SIZE +
3672 SRP_TSK_MGMT_SQ_SIZE;
3673 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3674 target->scsi_host->cmd_per_lun = token;
3677 case SRP_OPT_MAX_CMD_PER_LUN:
3678 if (match_int(args, &token) || token < 1) {
3679 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3683 target->scsi_host->cmd_per_lun = token;
3686 case SRP_OPT_TARGET_CAN_QUEUE:
3687 if (match_int(args, &token) || token < 1) {
3688 pr_warn("bad max target_can_queue parameter '%s'\n",
3692 target->target_can_queue = token;
3695 case SRP_OPT_IO_CLASS:
3696 if (match_hex(args, &token)) {
3697 pr_warn("bad IO class parameter '%s'\n", p);
3700 if (token != SRP_REV10_IB_IO_CLASS &&
3701 token != SRP_REV16A_IB_IO_CLASS) {
3702 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3703 token, SRP_REV10_IB_IO_CLASS,
3704 SRP_REV16A_IB_IO_CLASS);
3707 target->io_class = token;
3710 case SRP_OPT_INITIATOR_EXT:
3711 p = match_strdup(args);
3716 ret = kstrtoull(p, 16, &ull);
3718 pr_warn("bad initiator_ext value '%s'\n", p);
3722 target->initiator_ext = cpu_to_be64(ull);
3726 case SRP_OPT_CMD_SG_ENTRIES:
3727 if (match_int(args, &token) || token < 1 || token > 255) {
3728 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3732 target->cmd_sg_cnt = token;
3735 case SRP_OPT_ALLOW_EXT_SG:
3736 if (match_int(args, &token)) {
3737 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3740 target->allow_ext_sg = !!token;
3743 case SRP_OPT_SG_TABLESIZE:
3744 if (match_int(args, &token) || token < 1 ||
3745 token > SG_MAX_SEGMENTS) {
3746 pr_warn("bad max sg_tablesize parameter '%s'\n",
3750 target->sg_tablesize = token;
3753 case SRP_OPT_COMP_VECTOR:
3754 if (match_int(args, &token) || token < 0) {
3755 pr_warn("bad comp_vector parameter '%s'\n", p);
3758 target->comp_vector = token;
3761 case SRP_OPT_TL_RETRY_COUNT:
3762 if (match_int(args, &token) || token < 2 || token > 7) {
3763 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3767 target->tl_retry_count = token;
3771 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3777 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3778 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3784 pr_warn("target creation request is missing one or more parameters\n");
3786 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3787 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3788 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3789 target->scsi_host->cmd_per_lun,
3790 target->scsi_host->can_queue);
3797 static ssize_t srp_create_target(struct device *dev,
3798 struct device_attribute *attr,
3799 const char *buf, size_t count)
3801 struct srp_host *host =
3802 container_of(dev, struct srp_host, dev);
3803 struct Scsi_Host *target_host;
3804 struct srp_target_port *target;
3805 struct srp_rdma_ch *ch;
3806 struct srp_device *srp_dev = host->srp_dev;
3807 struct ib_device *ibdev = srp_dev->dev;
3808 int ret, node_idx, node, cpu, i;
3809 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3810 bool multich = false;
3811 uint32_t max_iu_len;
3813 target_host = scsi_host_alloc(&srp_template,
3814 sizeof (struct srp_target_port));
3818 target_host->transportt = ib_srp_transport_template;
3819 target_host->max_channel = 0;
3820 target_host->max_id = 1;
3821 target_host->max_lun = -1LL;
3822 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3823 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3825 target = host_to_target(target_host);
3827 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3828 target->io_class = SRP_REV16A_IB_IO_CLASS;
3829 target->scsi_host = target_host;
3830 target->srp_host = host;
3831 target->lkey = host->srp_dev->pd->local_dma_lkey;
3832 target->global_rkey = host->srp_dev->global_rkey;
3833 target->cmd_sg_cnt = cmd_sg_entries;
3834 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3835 target->allow_ext_sg = allow_ext_sg;
3836 target->tl_retry_count = 7;
3837 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3840 * Prevent the SCSI host from being removed by srp_remove_target()
3841 * before this function returns.
3843 scsi_host_get(target->scsi_host);
3845 ret = mutex_lock_interruptible(&host->add_target_mutex);
3849 ret = srp_parse_options(target->net, buf, target);
3853 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3855 if (!srp_conn_unique(target->srp_host, target)) {
3856 if (target->using_rdma_cm) {
3857 shost_printk(KERN_INFO, target->scsi_host,
3858 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3859 be64_to_cpu(target->id_ext),
3860 be64_to_cpu(target->ioc_guid),
3861 &target->rdma_cm.dst);
3863 shost_printk(KERN_INFO, target->scsi_host,
3864 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3865 be64_to_cpu(target->id_ext),
3866 be64_to_cpu(target->ioc_guid),
3867 be64_to_cpu(target->initiator_ext));
3873 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3874 target->cmd_sg_cnt < target->sg_tablesize) {
3875 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3876 target->sg_tablesize = target->cmd_sg_cnt;
3879 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3880 bool gaps_reg = (ibdev->attrs.device_cap_flags &
3881 IB_DEVICE_SG_GAPS_REG);
3883 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3884 (ilog2(srp_dev->mr_page_size) - 9);
3887 * FR and FMR can only map one HCA page per entry. If
3888 * the start address is not aligned on a HCA page
3889 * boundary, two entries will be used for the head and
3890 * the tail, although these two entries combined
3891 * contain at most one HCA page of data. Hence the "+
3892 * 1" in the calculation below.
3894 * The indirect data buffer descriptor is contiguous
3895 * so the memory for that buffer will only be
3896 * registered if register_always is true. Hence add
3897 * one to mr_per_cmd if register_always has been set.
3899 mr_per_cmd = register_always +
3900 (target->scsi_host->max_sectors + 1 +
3901 max_sectors_per_mr - 1) / max_sectors_per_mr;
3903 mr_per_cmd = register_always +
3904 (target->sg_tablesize +
3905 srp_dev->max_pages_per_mr - 1) /
3906 srp_dev->max_pages_per_mr;
3908 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3909 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3910 max_sectors_per_mr, mr_per_cmd);
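/*
 * Worked example (illustrative numbers, not from a real HCA): with
 * mr_page_size = 4096, max_pages_per_mr = 256 and max_sectors = 1024,
 * max_sectors_per_mr = 256 << (12 - 9) = 2048, so with register_always
 * set mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
 */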
3913 target_host->sg_tablesize = target->sg_tablesize;
3914 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3915 target->mr_per_cmd = mr_per_cmd;
3916 target->indirect_size = target->sg_tablesize *
3917 sizeof (struct srp_direct_buf);
3918 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
3920 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3921 INIT_WORK(&target->remove_work, srp_remove_work);
3922 spin_lock_init(&target->lock);
3923 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3928 target->ch_count = max_t(unsigned, num_online_nodes(),
3930 min(4 * num_online_nodes(),
3931 ibdev->num_comp_vectors),
3932 num_online_cpus()));
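/*
 * Illustrative example (assumed topology, and assuming no explicit
 * channel-count override): on a system with 2 online NUMA nodes,
 * 16 online CPUs and 8 completion vectors, the expression above
 * yields max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
 */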
3933 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3939 for_each_online_node(node) {
3940 const int ch_start = (node_idx * target->ch_count /
3941 num_online_nodes());
3942 const int ch_end = ((node_idx + 1) * target->ch_count /
3943 num_online_nodes());
3944 const int cv_start = node_idx * ibdev->num_comp_vectors /
3946 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3950 for_each_online_cpu(cpu) {
3951 if (cpu_to_node(cpu) != node)
3953 if (ch_start + cpu_idx >= ch_end)
3955 ch = &target->ch[ch_start + cpu_idx];
3956 ch->target = target;
3957 ch->comp_vector = cv_start == cv_end ? cv_start :
3958 cv_start + cpu_idx % (cv_end - cv_start);
3959 spin_lock_init(&ch->lock);
3960 INIT_LIST_HEAD(&ch->free_tx);
3961 ret = srp_new_cm_id(ch);
3963 goto err_disconnect;
3965 ret = srp_create_ch_ib(ch);
3967 goto err_disconnect;
3969 ret = srp_alloc_req_data(ch);
3971 goto err_disconnect;
3973 ret = srp_connect_ch(ch, max_iu_len, multich);
3977 if (target->using_rdma_cm)
3978 snprintf(dst, sizeof(dst), "%pIS",
3979 &target->rdma_cm.dst);
3981 snprintf(dst, sizeof(dst), "%pI6",
3982 target->ib_cm.orig_dgid.raw);
3983 shost_printk(KERN_ERR, target->scsi_host,
3984 PFX "Connection %d/%d to %s failed\n",
3986 target->ch_count, dst);
3987 if (node_idx == 0 && cpu_idx == 0) {
3990 srp_free_ch_ib(target, ch);
3991 srp_free_req_data(target, ch);
3992 target->ch_count = ch - target->ch;
4004 target->scsi_host->nr_hw_queues = target->ch_count;
4006 ret = srp_add_target(host, target);
4008 goto err_disconnect;
4010 if (target->state != SRP_TARGET_REMOVED) {
4011 if (target->using_rdma_cm) {
4012 shost_printk(KERN_DEBUG, target->scsi_host, PFX
4013 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
4014 be64_to_cpu(target->id_ext),
4015 be64_to_cpu(target->ioc_guid),
4016 target->sgid.raw, &target->rdma_cm.dst);
4018 shost_printk(KERN_DEBUG, target->scsi_host, PFX
4019 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
4020 be64_to_cpu(target->id_ext),
4021 be64_to_cpu(target->ioc_guid),
4022 be16_to_cpu(target->ib_cm.pkey),
4023 be64_to_cpu(target->ib_cm.service_id),
4025 target->ib_cm.orig_dgid.raw);
4032 mutex_unlock(&host->add_target_mutex);
4035 scsi_host_put(target->scsi_host);
4038 * If a call to srp_remove_target() has not been scheduled,
4039 * drop the network namespace reference that was obtained
4040 * earlier in this function.
4042 if (target->state != SRP_TARGET_REMOVED)
4043 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
4044 scsi_host_put(target->scsi_host);
4050 srp_disconnect_target(target);
4053 for (i = 0; i < target->ch_count; i++) {
4054 ch = &target->ch[i];
4055 srp_free_ch_ib(target, ch);
4056 srp_free_req_data(target, ch);
4063 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
4065 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
4068 struct srp_host *host = container_of(dev, struct srp_host, dev);
4070 return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
4073 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
4075 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
4078 struct srp_host *host = container_of(dev, struct srp_host, dev);
4080 return sprintf(buf, "%d\n", host->port);
4083 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
4085 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
4087 struct srp_host *host;
4089 host = kzalloc(sizeof *host, GFP_KERNEL);
4093 INIT_LIST_HEAD(&host->target_list);
4094 spin_lock_init(&host->target_lock);
4095 init_completion(&host->released);
4096 mutex_init(&host->add_target_mutex);
4097 host->srp_dev = device;
4100 host->dev.class = &srp_class;
4101 host->dev.parent = device->dev->dev.parent;
4102 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
4105 if (device_register(&host->dev))
4107 if (device_create_file(&host->dev, &dev_attr_add_target))
4109 if (device_create_file(&host->dev, &dev_attr_ibdev))
4111 if (device_create_file(&host->dev, &dev_attr_port))
4117 device_unregister(&host->dev);
4125 static void srp_add_one(struct ib_device *device)
4127 struct srp_device *srp_dev;
4128 struct ib_device_attr *attr = &device->attrs;
4129 struct srp_host *host;
4130 int mr_page_shift, p;
4131 u64 max_pages_per_mr;
4132 unsigned int flags = 0;
4134 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
4139 * Use the smallest page size supported by the HCA, down to a
4140 * minimum of 4096 bytes. We're unlikely to build large sglists
4141 * out of smaller entries.
4143 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
4144 srp_dev->mr_page_size = 1 << mr_page_shift;
4145 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
4146 max_pages_per_mr = attr->max_mr_size;
4147 do_div(max_pages_per_mr, srp_dev->mr_page_size);
4148 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
4149 attr->max_mr_size, srp_dev->mr_page_size,
4150 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
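/*
 * Worked example (illustrative values): an HCA reporting
 * max_mr_size = 4 GiB with a 4 KiB mr_page_size allows
 * 4 GiB / 4 KiB = 1048576 pages per MR, which the min_t() below clamps
 * to SRP_MAX_PAGES_PER_MR.
 */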
4151 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4154 srp_dev->has_fmr = (device->ops.alloc_fmr &&
4155 device->ops.dealloc_fmr &&
4156 device->ops.map_phys_fmr &&
4157 device->ops.unmap_fmr);
4158 srp_dev->has_fr = (attr->device_cap_flags &
4159 IB_DEVICE_MEM_MGT_EXTENSIONS);
4160 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
4161 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
4162 } else if (!never_register &&
4163 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
4164 srp_dev->use_fast_reg = (srp_dev->has_fr &&
4165 (!srp_dev->has_fmr || prefer_fr));
4166 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
4169 if (never_register || !register_always ||
4170 (!srp_dev->has_fmr && !srp_dev->has_fr))
4171 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4173 if (srp_dev->use_fast_reg) {
4174 srp_dev->max_pages_per_mr =
4175 min_t(u32, srp_dev->max_pages_per_mr,
4176 attr->max_fast_reg_page_list_len);
4178 srp_dev->mr_max_size = srp_dev->mr_page_size *
4179 srp_dev->max_pages_per_mr;
4180 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4181 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4182 attr->max_fast_reg_page_list_len,
4183 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4185 INIT_LIST_HEAD(&srp_dev->dev_list);
4187 srp_dev->dev = device;
4188 srp_dev->pd = ib_alloc_pd(device, flags);
4189 if (IS_ERR(srp_dev->pd))
4192 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4193 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4194 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4197 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
4198 host = srp_add_port(srp_dev, p);
4200 list_add_tail(&host->list, &srp_dev->dev_list);
4203 ib_set_client_data(device, &srp_client, srp_dev);
4210 static void srp_remove_one(struct ib_device *device, void *client_data)
4212 struct srp_device *srp_dev;
4213 struct srp_host *host, *tmp_host;
4214 struct srp_target_port *target;
4216 srp_dev = client_data;
4220 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4221 device_unregister(&host->dev);
4223 * Wait for the sysfs entry to go away, so that no new
4224 * target ports can be created.
4226 wait_for_completion(&host->released);
4229 * Remove all target ports.
4231 spin_lock(&host->target_lock);
4232 list_for_each_entry(target, &host->target_list, list)
4233 srp_queue_remove_work(target);
4234 spin_unlock(&host->target_lock);
4237 * Wait for tl_err and target port removal tasks.
4239 flush_workqueue(system_long_wq);
4240 flush_workqueue(srp_remove_wq);
4245 ib_dealloc_pd(srp_dev->pd);
4250 static struct srp_function_template ib_srp_transport_functions = {
4251 .has_rport_state = true,
4252 .reset_timer_if_blocked = true,
4253 .reconnect_delay = &srp_reconnect_delay,
4254 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4255 .dev_loss_tmo = &srp_dev_loss_tmo,
4256 .reconnect = srp_rport_reconnect,
4257 .rport_delete = srp_rport_delete,
4258 .terminate_rport_io = srp_terminate_io,
4261 static int __init srp_init_module(void)
4265 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4266 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4267 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4268 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4270 if (srp_sg_tablesize) {
4271 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4272 if (!cmd_sg_entries)
4273 cmd_sg_entries = srp_sg_tablesize;
4276 if (!cmd_sg_entries)
4277 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4279 if (cmd_sg_entries > 255) {
4280 pr_warn("Clamping cmd_sg_entries to 255\n");
4281 cmd_sg_entries = 255;
4284 if (!indirect_sg_entries)
4285 indirect_sg_entries = cmd_sg_entries;
4286 else if (indirect_sg_entries < cmd_sg_entries) {
4287 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4289 indirect_sg_entries = cmd_sg_entries;
4292 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4293 pr_warn("Clamping indirect_sg_entries to %u\n",
4295 indirect_sg_entries = SG_MAX_SEGMENTS;
4298 srp_remove_wq = create_workqueue("srp_remove");
4299 if (!srp_remove_wq) {
4305 ib_srp_transport_template =
4306 srp_attach_transport(&ib_srp_transport_functions);
4307 if (!ib_srp_transport_template)
4310 ret = class_register(&srp_class);
4312 pr_err("couldn't register class infiniband_srp\n");
4316 ib_sa_register_client(&srp_sa_client);
4318 ret = ib_register_client(&srp_client);
4320 pr_err("couldn't register IB client\n");
4328 ib_sa_unregister_client(&srp_sa_client);
4329 class_unregister(&srp_class);
4332 srp_release_transport(ib_srp_transport_template);
4335 destroy_workqueue(srp_remove_wq);
4339 static void __exit srp_cleanup_module(void)
4341 ib_unregister_client(&srp_client);
4342 ib_sa_unregister_client(&srp_sa_client);
4343 class_unregister(&srp_class);
4344 srp_release_transport(ib_srp_transport_template);
4345 destroy_workqueue(srp_remove_wq);
4348 module_init(srp_init_module);
4349 module_exit(srp_cleanup_module);