/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>

#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");
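
/*
 * Illustrative usage (not part of the driver): these parameters are set at
 * module load time, e.g.:
 *
 *	modprobe ib_srp cmd_sg_entries=64 allow_ext_sg=1 register_always=N
 *
 * The 0444 permissions above make them visible but read-only under
 * /sys/module/ib_srp/parameters/.
 */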
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		0644);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		0644);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		0644);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
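
/*
 * Example (illustrative): the first three bytes of the big-endian ioc_guid
 * hold the vendor OUI, so a target reporting e.g. ioc_guid
 * 0x0005ad0000001234 matches topspin_oui (00:05:ad) and enables the
 * workarounds.
 */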
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}
static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);
	pool = ERR_PTR(ret);
	goto out;
}
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}
/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
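
/*
 * Illustrative call pattern (not part of the driver): srp_unmap_data()
 * below first posts an IB_WR_LOCAL_INV work request for each descriptor
 * via srp_inv_rkey() and only then returns the descriptors in bulk:
 *
 *	for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++)
 *		srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
 *	srp_fr_pool_put(ch->fr_pool, req->fr_list, req->nmdesc);
 */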
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed: the SCSI error handler may continue trying to
	 * perform recovery actions even after scsi_remove_host() returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    unsigned int num_paths, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}
static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						     target->srp_host->srp_dev->dev,
						     target->srp_host->port,
						     &ch->ib_cm.path,
						     IB_SA_PATH_REC_SERVICE_ID |
						     IB_SA_PATH_REC_DGID |
						     IB_SA_PATH_REC_SGID |
						     IB_SA_PATH_REC_NUMB_PATH |
						     IB_SA_PATH_REC_PKEY,
						     SRP_PATH_REC_TIMEOUT_MS,
						     GFP_KERNEL,
						     srp_path_rec_completion,
						     ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}
static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}
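
/*
 * Worked example (illustrative): IB timeouts are encoded as exponents of
 * 4.096 us, i.e. timeout = 4.096 us * 2^n. The default subnet_timeout of
 * 18 therefore corresponds to 4.096 us * 2^18 ~= 1.07 s. srp_send_req()
 * below uses subnet_timeout + 2 as the CM response timeout, which
 * quadruples that value to roughly 4.3 s.
 */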
static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags	= req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi,     &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi,     &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi,     &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi,     &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);

	kfree(req->fr_list);
	if (req->indirect_dma_addr) {
		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
				    target->indirect_size,
				    DMA_TO_DEVICE);
	}
	kfree(req->indirect_desc);

	return 0;
}
static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);
	dma_addr_t dma_addr;
	int ret = -ENOMEM;

	if (srp_dev->use_fast_reg) {
		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
					     GFP_KERNEL);
		if (!req->fr_list)
			goto out;
	}
	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
	if (!req->indirect_desc)
		goto out;

	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
				     target->indirect_size,
				     DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr)) {
		srp_exit_cmd_priv(shost, cmd);
		goto out;
	}

	req->indirect_dma_addr = dma_addr;
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	const struct attribute_group **g;
	struct attribute **attr;

	for (g = shost->hostt->shost_groups; *g; ++g) {
		for (attr = (*g)->attrs; *attr; ++attr) {
			struct device_attribute *dev_attr =
				container_of(*attr, typeof(*dev_attr), attr);

			device_remove_file(&shost->shost_dev, dev_attr);
		}
	}
}
static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}
static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, NULL);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scsi_done(scmnd);
	}
}
struct srp_terminate_context {
	struct srp_target_port *srp_target;
	int scsi_result;
};

static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
	struct srp_terminate_context *context = context_ptr;
	struct srp_target_port *target = context->srp_target;
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	struct srp_request *req = scsi_cmd_priv(scmnd);

	srp_finish_req(ch, req, NULL, context->scsi_result);

	return true;
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_terminate_context context = { .srp_target = target,
		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };

	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}
/* Calculate maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{
	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
		sizeof(struct srp_indirect_buf) +
		cmd_sg_cnt * sizeof(struct srp_direct_buf);

	if (use_imm_data)
		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
				 srp_max_imm_data);

	if (max_it_iu_size)
		max_iu_len = min(max_iu_len, max_it_iu_size);

	pr_debug("max_iu_len = %d\n", max_iu_len);

	return max_iu_len;
}
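
/*
 * Worked example (illustrative): each struct srp_direct_buf is 16 bytes
 * (8-byte VA, 4-byte key, 4-byte length), so with the default
 * cmd_sg_entries of 12 the scatter list alone contributes 192 bytes to
 * max_iu_len on top of the SRP_CMD header, the additional CDB space and
 * the indirect buffer descriptor.
 */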
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is to call srp_reconnect_rport()
 * instead of calling this function directly, since srp_reconnect_rport()
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						srp_use_imm_data,
						target->max_it_iu_size);
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	{
		struct srp_terminate_context context = {
			.srp_target = target, .scsi_result = DID_RESET << 16};

		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
				    &context);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}
/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_rkey) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     target->global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, NULL);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}
static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
	}

	return 0;
}
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}
/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * memory descriptor.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u64 data_len;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	req->cmd->num_sge = 1;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof(struct srp_cmd) + cmd->add_cdb_len;

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);
	data_len = scsi_bufflen(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	if (ch->use_imm_data &&
	    count <= ch->max_imm_sge &&
	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
		struct srp_imm_buf *buf;
		struct ib_sge *sge = &req->cmd->sge[1];

		fmt = SRP_DATA_DESC_IMM;
		len = SRP_IMM_DATA_OFFSET;
		req->nmdesc = 0;
		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->len = cpu_to_be32(data_len);
		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
		for_each_sg(scat, sg, count, i) {
			sge[i].addr   = sg_dma_address(sg);
			sge[i].length = sg_dma_len(sg);
			sge[i].lkey   = target->lkey;
		}
		req->cmd->num_sge += count;
		goto map_complete;
	}

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_direct_buf);

	if (count == 1 && target->global_rkey) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->global_rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_rkey) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}
/**
 * srp_post_send() - send an SRP information unit
 * @ch: RDMA channel over which to send the information unit.
 * @iu: Information unit to send.
 * @len: Length of the information unit excluding immediate data.
 */
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_send_wr wr;

	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
		return -EINVAL;

	iu->sge[0].addr   = iu->dma;
	iu->sge[0].length = len;
	iu->sge[0].lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &iu->sge[0];
	wr.num_sge    = iu->num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, NULL);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, NULL);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = scsi_cmd_priv(scmnd);
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scsi_done(scmnd);
	}
}
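
/*
 * Worked example (illustrative): for a 4096-byte READ of which the target
 * returns only 512 bytes, the target sets SRP_RSP_FLAG_DIUNDER and
 * data_in_res_cnt = 3584, and the code above reports that residual count
 * to the SCSI mid-layer via scsi_set_resid().
 */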
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	iu->num_sge = 1;
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}
static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct request *rq = scsi_cmd_to_rq(scmnd);
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rdma_ch *ch;
	struct srp_request *req = scsi_cmd_priv(scmnd);
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	int len, ret;

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(rq->tag < 0);
	tag = blk_mq_unique_tag(rq);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
					    4);
		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
			goto err_iu;
	}

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		scmnd->result = DID_ERROR << 16;
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Prevent the loops that iterate over the request ring from
	 * encountering a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	return ret;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_it_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
2307 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2309 uint64_t T_tr_ns, max_compl_time_ms;
2310 uint32_t rq_tmo_jiffies;
2313 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2314 * table 91), both the QP timeout and the retry count have to be set
2315 * for RC QP's during the RTR to RTS transition.
2317 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2318 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2321 * Set target->rq_tmo_jiffies to one second more than the largest time
2322 * it can take before an error completion is generated. See also
2323 * C9-140..142 in the IBTA spec for more information about how to
2324 * convert the QP Local ACK Timeout value to nanoseconds.
2326 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2327 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2328 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2329 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2331 return rq_tmo_jiffies;
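}

/*
 * Worked example for srp_compute_rq_tmo() (illustrative values): with
 * qp_attr->timeout == 18 the local ACK timeout T_tr is
 * 4096 * 2^18 ns ~= 1.07 s, and with qp_attr->retry_cnt == 7 the largest
 * completion delay is 7 * 4 * 1.07 s ~= 30 s, so the function returns
 * roughly 31 seconds worth of jiffies.
 */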
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret = 0;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
		ch->use_imm_data  = srp_use_imm_data &&
			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						      ch->use_imm_data,
						      target->max_it_iu_size);
		WARN_ON_ONCE(ch->max_it_iu_len >
			     be32_to_cpu(lrsp->max_it_iu_len));

		if (ch->use_imm_data)
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "using immediate data\n");

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error;
	}

	if (!target->using_rdma_cm) {
		ret = -ENOMEM;
		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
		if (!qp_attr)
			goto error;

		qp_attr->qp_state = IB_QPS_RTR;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		qp_attr->qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		ret = ib_send_cm_rtu(cm_id, NULL, 0);
	}

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
				  const struct ib_cm_event *event,
				  struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;
	u16 dlid;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		dlid = be16_to_cpu(cpi->redirect_lid);
		sa_path_set_dlid(&ch->ib_cm.path, dlid);
		ch->ib_cm.path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
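		/*
		 * SRP_DLID_REDIRECT and SRP_PORT_REDIRECT are positive
		 * status codes rather than errors: the connect path
		 * (srp_connect_ch()) treats them as a hint to retry the
		 * login towards the redirected LID/GID instead of failing
		 * the target port outright.
		 */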
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			union ib_gid *dgid = &ch->ib_cm.path.dgid;

			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(dgid->global.subnet_prefix),
				     be64_to_cpu(dgid->global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->ib_cm.orig_dgid.raw,
					     reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_ib_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
				    struct rdma_cm_event *event)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	int opcode;

	switch (event->status) {
	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->param.conn.private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej =
				(struct srp_login_rej *)
				event->param.conn.private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->status);
		ch->status = -ECONNRESET;
		break;
	}
}

static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		ch->status = -ENXIO;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		ch->status = -EHOSTUNREACH;
		comp = 1;
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		comp = 1;
		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
		break;

	case RDMA_CM_EVENT_REJECTED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_rdma_cm_rej_handler(ch, event);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (ch->connected) {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "received DREQ\n");
			rdma_disconnect(ch->rdma_cm.cm_id);
			comp = 1;
			ch->status = 0;
			queue_work(system_long_wq, &target->tl_err_work);
		}
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;
		ch->status = 0;
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns the new queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func, u8 *status)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int res;

	if (!ch->connected || target->qp_in_error)
		return -1;

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	spin_lock_irq(&ch->lock);
	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tag = ch->tsk_mgmt_tag;
	spin_unlock_irq(&ch->lock);
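	/*
	 * Note on the tag computed above: the SRP_TAG_TSK_MGMT bit (assumed
	 * here to be a high tag bit reserved for task management) keeps
	 * task-management tags in a range of their own, so a response to a
	 * task-management IU can never be mistaken for the response to one
	 * of the blk-mq command tags used by srp_queuecommand().
	 */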
	init_completion(&ch->tsk_mgmt_done);

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}

	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
					  msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
	if (res > 0 && status)
		*status = ch->tsk_mgmt_status;
	mutex_unlock(&rport->mutex);

	WARN_ON_ONCE(res < 0);

	return res > 0 ? 0 : -1;
}
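/*
 * Note for callers of srp_send_tsk_mgmt(): a return value of 0 means the
 * task-management function was sent and a response arrived within
 * SRP_ABORT_TIMEOUT_MS; -1 covers both send failures and timeouts. The
 * error handlers below (srp_abort(), srp_reset_device()) translate this
 * into the SUCCESS/FAILED values expected by the SCSI error handler.
 */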
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = scsi_cmd_priv(scmnd);
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK, NULL) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	if (ret == SUCCESS) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = DID_ABORT << 16;
		scsi_done(scmnd);
	}

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	u8 status;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET, &status))
		return FAILED;
	if (status)
		return FAILED;

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct srp_target_port *target = host_to_target(shost);

	if (target->target_can_queue)
		starget->can_queue = target->target_can_queue;

	return 0;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static DEVICE_ATTR_RO(id_ext);

static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static DEVICE_ATTR_RO(ioc_guid);

static ssize_t service_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;
	return sysfs_emit(buf, "0x%016llx\n",
			  be64_to_cpu(target->ib_cm.service_id));
}

static DEVICE_ATTR_RO(service_id);

static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}

static DEVICE_ATTR_RO(pkey);

static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
}

static DEVICE_ATTR_RO(sgid);

static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}

static DEVICE_ATTR_RO(dgid);

static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}

static DEVICE_ATTR_RO(orig_dgid);

static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}

	return sysfs_emit(buf, "%d\n", req_lim);
}

static DEVICE_ATTR_RO(req_lim);

static ssize_t zero_req_lim_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
}

static DEVICE_ATTR_RO(zero_req_lim);

static ssize_t local_ib_port_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%u\n", target->srp_host->port);
}

static DEVICE_ATTR_RO(local_ib_port);

static ssize_t local_ib_device_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%s\n",
			  dev_name(&target->srp_host->srp_dev->dev->dev));
}

static DEVICE_ATTR_RO(local_ib_device);

static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->ch_count);
}

static DEVICE_ATTR_RO(ch_count);

static ssize_t comp_vector_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->comp_vector);
}

static DEVICE_ATTR_RO(comp_vector);

static ssize_t tl_retry_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
}

static DEVICE_ATTR_RO(tl_retry_count);

static ssize_t cmd_sg_entries_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
}

static DEVICE_ATTR_RO(cmd_sg_entries);

static ssize_t allow_ext_sg_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR_RO(allow_ext_sg);

static struct attribute *srp_host_attrs[] = {
	&dev_attr_id_ext.attr,
	&dev_attr_ioc_guid.attr,
	&dev_attr_service_id.attr,
	&dev_attr_pkey.attr,
	&dev_attr_sgid.attr,
	&dev_attr_dgid.attr,
	&dev_attr_orig_dgid.attr,
	&dev_attr_req_lim.attr,
	&dev_attr_zero_req_lim.attr,
	&dev_attr_local_ib_port.attr,
	&dev_attr_local_ib_device.attr,
	&dev_attr_ch_count.attr,
	&dev_attr_comp_vector.attr,
	&dev_attr_tl_retry_count.attr,
	&dev_attr_cmd_sg_entries.attr,
	&dev_attr_allow_ext_sg.attr,
	NULL
};

ATTRIBUTE_GROUPS(srp_host);

static const struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.target_alloc			= srp_target_alloc,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.init_cmd_priv			= srp_init_cmd_priv,
	.exit_cmd_priv			= srp_exit_cmd_priv,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.eh_timed_out			= srp_timed_out,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.shost_groups			= srp_host_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct srp_request),
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *   removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	kfree(host);
}

static struct attribute *srp_class_attrs[];

ATTRIBUTE_GROUPS(srp_class);

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_groups = srp_class_groups,
	.dev_release = srp_release_dev,
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 * or
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
 *
 * to the add_target sysfs attribute.
 */
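/*
 * For example (all values below are illustrative, not a real target):
 *
 *     echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,\
 *     dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *     service_id=0002c90200402bd4 > /sys/class/infiniband_srp/<host>/add_target
 */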
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_IP_SRC		= 1 << 15,
	SRP_OPT_IP_DEST		= 1 << 16,
	SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
	SRP_OPT_MAX_IT_IU_SIZE	= 1 << 18,
	SRP_OPT_CH_COUNT	= 1 << 19,
};

static unsigned int srp_opt_mandatory[] = {
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_DGID		|
	SRP_OPT_PKEY		|
	SRP_OPT_SERVICE_ID,
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_IP_DEST,
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_IP_SRC,		"src=%s"		},
	{ SRP_OPT_IP_DEST,		"dest=%s"		},
	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
	{ SRP_OPT_CH_COUNT,		"ch_count=%u"		},
	{ SRP_OPT_ERR,			NULL			}
};

/**
 * srp_parse_in - parse an IP address and port number combination
 * @net:	   [in]  Network namespace.
 * @sa:		   [out] Address family, IP address and port number.
 * @addr_port_str: [in]  IP address and port number.
 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
 *
 * Parse the following address formats:
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
 */
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
			const char *addr_port_str, bool *has_port)
{
	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
	char *port_str;
	int ret;

	if (!addr)
		return -ENOMEM;
	port_str = strrchr(addr, ':');
	if (port_str && strchr(port_str, ']'))
		port_str = NULL;
	if (port_str)
		*port_str++ = '\0';
	*has_port = port_str != NULL;
	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
	if (ret && addr[0]) {
		addr_end = addr + strlen(addr) - 1;
		if (addr[0] == '[' && *addr_end == ']') {
			*addr_end = '\0';
			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
						   port_str, sa);
		}
	}
	kfree(addr);
	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
	return ret;
}
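/*
 * Examples of inputs accepted by srp_parse_in(), per the kernel-doc above
 * (addresses are illustrative):
 *   "192.168.1.1:5555"   -> AF_INET sockaddr, port 5555, *has_port == true
 *   "[fe80::1%2]:5555"   -> AF_INET6 sockaddr with scope id 2, port 5555
 *   "192.168.1.1"        -> AF_INET sockaddr, *has_port == false
 */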
static int srp_parse_options(struct net *net, const char *buf,
			     struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long ull;
	bool has_port;
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("invalid id_ext parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->id_ext = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("invalid ioc_guid parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->ioc_guid = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
			kfree(p);
			if (ret < 0)
				goto out;
			break;

		case SRP_OPT_PKEY:
			ret = match_hex(args, &token);
			if (ret) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->ib_cm.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("bad service_id parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->ib_cm.service_id = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_IP_SRC:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
					   NULL);
			if (ret < 0) {
				pr_warn("bad source parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->rdma_cm.src_specified = true;
			kfree(p);
			break;

		case SRP_OPT_IP_DEST:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
					   &has_port);
			if (!has_port)
				ret = -EINVAL;
			if (ret < 0) {
				pr_warn("bad dest parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->using_rdma_cm = true;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				ret = -EINVAL;
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				ret = -EINVAL;
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_TARGET_CAN_QUEUE:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1) {
				pr_warn("bad max target_can_queue parameter '%s'\n",
					p);
				ret = -EINVAL;
				goto out;
			}
			target->target_can_queue = token;
			break;

		case SRP_OPT_IO_CLASS:
			ret = match_hex(args, &token);
			if (ret) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				ret = -EINVAL;
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("bad initiator_ext value '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->initiator_ext = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				ret = -EINVAL;
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1 || token > SG_MAX_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				ret = -EINVAL;
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				ret = -EINVAL;
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				ret = -EINVAL;
				goto out;
			}
			target->tl_retry_count = token;
			break;

		case SRP_OPT_MAX_IT_IU_SIZE:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 0) {
				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
				ret = -EINVAL;
				goto out;
			}
			target->max_it_iu_size = token;
			break;

		case SRP_OPT_CH_COUNT:
			ret = match_int(args, &token);
			if (ret) {
				pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
					p, ret);
				goto out;
			}
			if (token < 1) {
				pr_warn("bad channel count %s\n", p);
				ret = -EINVAL;
				goto out;
			}
			target->ch_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
			ret = 0;
			break;
		}
	}
	if (ret)
		pr_warn("target creation request is missing one or more parameters\n");

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

static ssize_t add_target_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, i, ch_idx;
	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
	bool multich = false;
	uint32_t max_iu_len;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);

	if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;

	target = host_to_target(target_host);

	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_rkey	= host->srp_dev->global_rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	ret = mutex_lock_interruptible(&host->add_target_mutex);
	if (ret < 0)
		goto put;

	ret = srp_parse_options(target->net, buf, target);
	if (ret)
		goto out;

	if (!srp_conn_unique(target->srp_host, target)) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be64_to_cpu(target->initiator_ext));
		}
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	if (srp_dev->use_fast_reg) {
		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
				IBK_SG_GAPS_REG;

		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
				  (ilog2(srp_dev->mr_page_size) - 9);
		if (!gaps_reg) {
			/*
			 * FR can only map one HCA page per entry. If the start
			 * address is not aligned on a HCA page boundary two
			 * entries will be used for the head and the tail
			 * although these two entries combined contain at most
			 * one HCA page of data. Hence the "+ 1" in the
			 * calculation below.
			 *
			 * The indirect data buffer descriptor is contiguous
			 * so the memory for that buffer will only be
			 * registered if register_always is true. Hence add
			 * one to mr_per_cmd if register_always has been set.
			 */
			mr_per_cmd = register_always +
				(target->scsi_host->max_sectors + 1 +
				 max_sectors_per_mr - 1) / max_sectors_per_mr;
		} else {
			mr_per_cmd = register_always +
				(target->sg_tablesize +
				 srp_dev->max_pages_per_mr - 1) /
				srp_dev->max_pages_per_mr;
		}
		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
			 max_sectors_per_mr, mr_per_cmd);
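		/*
		 * Worked example for the calculation above (illustrative
		 * values): with a 4 KiB mr_page_size and
		 * max_pages_per_mr == 256, max_sectors_per_mr =
		 * 256 << (12 - 9) = 2048 sectors. With max_sectors == 1024,
		 * register_always == true and no gaps registration,
		 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
		 */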
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
				       srp_use_imm_data,
				       target->max_it_iu_size);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	if (target->ch_count == 0) {
		target->ch_count =
			min(ch_count ?:
				max(4 * num_online_nodes(),
				    ibdev->num_comp_vectors),
			    num_online_cpus());
	}

	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
		ch = &target->ch[ch_idx];
		ch->target = target;
		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->free_tx);
		ret = srp_new_cm_id(ch);
		if (ret)
			goto err_disconnect;

		ret = srp_create_ch_ib(ch);
		if (ret)
			goto err_disconnect;

		ret = srp_connect_ch(ch, max_iu_len, multich);
		if (ret) {
			char dst[64];

			if (target->using_rdma_cm)
				snprintf(dst, sizeof(dst), "%pIS",
					 &target->rdma_cm.dst);
			else
				snprintf(dst, sizeof(dst), "%pI6",
					 target->ib_cm.orig_dgid.raw);
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Connection %d/%d to %s failed\n",
				     ch_idx,
				     target->ch_count, dst);
			if (ch_idx == 0) {
				goto free_ch;
			} else {
				srp_free_ch_ib(target, ch);
				target->ch_count = ch - target->ch;
				goto connected;
			}
		}
		multich = true;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     target->sgid.raw, &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be16_to_cpu(target->ib_cm.pkey),
				     be64_to_cpu(target->ib_cm.service_id),
				     target->sgid.raw,
				     target->ib_cm.orig_dgid.raw);
		}
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

put:
	scsi_host_put(target->scsi_host);
	if (ret < 0) {
		/*
		 * If a call to srp_remove_target() has not been scheduled,
		 * drop the network namespace reference now that was obtained
		 * earlier in this function.
		 */
		if (target->state != SRP_TARGET_REMOVED)
			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
		scsi_host_put(target->scsi_host);
	}

	return ret;

err_disconnect:
	srp_disconnect_target(target);

free_ch:
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR_WO(add_target);

static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}

static DEVICE_ATTR_RO(ibdev);

static ssize_t port_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sysfs_emit(buf, "%u\n", host->port);
}

static DEVICE_ATTR_RO(port);

static struct attribute *srp_class_attrs[] = {
	&dev_attr_add_target.attr,
	&dev_attr_ibdev.attr,
	&dev_attr_port.attr,
	NULL
};

static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	device_initialize(&host->dev);
	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
	if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
			 port))
		goto put_host;
	if (device_add(&host->dev))
		goto put_host;

	return host;

put_host:
	device_del(&host->dev);
	put_device(&host->dev);
	return NULL;
}

static void srp_rename_dev(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev = client_data;
	struct srp_host *host, *tmp_host;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		char name[IB_DEVICE_NAME_MAX + 8];

		snprintf(name, sizeof(name), "srp-%s-%u",
			 dev_name(&device->dev), host->port);
		device_rename(&host->dev, name);
	}
}

static int srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
	int mr_page_shift;
	u32 p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return -ENOMEM;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 attr->max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
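	/*
	 * Example for the mr_page_shift computation above (illustrative
	 * capability values): an HCA reporting page_size_cap == 0xfffff000
	 * supports 4 KiB pages and larger, so ffs(0xfffff000) - 1 == 12 and
	 * mr_page_size becomes 4096; a device whose smallest supported page
	 * is 64 KiB (page_size_cap == 0xffff0000) would yield
	 * mr_page_shift == 16.
	 */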
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	srp_dev->has_fr = (attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fr)
		dev_warn(&device->dev, "FR is not supported\n");
	else if (!never_register &&
		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
		srp_dev->use_fast_reg = srp_dev->has_fr;

	if (never_register || !register_always || !srp_dev->has_fr)
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      attr->max_fast_reg_page_list_len);
	}

	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd)) {
		int ret = PTR_ERR(srp_dev->pd);

		kfree(srp_dev);
		return ret;
	}

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
		WARN_ON_ONCE(srp_dev->global_rkey == 0);
	}

	rdma_for_each_port (device, p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return 0;
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		/*
		 * Remove the add_target sysfs entry so that no new target
		 * ports can be created.
		 */
		device_del(&host->dev);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * srp_queue_remove_work() queues a call to
		 * srp_remove_target(). The latter function cancels
		 * target->tl_err_work, so waiting for the remove work to
		 * finish is sufficient.
		 */
		flush_workqueue(srp_remove_wq);

		put_device(&host->dev);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
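	/*
	 * The BUILD_BUG_ON()s above pin the on-the-wire sizes of the SRP
	 * information units: if a structure definition ever changes size,
	 * e.g. because a field type or packing attribute is modified, the
	 * driver fails to compile instead of silently exchanging malformed
	 * IUs with the target.
	 */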
	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
		pr_warn("Clamping indirect_sg_entries to %u\n",
			SG_MAX_SEGMENTS);
		indirect_sg_entries = SG_MAX_SEGMENTS;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);