/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}
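
/*
 * Parse and store one of the three SRP timeout module parameters. "off"
 * disables the timeout (stored as a negative value); any other value is
 * checked against the two remaining timeouts before it is accepted.
 */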
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
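
/*
 * Allocate the FMR pool used to register indirect data buffers. The pool
 * is sized to match the SCSI host's queue depth so that every outstanding
 * command can own a mapping, and roughly a quarter of the pool may be
 * dirty before unmapping work is triggered.
 */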
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (target->fmr_pool)
			ib_destroy_fmr_pool(target->fmr_pool);
		target->fmr_pool = fmr_pool;
	}

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);
err_send_cq:
	ib_destroy_cq(send_cq);
err_recv_cq:
	ib_destroy_cq(recv_cq);
err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	if (target->fmr_pool)
		ib_destroy_fmr_pool(target->fmr_pool);
	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
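
/*
 * Ask the subnet administrator for a path record to the target port and
 * wait for srp_path_rec_completion() to fill in target->path. Returns a
 * negative error code if the query could not be issued or the wait was
 * interrupted, and the (possibly negative) query status otherwise.
 */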
static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	ret = wait_for_completion_interruptible(&target->done);
	if (ret < 0)
		return ret;

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
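
/*
 * Queue asynchronous removal of a target port. Returns true if the state
 * changed to SRP_TARGET_REMOVED and the removal work was queued, and
 * false if removal had already been queued earlier.
 */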
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	if (!target->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(target->req_ring);
	target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
					GFP_KERNEL);
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&target->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
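
/*
 * Undo the work done by srp_map_data(): release the FMR mappings that
 * were set up for @req and unmap the scatterlist of @scmnd.
 */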
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nmdesc--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @target: SRP target port.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, struct scsi_device *sdev,
			   int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
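
/*
 * Register the pages collected in @state through the FMR pool and emit a
 * single descriptor for the resulting memory region.
 */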
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = srp_map_finish_fmr(state, target);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->mr_page_mask || dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == dev->max_pages_per_mr) {
			ret = srp_finish_mapping(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static void srp_map_fmr(struct srp_map_state *state,
			struct srp_target_port *target, struct srp_request *req,
			struct scatterlist *scat, int count)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i, use_fmr;

	state->desc	= req->indirect_desc;
	state->pages	= req->map_page;
	state->next_fmr	= req->fmr_list;

	use_fmr = target->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_finish_mapping(state, target))
		goto backtrack;

	req->nmdesc = state->nmdesc;
}
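
/*
 * Build the data descriptors for @scmnd. Returns the length in bytes of
 * the SRP information unit that was constructed (command header plus
 * direct or indirect buffer descriptors) or a negative error code.
 */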
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_fmr(&state, target, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
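
/*
 * Post a send work request for @iu. Every send is posted with
 * IB_SEND_SIGNALED, so each IU is returned to the free list from
 * srp_send_completion() once its completion has been polled.
 */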
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     send_err ? "send" : "receive",
			     wc_status);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, false, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, true, target);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_target_ib().
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
				  GFP_KERNEL);
	if (!target->rx_ring)
		goto err_no_ring;
	target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
				  GFP_KERNEL);
	if (!target->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		srp_free_iu(target->srp_host, target->tx_ring[i]);
	}

err_no_ring:
	kfree(target->tx_ring);
	target->tx_ring = NULL;
	kfree(target->rx_ring);
	target->rx_ring = NULL;

	return -ENOMEM;
}
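
/*
 * Compute a SCSI request queue timeout that is guaranteed to be longer
 * than the longest time it can take before an IB error completion is
 * generated. As an illustration (actual values depend on the path and
 * HCA): with a local ACK timeout of 19 and retry_cnt = 7, T_tr =
 * 4096 * 2^19 ns = ~2.1 s, error generation can take up to
 * 7 * 4 * 2.1 s = ~60 s, and the computed timeout is roughly 61 seconds.
 */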
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->path.sgid.raw,
					     target->orig_dgid, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}

/**
 * srp_change_queue_type - changing device queue tag type
 * @sdev: scsi device struct
 * @tag_type: requested tag type
 *
 * Returns queue tag type.
 */
static int
srp_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
 * (see include/scsi/scsi_host.h for definition)
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;

	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
		max_depth = shost->can_queue;
		if (!sdev->tagged_supported)
			max_depth = 1;
		if (qdepth > max_depth)
			qdepth = max_depth;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	} else if (reason == SCSI_QDEPTH_QFULL)
		scsi_track_queue_full(sdev, qdepth);
	else
		return -EOPNOTSUPP;

	return sdev->queue_depth;
}
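
/*
 * Send an SRP task management request. The tag of the task management IU
 * is derived from the tag of the request it acts on by setting the
 * SRP_TAG_TSK_MGMT bit, which is how srp_process_rsp() tells responses to
 * task management functions apart from responses to SCSI commands.
 */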
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_target_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || !srp_claim_req(target, req, NULL, scmnd))
		return SUCCESS;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.change_queue_type		= srp_change_queue_type,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}
static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
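/*
 * Consequence of the check above: a second add_target write that repeats
 * an existing id_ext/ioc_guid/initiator_ext triple is rejected by
 * srp_create_target() below with -EEXIST instead of logging in twice.
 */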
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
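/*
 * Illustrative example only; all identifier values below are made up, and
 * the host device name follows the "srp-<ibdev>-<port>" pattern set up by
 * srp_add_port() further down in this file:
 *
 *   echo id_ext=200100A0B8102920,ioc_guid=00A0B8102920,\
 *        dgid=fe800000000000000002c9030005f34b,pkey=ffff,\
 *        service_id=200100a0b8102920 \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * srp_parse_options() requires dgid to be exactly 32 hex digits and parses
 * pkey as hex; the remaining options (max_sect, queue_size, ...) are
 * optional.
 */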
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;
		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;
		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;
		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;
		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;
		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;
		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;
		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;
		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;
		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;
		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;
		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!srp_dev->has_fmr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err;

	ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
	if (ret)
		goto err_free_mem;

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
		     be64_to_cpu(target->id_ext),
		     be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     be64_to_cpu(target->service_id),
		     target->path.sgid.raw, target->path.dgid.raw);

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	return ret;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);
	goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
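	/*
	 * Worked example for a hypothetical HCA: if page_size_cap == 0x1000
	 * (4 KiB pages only) and max_mr_size == 1ULL << 33 (8 GiB), then
	 * ffs(0x1000) - 1 == 12, so mr_page_size == 4096 and
	 * mr_page_mask == ~0xfffULL; max_pages_per_mr becomes
	 * 2^33 / 2^12 == 2097152 before min_t() above clamps it to
	 * SRP_MAX_PAGES_PER_MR.
	 */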
	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}
module_init(srp_init_module);
module_exit(srp_cleanup_module);