1 | /* |
2 | * Copyright (c) 2005 Cisco Systems. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $ | |
33 | */ | |
34 | ||
35 | #include <linux/module.h> |
36 | #include <linux/init.h> | |
37 | #include <linux/slab.h> | |
38 | #include <linux/err.h> | |
39 | #include <linux/string.h> | |
40 | #include <linux/parser.h> | |
41 | #include <linux/random.h> | |
42 | ||
43 | #include <asm/atomic.h> | |
44 | ||
45 | #include <scsi/scsi.h> | |
46 | #include <scsi/scsi_device.h> | |
47 | #include <scsi/scsi_dbg.h> | |
48 | #include <scsi/srp.h> | |
49 | ||
50 | #include <rdma/ib_cache.h> | |
51 | ||
52 | #include "ib_srp.h" | |
53 | ||
54 | #define DRV_NAME "ib_srp" | |
55 | #define PFX DRV_NAME ": " | |
56 | #define DRV_VERSION "0.2" | |
57 | #define DRV_RELDATE "November 1, 2005" | |
58 | ||
59 | MODULE_AUTHOR("Roland Dreier"); | |
60 | MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " | |
61 | "v" DRV_VERSION " (" DRV_RELDATE ")"); | |
62 | MODULE_LICENSE("Dual BSD/GPL"); | |
63 | ||
64 | static int topspin_workarounds = 1; | |
65 | ||
66 | module_param(topspin_workarounds, int, 0444); | |
67 | MODULE_PARM_DESC(topspin_workarounds, | |
68 | "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); | |
69 | ||
70 | static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; | |
71 | ||
72 | static void srp_add_one(struct ib_device *device); | |
73 | static void srp_remove_one(struct ib_device *device); | |
74 | static void srp_completion(struct ib_cq *cq, void *target_ptr); | |
75 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); | |
76 | ||
77 | static struct ib_client srp_client = { | |
78 | .name = "srp", | |
79 | .add = srp_add_one, | |
80 | .remove = srp_remove_one | |
81 | }; | |
82 | ||
83 | static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) | |
84 | { | |
85 | return (struct srp_target_port *) host->hostdata; | |
86 | } | |
87 | ||
88 | static const char *srp_target_info(struct Scsi_Host *host) | |
89 | { | |
90 | return host_to_target(host)->target_name; | |
91 | } | |
92 | ||
93 | static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, | |
94 | gfp_t gfp_mask, | |
95 | enum dma_data_direction direction) | |
96 | { | |
97 | struct srp_iu *iu; | |
98 | ||
99 | iu = kmalloc(sizeof *iu, gfp_mask); | |
100 | if (!iu) | |
101 | goto out; | |
102 | ||
103 | iu->buf = kzalloc(size, gfp_mask); | |
104 | if (!iu->buf) | |
105 | goto out_free_iu; | |
106 | ||
107 | iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction); | |
108 | if (dma_mapping_error(iu->dma)) | |
109 | goto out_free_buf; | |
110 | ||
111 | iu->size = size; | |
112 | iu->direction = direction; | |
113 | ||
114 | return iu; | |
115 | ||
116 | out_free_buf: | |
117 | kfree(iu->buf); | |
118 | out_free_iu: | |
119 | kfree(iu); | |
120 | out: | |
121 | return NULL; | |
122 | } | |
123 | ||
124 | static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) | |
125 | { | |
126 | if (!iu) | |
127 | return; | |
128 | ||
129 | dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction); | |
130 | kfree(iu->buf); | |
131 | kfree(iu); | |
132 | } | |
133 | ||
134 | static void srp_qp_event(struct ib_event *event, void *context) | |
135 | { | |
136 | printk(KERN_ERR PFX "QP event %d\n", event->event); | |
137 | } | |
138 | ||
139 | static int srp_init_qp(struct srp_target_port *target, | |
140 | struct ib_qp *qp) | |
141 | { | |
142 | struct ib_qp_attr *attr; | |
143 | int ret; | |
144 | ||
145 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | |
146 | if (!attr) | |
147 | return -ENOMEM; | |
148 | ||
149 | ret = ib_find_cached_pkey(target->srp_host->dev, | |
150 | target->srp_host->port, | |
151 | be16_to_cpu(target->path.pkey), | |
152 | &attr->pkey_index); | |
153 | if (ret) | |
154 | goto out; | |
155 | ||
156 | attr->qp_state = IB_QPS_INIT; | |
157 | attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | | |
158 | IB_ACCESS_REMOTE_WRITE); | |
159 | attr->port_num = target->srp_host->port; | |
160 | ||
161 | ret = ib_modify_qp(qp, attr, | |
162 | IB_QP_STATE | | |
163 | IB_QP_PKEY_INDEX | | |
164 | IB_QP_ACCESS_FLAGS | | |
165 | IB_QP_PORT); | |
166 | ||
167 | out: | |
168 | kfree(attr); | |
169 | return ret; | |
170 | } | |
171 | ||
172 | static int srp_create_target_ib(struct srp_target_port *target) | |
173 | { | |
174 | struct ib_qp_init_attr *init_attr; | |
175 | int ret; | |
176 | ||
177 | init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); | |
178 | if (!init_attr) | |
179 | return -ENOMEM; | |
180 | ||
181 | target->cq = ib_create_cq(target->srp_host->dev, srp_completion, | |
182 | NULL, target, SRP_CQ_SIZE); | |
183 | if (IS_ERR(target->cq)) { | |
184 | ret = PTR_ERR(target->cq); | |
185 | goto out; | |
186 | } | |
187 | ||
188 | ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP); | |
189 | ||
190 | init_attr->event_handler = srp_qp_event; | |
191 | init_attr->cap.max_send_wr = SRP_SQ_SIZE; | |
192 | init_attr->cap.max_recv_wr = SRP_RQ_SIZE; | |
193 | init_attr->cap.max_recv_sge = 1; | |
194 | init_attr->cap.max_send_sge = 1; | |
195 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; | |
196 | init_attr->qp_type = IB_QPT_RC; | |
197 | init_attr->send_cq = target->cq; | |
198 | init_attr->recv_cq = target->cq; | |
199 | ||
200 | target->qp = ib_create_qp(target->srp_host->pd, init_attr); | |
201 | if (IS_ERR(target->qp)) { | |
202 | ret = PTR_ERR(target->qp); | |
203 | ib_destroy_cq(target->cq); | |
204 | goto out; | |
205 | } | |
206 | ||
207 | ret = srp_init_qp(target, target->qp); | |
208 | if (ret) { | |
209 | ib_destroy_qp(target->qp); | |
210 | ib_destroy_cq(target->cq); | |
211 | goto out; | |
212 | } | |
213 | ||
214 | out: | |
215 | kfree(init_attr); | |
216 | return ret; | |
217 | } | |
218 | ||
219 | static void srp_free_target_ib(struct srp_target_port *target) | |
220 | { | |
221 | int i; | |
222 | ||
223 | ib_destroy_qp(target->qp); | |
224 | ib_destroy_cq(target->cq); | |
225 | ||
226 | for (i = 0; i < SRP_RQ_SIZE; ++i) | |
227 | srp_free_iu(target->srp_host, target->rx_ring[i]); | |
228 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) | |
229 | srp_free_iu(target->srp_host, target->tx_ring[i]); | |
230 | } | |
231 | ||
232 | static void srp_path_rec_completion(int status, | |
233 | struct ib_sa_path_rec *pathrec, | |
234 | void *target_ptr) | |
235 | { | |
236 | struct srp_target_port *target = target_ptr; | |
237 | ||
238 | target->status = status; | |
239 | if (status) | |
240 | printk(KERN_ERR PFX "Got failed path rec status %d\n", status); | |
241 | else | |
242 | target->path = *pathrec; | |
243 | complete(&target->done); | |
244 | } | |
245 | ||
246 | static int srp_lookup_path(struct srp_target_port *target) | |
247 | { | |
248 | target->path.numb_path = 1; | |
249 | ||
250 | init_completion(&target->done); | |
251 | ||
252 | target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev, | |
253 | target->srp_host->port, | |
254 | &target->path, | |
255 | IB_SA_PATH_REC_DGID | | |
256 | IB_SA_PATH_REC_SGID | | |
257 | IB_SA_PATH_REC_NUMB_PATH | | |
258 | IB_SA_PATH_REC_PKEY, | |
259 | SRP_PATH_REC_TIMEOUT_MS, | |
260 | GFP_KERNEL, | |
261 | srp_path_rec_completion, | |
262 | target, &target->path_query); | |
263 | if (target->path_query_id < 0) | |
264 | return target->path_query_id; | |
265 | ||
266 | wait_for_completion(&target->done); | |
267 | ||
268 | if (target->status < 0) | |
269 | printk(KERN_WARNING PFX "Path record query failed\n"); | |
270 | ||
271 | return target->status; | |
272 | } | |
273 | ||
274 | static int srp_send_req(struct srp_target_port *target) | |
275 | { | |
276 | struct { | |
277 | struct ib_cm_req_param param; | |
278 | struct srp_login_req priv; | |
279 | } *req = NULL; | |
280 | int status; | |
281 | ||
282 | req = kzalloc(sizeof *req, GFP_KERNEL); | |
283 | if (!req) | |
284 | return -ENOMEM; | |
285 | ||
286 | req->param.primary_path = &target->path; | |
287 | req->param.alternate_path = NULL; | |
288 | req->param.service_id = target->service_id; | |
289 | req->param.qp_num = target->qp->qp_num; | |
290 | req->param.qp_type = target->qp->qp_type; | |
291 | req->param.private_data = &req->priv; | |
292 | req->param.private_data_len = sizeof req->priv; | |
293 | req->param.flow_control = 1; | |
294 | ||
295 | get_random_bytes(&req->param.starting_psn, 4); | |
296 | req->param.starting_psn &= 0xffffff; | |
297 | ||
298 | /* | |
299 | * Pick some arbitrary defaults here; we could make these | |
300 | * module parameters if anyone cared about setting them. | |
301 | */ | |
302 | req->param.responder_resources = 4; | |
303 | req->param.remote_cm_response_timeout = 20; | |
304 | req->param.local_cm_response_timeout = 20; | |
305 | req->param.retry_count = 7; | |
306 | req->param.rnr_retry_count = 7; | |
307 | req->param.max_cm_retries = 15; | |
308 | ||
309 | req->priv.opcode = SRP_LOGIN_REQ; | |
310 | req->priv.tag = 0; | |
311 | req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); | |
312 | req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | | |
313 | SRP_BUF_FORMAT_INDIRECT); | |
314 | memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16); | |
315 | /* | |
316 | * Topspin/Cisco SRP targets will reject our login unless we | |
317 | * zero out the first 8 bytes of our initiator port ID. The | |
318 | * second 8 bytes must be our local node GUID, but we always | |
319 | * use that anyway. | |
320 | */ | |
321 | if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) { | |
322 | printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround " | |
323 | "activated for target GUID %016llx\n", | |
324 | (unsigned long long) be64_to_cpu(target->ioc_guid)); | |
325 | memset(req->priv.initiator_port_id, 0, 8); | |
326 | } | |
327 | memcpy(req->priv.target_port_id, &target->id_ext, 8); | |
328 | memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); | |
329 | ||
330 | status = ib_send_cm_req(target->cm_id, &req->param); | |
331 | ||
332 | kfree(req); | |
333 | ||
334 | return status; | |
335 | } | |
336 | ||
337 | static void srp_disconnect_target(struct srp_target_port *target) | |
338 | { | |
339 | /* XXX should send SRP_I_LOGOUT request */ | |
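	/*
	 * A rough, untested sketch of what that might look like, assuming
	 * struct srp_i_logout from <scsi/srp.h> and reusing the tx IU
	 * helpers defined later in this file (which expect the caller to
	 * hold scsi_host->host_lock):
	 *
	 *	struct srp_iu *iu = __srp_get_tx_iu(target);
	 *	struct srp_i_logout *req = iu ? iu->buf : NULL;
	 *
	 *	if (req) {
	 *		memset(req, 0, sizeof *req);
	 *		req->opcode = SRP_I_LOGOUT;
	 *		req->tag    = 0;
	 *		__srp_post_send(target, iu, sizeof *req);
	 *	}
	 */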
340 | ||
341 | init_completion(&target->done); | |
342 | ib_send_cm_dreq(target->cm_id, NULL, 0); | |
343 | wait_for_completion(&target->done); | |
344 | } | |
345 | ||
346 | static void srp_remove_work(void *target_ptr) | |
347 | { | |
348 | struct srp_target_port *target = target_ptr; | |
349 | ||
350 | spin_lock_irq(target->scsi_host->host_lock); | |
351 | if (target->state != SRP_TARGET_DEAD) { | |
352 | spin_unlock_irq(target->scsi_host->host_lock); | |
353 | scsi_host_put(target->scsi_host); | |
354 | return; | |
355 | } | |
356 | target->state = SRP_TARGET_REMOVED; | |
357 | spin_unlock_irq(target->scsi_host->host_lock); | |
358 | ||
359 | down(&target->srp_host->target_mutex); | |
360 | list_del(&target->list); | |
361 | up(&target->srp_host->target_mutex); | |
362 | ||
363 | scsi_remove_host(target->scsi_host); | |
364 | ib_destroy_cm_id(target->cm_id); | |
365 | srp_free_target_ib(target); | |
366 | scsi_host_put(target->scsi_host); | |
367 | /* And another put to really free the target port... */ | |
368 | scsi_host_put(target->scsi_host); | |
369 | } | |
370 | ||
371 | static int srp_connect_target(struct srp_target_port *target) | |
372 | { | |
373 | int ret; | |
374 | ||
375 | ret = srp_lookup_path(target); | |
376 | if (ret) | |
377 | return ret; | |
378 | ||
379 | while (1) { | |
380 | init_completion(&target->done); | |
381 | ret = srp_send_req(target); | |
382 | if (ret) | |
383 | return ret; | |
384 | wait_for_completion(&target->done); | |
385 | ||
386 | /* | |
387 | * The CM event handling code will set status to | |
388 | * SRP_PORT_REDIRECT if we get a port redirect REJ | |
389 | * back, or SRP_DLID_REDIRECT if we get a lid/qp | |
390 | * redirect REJ back. | |
391 | */ | |
392 | switch (target->status) { | |
393 | case 0: | |
394 | return 0; | |
395 | ||
396 | case SRP_PORT_REDIRECT: | |
397 | ret = srp_lookup_path(target); | |
398 | if (ret) | |
399 | return ret; | |
400 | break; | |
401 | ||
402 | case SRP_DLID_REDIRECT: | |
403 | break; | |
404 | ||
405 | default: | |
406 | return target->status; | |
407 | } | |
408 | } | |
409 | } | |
410 | ||
411 | static int srp_reconnect_target(struct srp_target_port *target) | |
412 | { | |
413 | struct ib_cm_id *new_cm_id; | |
414 | struct ib_qp_attr qp_attr; | |
415 | struct srp_request *req; | |
416 | struct ib_wc wc; | |
417 | int ret; | |
418 | int i; | |
419 | ||
420 | spin_lock_irq(target->scsi_host->host_lock); | |
421 | if (target->state != SRP_TARGET_LIVE) { | |
422 | spin_unlock_irq(target->scsi_host->host_lock); | |
423 | return -EAGAIN; | |
424 | } | |
425 | target->state = SRP_TARGET_CONNECTING; | |
426 | spin_unlock_irq(target->scsi_host->host_lock); | |
427 | ||
428 | srp_disconnect_target(target); | |
429 | /* | |
430 | * Now get a new local CM ID so that we avoid confusing the | |
431 | * target in case things are really fouled up. | |
432 | */ | |
433 | new_cm_id = ib_create_cm_id(target->srp_host->dev, | |
434 | srp_cm_handler, target); | |
435 | if (IS_ERR(new_cm_id)) { | |
436 | ret = PTR_ERR(new_cm_id); | |
437 | goto err; | |
438 | } | |
439 | ib_destroy_cm_id(target->cm_id); | |
440 | target->cm_id = new_cm_id; | |
441 | ||
442 | qp_attr.qp_state = IB_QPS_RESET; | |
443 | ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE); | |
444 | if (ret) | |
445 | goto err; | |
446 | ||
447 | ret = srp_init_qp(target, target->qp); | |
448 | if (ret) | |
449 | goto err; | |
450 | ||
451 | while (ib_poll_cq(target->cq, 1, &wc) > 0) | |
452 | ; /* nothing */ | |
453 | ||
454 | list_for_each_entry(req, &target->req_queue, list) { | |
455 | req->scmnd->result = DID_RESET << 16; | |
456 | req->scmnd->scsi_done(req->scmnd); | |
457 | } | |
458 | ||
459 | target->rx_head = 0; | |
460 | target->tx_head = 0; | |
461 | target->tx_tail = 0; | |
462 | target->req_head = 0; | |
463 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | |
464 | target->req_ring[i].next = i + 1; | |
465 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | |
466 | INIT_LIST_HEAD(&target->req_queue); | |
467 | ||
468 | ret = srp_connect_target(target); | |
469 | if (ret) | |
470 | goto err; | |
471 | ||
472 | spin_lock_irq(target->scsi_host->host_lock); | |
473 | if (target->state == SRP_TARGET_CONNECTING) { | |
474 | ret = 0; | |
475 | target->state = SRP_TARGET_LIVE; | |
476 | } else | |
477 | ret = -EAGAIN; | |
478 | spin_unlock_irq(target->scsi_host->host_lock); | |
479 | ||
480 | return ret; | |
481 | ||
482 | err: | |
483 | printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret); | |
484 | ||
485 | /* | |
486 | * We couldn't reconnect, so kill our target port off. | |
487 | * However, we have to defer the real removal because we might | |
488 | * be in the context of the SCSI error handler now, which | |
489 | * would deadlock if we call scsi_remove_host(). | |
490 | */ | |
491 | spin_lock_irq(target->scsi_host->host_lock); | |
492 | if (target->state == SRP_TARGET_CONNECTING) { | |
493 | target->state = SRP_TARGET_DEAD; | |
494 | INIT_WORK(&target->work, srp_remove_work, target); | |
495 | schedule_work(&target->work); | |
496 | } | |
497 | spin_unlock_irq(target->scsi_host->host_lock); | |
498 | ||
499 | return ret; | |
500 | } | |
501 | ||
502 | static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |
503 | struct srp_request *req) | |
504 | { | |
505 | struct srp_cmd *cmd = req->cmd->buf; | |
506 | int len; | |
507 | u8 fmt; | |
508 | ||
509 | if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) | |
510 | return sizeof (struct srp_cmd); | |
511 | ||
512 | if (scmnd->sc_data_direction != DMA_FROM_DEVICE && | |
513 | scmnd->sc_data_direction != DMA_TO_DEVICE) { | |
514 | printk(KERN_WARNING PFX "Unhandled data direction %d\n", | |
515 | scmnd->sc_data_direction); | |
516 | return -EINVAL; | |
517 | } | |
518 | ||
519 | if (scmnd->use_sg) { | |
520 | struct scatterlist *scat = scmnd->request_buffer; | |
521 | int n; | |
522 | int i; | |
523 | ||
524 | n = dma_map_sg(target->srp_host->dev->dma_device, | |
525 | scat, scmnd->use_sg, scmnd->sc_data_direction); | |
526 | ||
527 | if (n == 1) { | |
528 | struct srp_direct_buf *buf = (void *) cmd->add_data; | |
529 | ||
530 | fmt = SRP_DATA_DESC_DIRECT; | |
531 | ||
532 | buf->va = cpu_to_be64(sg_dma_address(scat)); | |
533 | buf->key = cpu_to_be32(target->srp_host->mr->rkey); | |
534 | buf->len = cpu_to_be32(sg_dma_len(scat)); | |
535 | ||
536 | len = sizeof (struct srp_cmd) + | |
537 | sizeof (struct srp_direct_buf); | |
538 | } else { | |
539 | struct srp_indirect_buf *buf = (void *) cmd->add_data; | |
540 | u32 datalen = 0; | |
541 | ||
542 | fmt = SRP_DATA_DESC_INDIRECT; | |
543 | ||
544 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) | |
545 | cmd->data_out_desc_cnt = n; | |
546 | else | |
547 | cmd->data_in_desc_cnt = n; | |
548 | ||
549 | buf->table_desc.va = cpu_to_be64(req->cmd->dma + | |
550 | sizeof *cmd + | |
551 | sizeof *buf); | |
552 | buf->table_desc.key = | |
553 | cpu_to_be32(target->srp_host->mr->rkey); | |
554 | buf->table_desc.len = | |
555 | cpu_to_be32(n * sizeof (struct srp_direct_buf)); | |
556 | ||
557 | for (i = 0; i < n; ++i) { | |
558 | buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i])); | |
559 | buf->desc_list[i].key = | |
560 | cpu_to_be32(target->srp_host->mr->rkey); | |
561 | buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i])); | |
562 | ||
563 | datalen += sg_dma_len(&scat[i]); | |
564 | } | |
565 | ||
566 | buf->len = cpu_to_be32(datalen); | |
567 | ||
568 | len = sizeof (struct srp_cmd) + | |
569 | sizeof (struct srp_indirect_buf) + | |
570 | n * sizeof (struct srp_direct_buf); | |
571 | } | |
572 | } else { | |
573 | struct srp_direct_buf *buf = (void *) cmd->add_data; | |
574 | dma_addr_t dma; | |
575 | ||
576 | dma = dma_map_single(target->srp_host->dev->dma_device, | |
577 | scmnd->request_buffer, scmnd->request_bufflen, | |
578 | scmnd->sc_data_direction); | |
579 | if (dma_mapping_error(dma)) { | |
580 | printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n", | |
581 | scmnd->request_buffer, (int) scmnd->request_bufflen, | |
582 | scmnd->sc_data_direction); | |
583 | return -EINVAL; | |
584 | } | |
585 | ||
586 | pci_unmap_addr_set(req, direct_mapping, dma); | |
587 | ||
588 | buf->va = cpu_to_be64(dma); | |
589 | buf->key = cpu_to_be32(target->srp_host->mr->rkey); | |
590 | buf->len = cpu_to_be32(scmnd->request_bufflen); | |
591 | ||
592 | fmt = SRP_DATA_DESC_DIRECT; | |
593 | ||
594 | len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); | |
595 | } | |
596 | ||
597 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) | |
598 | cmd->buf_fmt = fmt << 4; | |
599 | else | |
600 | cmd->buf_fmt = fmt; | |
601 | ||
602 | ||
603 | return len; | |
604 | } | |
605 | ||
606 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | |
607 | struct srp_target_port *target, | |
608 | struct srp_request *req) | |
609 | { | |
610 | if (!scmnd->request_buffer || | |
611 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | |
612 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | |
613 | return; | |
614 | ||
615 | if (scmnd->use_sg) | |
616 | dma_unmap_sg(target->srp_host->dev->dma_device, | |
617 | (struct scatterlist *) scmnd->request_buffer, | |
618 | scmnd->use_sg, scmnd->sc_data_direction); | |
619 | else | |
620 | dma_unmap_single(target->srp_host->dev->dma_device, | |
621 | pci_unmap_addr(req, direct_mapping), | |
622 | scmnd->request_bufflen, | |
623 | scmnd->sc_data_direction); | |
624 | } | |
625 | ||
626 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |
627 | { | |
628 | struct srp_request *req; | |
629 | struct scsi_cmnd *scmnd; | |
630 | unsigned long flags; | |
631 | s32 delta; | |
632 | ||
633 | delta = (s32) be32_to_cpu(rsp->req_lim_delta); | |
634 | ||
635 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | |
636 | ||
637 | target->req_lim += delta; | |
638 | ||
639 | req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT]; | |
640 | ||
641 | if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { | |
642 | if (be32_to_cpu(rsp->resp_data_len) < 4) | |
643 | req->tsk_status = -1; | |
644 | else | |
645 | req->tsk_status = rsp->data[3]; | |
646 | complete(&req->done); | |
647 | } else { | |
648 | scmnd = req->scmnd; | |
649 | if (!scmnd) | |
650 | printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", | |
651 | (unsigned long long) rsp->tag); | |
652 | scmnd->result = rsp->status; | |
653 | ||
654 | if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { | |
655 | memcpy(scmnd->sense_buffer, rsp->data + | |
656 | be32_to_cpu(rsp->resp_data_len), | |
657 | min_t(int, be32_to_cpu(rsp->sense_data_len), | |
658 | SCSI_SENSE_BUFFERSIZE)); | |
659 | } | |
660 | ||
661 | if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) | |
662 | scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt); | |
663 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) | |
664 | scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); | |
665 | ||
666 | srp_unmap_data(scmnd, target, req); | |
667 | ||
668 | if (!req->tsk_mgmt) { | |
669 | req->scmnd = NULL; | |
670 | scmnd->host_scribble = (void *) -1L; | |
671 | scmnd->scsi_done(scmnd); | |
672 | ||
673 | list_del(&req->list); | |
674 | req->next = target->req_head; | |
675 | target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT; | |
676 | } else | |
677 | req->cmd_done = 1; | |
678 | } | |
679 | ||
680 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | |
681 | } | |
682 | ||
683 | static void srp_reconnect_work(void *target_ptr) | |
684 | { | |
685 | struct srp_target_port *target = target_ptr; | |
686 | ||
687 | srp_reconnect_target(target); | |
688 | } | |
689 | ||
690 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |
691 | { | |
692 | struct srp_iu *iu; | |
693 | u8 opcode; | |
694 | ||
695 | iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; | |
696 | ||
697 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, | |
698 | target->max_ti_iu_len, DMA_FROM_DEVICE); | |
699 | ||
700 | opcode = *(u8 *) iu->buf; | |
701 | ||
702 | if (0) { | |
703 | int i; | |
704 | ||
705 | printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode); | |
706 | ||
707 | for (i = 0; i < wc->byte_len; ++i) { | |
708 | if (i % 8 == 0) | |
709 | printk(KERN_ERR " [%02x] ", i); | |
710 | printk(" %02x", ((u8 *) iu->buf)[i]); | |
711 | if ((i + 1) % 8 == 0) | |
712 | printk("\n"); | |
713 | } | |
714 | ||
715 | if (wc->byte_len % 8) | |
716 | printk("\n"); | |
717 | } | |
718 | ||
719 | switch (opcode) { | |
720 | case SRP_RSP: | |
721 | srp_process_rsp(target, iu->buf); | |
722 | break; | |
723 | ||
724 | case SRP_T_LOGOUT: | |
725 | /* XXX Handle target logout */ | |
726 | printk(KERN_WARNING PFX "Got target logout request\n"); | |
727 | break; | |
728 | ||
729 | default: | |
730 | printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode); | |
731 | break; | |
732 | } | |
733 | ||
734 | dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma, | |
735 | target->max_ti_iu_len, DMA_FROM_DEVICE); | |
736 | } | |
737 | ||
738 | static void srp_completion(struct ib_cq *cq, void *target_ptr) | |
739 | { | |
740 | struct srp_target_port *target = target_ptr; | |
741 | struct ib_wc wc; | |
742 | unsigned long flags; | |
743 | ||
744 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); | |
745 | while (ib_poll_cq(cq, 1, &wc) > 0) { | |
746 | if (wc.status) { | |
747 | printk(KERN_ERR PFX "failed %s status %d\n", | |
748 | wc.wr_id & SRP_OP_RECV ? "receive" : "send", | |
749 | wc.status); | |
750 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | |
751 | if (target->state == SRP_TARGET_LIVE) | |
752 | schedule_work(&target->work); | |
753 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | |
754 | break; | |
755 | } | |
756 | ||
757 | if (wc.wr_id & SRP_OP_RECV) | |
758 | srp_handle_recv(target, &wc); | |
759 | else | |
760 | ++target->tx_tail; | |
761 | } | |
762 | } | |
763 | ||
764 | static int __srp_post_recv(struct srp_target_port *target) | |
765 | { | |
766 | struct srp_iu *iu; | |
767 | struct ib_sge list; | |
768 | struct ib_recv_wr wr, *bad_wr; | |
769 | unsigned int next; | |
770 | int ret; | |
771 | ||
772 | next = target->rx_head & (SRP_RQ_SIZE - 1); | |
773 | wr.wr_id = next | SRP_OP_RECV; | |
774 | iu = target->rx_ring[next]; | |
775 | ||
776 | list.addr = iu->dma; | |
777 | list.length = iu->size; | |
778 | list.lkey = target->srp_host->mr->lkey; | |
779 | ||
780 | wr.next = NULL; | |
781 | wr.sg_list = &list; | |
782 | wr.num_sge = 1; | |
783 | ||
784 | ret = ib_post_recv(target->qp, &wr, &bad_wr); | |
785 | if (!ret) | |
786 | ++target->rx_head; | |
787 | ||
788 | return ret; | |
789 | } | |
790 | ||
791 | static int srp_post_recv(struct srp_target_port *target) | |
792 | { | |
793 | unsigned long flags; | |
794 | int ret; | |
795 | ||
796 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | |
797 | ret = __srp_post_recv(target); | |
798 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | |
799 | ||
800 | return ret; | |
801 | } | |
802 | ||
803 | /* | |
804 | * Must be called with target->scsi_host->host_lock held to protect | |
805 | * req_lim and tx_head. | |
806 | */ | |
807 | static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target) | |
808 | { | |
809 | if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) | |
810 | return NULL; | |
811 | ||
812 | return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; | |
813 | } | |
814 | ||
815 | /* | |
816 | * Must be called with target->scsi_host->host_lock held to protect | |
817 | * req_lim and tx_head. | |
818 | */ | |
819 | static int __srp_post_send(struct srp_target_port *target, | |
820 | struct srp_iu *iu, int len) | |
821 | { | |
822 | struct ib_sge list; | |
823 | struct ib_send_wr wr, *bad_wr; | |
824 | int ret = 0; | |
825 | ||
826 | if (target->req_lim < 1) { | |
827 | printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim); | |
828 | return -EAGAIN; | |
829 | } | |
830 | ||
831 | list.addr = iu->dma; | |
832 | list.length = len; | |
833 | list.lkey = target->srp_host->mr->lkey; | |
834 | ||
835 | wr.next = NULL; | |
836 | wr.wr_id = target->tx_head & SRP_SQ_SIZE; | |
837 | wr.sg_list = &list; | |
838 | wr.num_sge = 1; | |
839 | wr.opcode = IB_WR_SEND; | |
840 | wr.send_flags = IB_SEND_SIGNALED; | |
841 | ||
842 | ret = ib_post_send(target->qp, &wr, &bad_wr); | |
843 | ||
844 | if (!ret) { | |
845 | ++target->tx_head; | |
846 | --target->req_lim; | |
847 | } | |
848 | ||
849 | return ret; | |
850 | } | |
851 | ||
852 | static int srp_queuecommand(struct scsi_cmnd *scmnd, | |
853 | void (*done)(struct scsi_cmnd *)) | |
854 | { | |
855 | struct srp_target_port *target = host_to_target(scmnd->device->host); | |
856 | struct srp_request *req; | |
857 | struct srp_iu *iu; | |
858 | struct srp_cmd *cmd; | |
859 | long req_index; | |
860 | int len; | |
861 | ||
862 | if (target->state == SRP_TARGET_CONNECTING) | |
863 | goto err; | |
864 | ||
865 | if (target->state == SRP_TARGET_DEAD || | |
866 | target->state == SRP_TARGET_REMOVED) { | |
867 | scmnd->result = DID_BAD_TARGET << 16; | |
868 | done(scmnd); | |
869 | return 0; | |
870 | } | |
871 | ||
872 | iu = __srp_get_tx_iu(target); | |
873 | if (!iu) | |
874 | goto err; | |
875 | ||
876 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, | |
877 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); | |
878 | ||
879 | req_index = target->req_head; | |
880 | ||
881 | scmnd->scsi_done = done; | |
882 | scmnd->result = 0; | |
883 | scmnd->host_scribble = (void *) req_index; | |
884 | ||
885 | cmd = iu->buf; | |
886 | memset(cmd, 0, sizeof *cmd); | |
887 | ||
888 | cmd->opcode = SRP_CMD; | |
889 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | |
890 | cmd->tag = req_index; | |
891 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); | |
892 | ||
893 | req = &target->req_ring[req_index]; | |
894 | ||
895 | req->scmnd = scmnd; | |
896 | req->cmd = iu; | |
897 | req->cmd_done = 0; | |
898 | req->tsk_mgmt = NULL; | |
899 | ||
900 | len = srp_map_data(scmnd, target, req); | |
901 | if (len < 0) { | |
902 | printk(KERN_ERR PFX "Failed to map data\n"); | |
903 | goto err; | |
904 | } | |
905 | ||
906 | if (__srp_post_recv(target)) { | |
907 | printk(KERN_ERR PFX "Recv failed\n"); | |
908 | goto err_unmap; | |
909 | } | |
910 | ||
911 | dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma, | |
912 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); | |
913 | ||
914 | if (__srp_post_send(target, iu, len)) { | |
915 | printk(KERN_ERR PFX "Send failed\n"); | |
916 | goto err_unmap; | |
917 | } | |
918 | ||
919 | target->req_head = req->next; | |
920 | list_add_tail(&req->list, &target->req_queue); | |
921 | ||
922 | return 0; | |
923 | ||
924 | err_unmap: | |
925 | srp_unmap_data(scmnd, target, req); | |
926 | ||
927 | err: | |
928 | return SCSI_MLQUEUE_HOST_BUSY; | |
929 | } | |
930 | ||
931 | static int srp_alloc_iu_bufs(struct srp_target_port *target) | |
932 | { | |
933 | int i; | |
934 | ||
935 | for (i = 0; i < SRP_RQ_SIZE; ++i) { | |
936 | target->rx_ring[i] = srp_alloc_iu(target->srp_host, | |
937 | target->max_ti_iu_len, | |
938 | GFP_KERNEL, DMA_FROM_DEVICE); | |
939 | if (!target->rx_ring[i]) | |
940 | goto err; | |
941 | } | |
942 | ||
943 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { | |
944 | target->tx_ring[i] = srp_alloc_iu(target->srp_host, | |
945 | SRP_MAX_IU_LEN, | |
946 | GFP_KERNEL, DMA_TO_DEVICE); | |
947 | if (!target->tx_ring[i]) | |
948 | goto err; | |
949 | } | |
950 | ||
951 | return 0; | |
952 | ||
953 | err: | |
954 | for (i = 0; i < SRP_RQ_SIZE; ++i) { | |
955 | srp_free_iu(target->srp_host, target->rx_ring[i]); | |
956 | target->rx_ring[i] = NULL; | |
957 | } | |
958 | ||
959 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { | |
960 | srp_free_iu(target->srp_host, target->tx_ring[i]); | |
961 | target->tx_ring[i] = NULL; | |
962 | } | |
963 | ||
964 | return -ENOMEM; | |
965 | } | |
966 | ||
967 | static void srp_cm_rej_handler(struct ib_cm_id *cm_id, | |
968 | struct ib_cm_event *event, | |
969 | struct srp_target_port *target) | |
970 | { | |
971 | struct ib_class_port_info *cpi; | |
972 | int opcode; | |
973 | ||
974 | switch (event->param.rej_rcvd.reason) { | |
975 | case IB_CM_REJ_PORT_CM_REDIRECT: | |
976 | cpi = event->param.rej_rcvd.ari; | |
977 | target->path.dlid = cpi->redirect_lid; | |
978 | target->path.pkey = cpi->redirect_pkey; | |
979 | cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; | |
980 | memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); | |
981 | ||
982 | target->status = target->path.dlid ? | |
983 | SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; | |
984 | break; | |
985 | ||
986 | case IB_CM_REJ_PORT_REDIRECT: | |
987 | if (topspin_workarounds && | |
988 | !memcmp(&target->ioc_guid, topspin_oui, 3)) { | |
989 | /* | |
990 | * Topspin/Cisco SRP gateways incorrectly send | |
991 | * reject reason code 25 when they mean 24 | |
992 | * (port redirect). | |
993 | */ | |
994 | memcpy(target->path.dgid.raw, | |
995 | event->param.rej_rcvd.ari, 16); | |
996 | ||
997 | printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", | |
998 | (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), | |
999 | (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); | |
1000 | ||
1001 | target->status = SRP_PORT_REDIRECT; | |
1002 | } else { | |
1003 | printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); | |
1004 | target->status = -ECONNRESET; | |
1005 | } | |
1006 | break; | |
1007 | ||
1008 | case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: | |
1009 | printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); | |
1010 | target->status = -ECONNRESET; | |
1011 | break; | |
1012 | ||
1013 | case IB_CM_REJ_CONSUMER_DEFINED: | |
1014 | opcode = *(u8 *) event->private_data; | |
1015 | if (opcode == SRP_LOGIN_REJ) { | |
1016 | struct srp_login_rej *rej = event->private_data; | |
1017 | u32 reason = be32_to_cpu(rej->reason); | |
1018 | ||
1019 | if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) | |
1020 | printk(KERN_WARNING PFX | |
1021 | "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); | |
1022 | else | |
1023 | printk(KERN_WARNING PFX | |
1024 | "SRP LOGIN REJECTED, reason 0x%08x\n", reason); | |
1025 | } else | |
1026 | printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," | |
1027 | " opcode 0x%02x\n", opcode); | |
1028 | target->status = -ECONNRESET; | |
1029 | break; | |
1030 | ||
1031 | default: | |
1032 | printk(KERN_WARNING " REJ reason 0x%x\n", | |
1033 | event->param.rej_rcvd.reason); | |
1034 | target->status = -ECONNRESET; | |
1035 | } | |
1036 | } | |
1037 | ||
1038 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |
1039 | { | |
1040 | struct srp_target_port *target = cm_id->context; | |
1041 | struct ib_qp_attr *qp_attr = NULL; | |
1042 | int attr_mask = 0; | |
1043 | int comp = 0; | |
1044 | int opcode = 0; | |
1045 | ||
1046 | switch (event->event) { | |
1047 | case IB_CM_REQ_ERROR: | |
1048 | printk(KERN_DEBUG PFX "Sending CM REQ failed\n"); | |
1049 | comp = 1; | |
1050 | target->status = -ECONNRESET; | |
1051 | break; | |
1052 | ||
1053 | case IB_CM_REP_RECEIVED: | |
1054 | comp = 1; | |
1055 | opcode = *(u8 *) event->private_data; | |
1056 | ||
1057 | if (opcode == SRP_LOGIN_RSP) { | |
1058 | struct srp_login_rsp *rsp = event->private_data; | |
1059 | ||
1060 | target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); | |
1061 | target->req_lim = be32_to_cpu(rsp->req_lim_delta); | |
1062 | ||
1063 | target->scsi_host->can_queue = min(target->req_lim, | |
1064 | target->scsi_host->can_queue); | |
1065 | } else { | |
1066 | printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode); | |
1067 | target->status = -ECONNRESET; | |
1068 | break; | |
1069 | } | |
1070 | ||
1071 | target->status = srp_alloc_iu_bufs(target); | |
1072 | if (target->status) | |
1073 | break; | |
1074 | ||
1075 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); | |
1076 | if (!qp_attr) { | |
1077 | target->status = -ENOMEM; | |
1078 | break; | |
1079 | } | |
1080 | ||
1081 | qp_attr->qp_state = IB_QPS_RTR; | |
1082 | target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | |
1083 | if (target->status) | |
1084 | break; | |
1085 | ||
1086 | target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); | |
1087 | if (target->status) | |
1088 | break; | |
1089 | ||
1090 | target->status = srp_post_recv(target); | |
1091 | if (target->status) | |
1092 | break; | |
1093 | ||
1094 | qp_attr->qp_state = IB_QPS_RTS; | |
1095 | target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | |
1096 | if (target->status) | |
1097 | break; | |
1098 | ||
1099 | target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); | |
1100 | if (target->status) | |
1101 | break; | |
1102 | ||
1103 | target->status = ib_send_cm_rtu(cm_id, NULL, 0); | |
1104 | if (target->status) | |
1105 | break; | |
1106 | ||
1107 | break; | |
1108 | ||
1109 | case IB_CM_REJ_RECEIVED: | |
1110 | printk(KERN_DEBUG PFX "REJ received\n"); | |
1111 | comp = 1; | |
1112 | ||
1113 | srp_cm_rej_handler(cm_id, event, target); | |
1114 | break; | |
1115 | ||
1116 | case IB_CM_MRA_RECEIVED: | |
1117 | printk(KERN_ERR PFX "MRA received\n"); | |
1118 | break; | |
1119 | ||
1120 | case IB_CM_DREP_RECEIVED: | |
1121 | break; | |
1122 | ||
1123 | case IB_CM_TIMEWAIT_EXIT: | |
1124 | printk(KERN_ERR PFX "connection closed\n"); | |
1125 | ||
1126 | comp = 1; | |
1127 | target->status = 0; | |
1128 | break; | |
1129 | ||
1130 | default: | |
1131 | printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event); | |
1132 | break; | |
1133 | } | |
1134 | ||
1135 | if (comp) | |
1136 | complete(&target->done); | |
1137 | ||
1138 | kfree(qp_attr); | |
1139 | ||
1140 | return 0; | |
1141 | } | |
1142 | ||
1143 | static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | |
1144 | { | |
1145 | struct srp_target_port *target = host_to_target(scmnd->device->host); | |
1146 | struct srp_request *req; | |
1147 | struct srp_iu *iu; | |
1148 | struct srp_tsk_mgmt *tsk_mgmt; | |
1149 | int req_index; | |
1150 | int ret = FAILED; | |
1151 | ||
1152 | spin_lock_irq(target->scsi_host->host_lock); | |
1153 | ||
1154 | if (scmnd->host_scribble == (void *) -1L) | |
1155 | goto out; | |
1156 | ||
1157 | req_index = (long) scmnd->host_scribble; | |
1158 | printk(KERN_ERR "Abort for req_index %d\n", req_index); | |
1159 | ||
1160 | req = &target->req_ring[req_index]; | |
1161 | init_completion(&req->done); | |
1162 | ||
1163 | iu = __srp_get_tx_iu(target); | |
1164 | if (!iu) | |
1165 | goto out; | |
1166 | ||
1167 | tsk_mgmt = iu->buf; | |
1168 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); | |
1169 | ||
1170 | tsk_mgmt->opcode = SRP_TSK_MGMT; | |
1171 | tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | |
1172 | tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; | |
1173 | tsk_mgmt->tsk_mgmt_func = func; | |
1174 | tsk_mgmt->task_tag = req_index; | |
1175 | ||
1176 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) | |
1177 | goto out; | |
1178 | ||
1179 | req->tsk_mgmt = iu; | |
1180 | ||
1181 | spin_unlock_irq(target->scsi_host->host_lock); | |
1182 | if (!wait_for_completion_timeout(&req->done, | |
1183 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) | |
1184 | return FAILED; | |
1185 | spin_lock_irq(target->scsi_host->host_lock); | |
1186 | ||
1187 | if (req->cmd_done) { | |
1188 | list_del(&req->list); | |
1189 | req->next = target->req_head; | |
1190 | target->req_head = req_index; | |
1191 | ||
1192 | scmnd->scsi_done(scmnd); | |
1193 | } else if (!req->tsk_status) { | |
1194 | scmnd->result = DID_ABORT << 16; | |
1195 | ret = SUCCESS; | |
1196 | } | |
1197 | ||
1198 | out: | |
1199 | spin_unlock_irq(target->scsi_host->host_lock); | |
1200 | return ret; | |
1201 | } | |
1202 | ||
1203 | static int srp_abort(struct scsi_cmnd *scmnd) | |
1204 | { | |
1205 | printk(KERN_ERR "SRP abort called\n"); | |
1206 | ||
1207 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); | |
1208 | } | |
1209 | ||
1210 | static int srp_reset_device(struct scsi_cmnd *scmnd) | |
1211 | { | |
1212 | printk(KERN_ERR "SRP reset_device called\n"); | |
1213 | ||
1214 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); | |
1215 | } | |
1216 | ||
1217 | static int srp_reset_host(struct scsi_cmnd *scmnd) | |
1218 | { | |
1219 | struct srp_target_port *target = host_to_target(scmnd->device->host); | |
1220 | int ret = FAILED; | |
1221 | ||
1222 | printk(KERN_ERR PFX "SRP reset_host called\n"); | |
1223 | ||
1224 | if (!srp_reconnect_target(target)) | |
1225 | ret = SUCCESS; | |
1226 | ||
1227 | return ret; | |
1228 | } | |
1229 | ||
1230 | static struct scsi_host_template srp_template = { | |
1231 | .module = THIS_MODULE, | |
1232 | .name = DRV_NAME, | |
1233 | .info = srp_target_info, | |
1234 | .queuecommand = srp_queuecommand, | |
1235 | .eh_abort_handler = srp_abort, | |
1236 | .eh_device_reset_handler = srp_reset_device, | |
1237 | .eh_host_reset_handler = srp_reset_host, | |
1238 | .can_queue = SRP_SQ_SIZE, | |
1239 | .this_id = -1, | |
1240 | .sg_tablesize = SRP_MAX_INDIRECT, | |
1241 | .cmd_per_lun = SRP_SQ_SIZE, | |
1242 | .use_clustering = ENABLE_CLUSTERING | |
1243 | }; | |
1244 | ||
1245 | static int srp_add_target(struct srp_host *host, struct srp_target_port *target) | |
1246 | { | |
1247 | sprintf(target->target_name, "SRP.T10:%016llX", | |
1248 | (unsigned long long) be64_to_cpu(target->id_ext)); | |
1249 | ||
1250 | if (scsi_add_host(target->scsi_host, host->dev->dma_device)) | |
1251 | return -ENODEV; | |
1252 | ||
1253 | down(&host->target_mutex); | |
1254 | list_add_tail(&target->list, &host->target_list); | |
1255 | up(&host->target_mutex); | |
1256 | ||
1257 | target->state = SRP_TARGET_LIVE; | |
1258 | ||
1259 | /* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */ | |
1260 | scsi_scan_target(&target->scsi_host->shost_gendev, | |
1261 | 0, target->scsi_id, ~0, 0); | |
1262 | ||
1263 | return 0; | |
1264 | } | |
1265 | ||
1266 | static void srp_release_class_dev(struct class_device *class_dev) | |
1267 | { | |
1268 | struct srp_host *host = | |
1269 | container_of(class_dev, struct srp_host, class_dev); | |
1270 | ||
1271 | complete(&host->released); | |
1272 | } | |
1273 | ||
1274 | static struct class srp_class = { | |
1275 | .name = "infiniband_srp", | |
1276 | .release = srp_release_class_dev | |
1277 | }; | |
1278 | ||
1279 | /* | |
1280 | * Target ports are added by writing | |
1281 | * | |
1282 | * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, | |
1283 | * pkey=<P_Key>,service_id=<service ID> | |
1284 | * | |
1285 | * to the add_target sysfs attribute. | |
1286 | */ | |
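/*
 * Example (hypothetical HCA "mthca0", port 1, and made-up IDs/GID; the
 * option string must be written as a single line):
 *
 *   echo "id_ext=200100a0b8114000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=200100a0b8114000" > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */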
1287 | enum { | |
1288 | SRP_OPT_ERR = 0, | |
1289 | SRP_OPT_ID_EXT = 1 << 0, | |
1290 | SRP_OPT_IOC_GUID = 1 << 1, | |
1291 | SRP_OPT_DGID = 1 << 2, | |
1292 | SRP_OPT_PKEY = 1 << 3, | |
1293 | SRP_OPT_SERVICE_ID = 1 << 4, | |
1294 | SRP_OPT_MAX_SECT = 1 << 5, | |
1295 | SRP_OPT_ALL = (SRP_OPT_ID_EXT | | |
1296 | SRP_OPT_IOC_GUID | | |
1297 | SRP_OPT_DGID | | |
1298 | SRP_OPT_PKEY | | |
1299 | SRP_OPT_SERVICE_ID), | |
1300 | }; | |
1301 | ||
1302 | static match_table_t srp_opt_tokens = { | |
1303 | { SRP_OPT_ID_EXT, "id_ext=%s" }, | |
1304 | { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, | |
1305 | { SRP_OPT_DGID, "dgid=%s" }, | |
1306 | { SRP_OPT_PKEY, "pkey=%x" }, | |
1307 | { SRP_OPT_SERVICE_ID, "service_id=%s" }, | |
1308 | { SRP_OPT_MAX_SECT, "max_sect=%d" }, | |
1309 | { SRP_OPT_ERR, NULL } | |
1310 | }; | |
1311 | ||
1312 | static int srp_parse_options(const char *buf, struct srp_target_port *target) | |
1313 | { | |
1314 | char *options, *sep_opt; | |
1315 | char *p; | |
1316 | char dgid[3]; | |
1317 | substring_t args[MAX_OPT_ARGS]; | |
1318 | int opt_mask = 0; | |
1319 | int token; | |
1320 | int ret = -EINVAL; | |
1321 | int i; | |
1322 | ||
1323 | options = kstrdup(buf, GFP_KERNEL); | |
1324 | if (!options) | |
1325 | return -ENOMEM; | |
1326 | ||
1327 | sep_opt = options; | |
1328 | while ((p = strsep(&sep_opt, ",")) != NULL) { | |
1329 | if (!*p) | |
1330 | continue; | |
1331 | ||
1332 | token = match_token(p, srp_opt_tokens, args); | |
1333 | opt_mask |= token; | |
1334 | ||
1335 | switch (token) { | |
1336 | case SRP_OPT_ID_EXT: | |
1337 | p = match_strdup(args); | |
1338 | target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1339 | kfree(p); | |
1340 | break; | |
1341 | ||
1342 | case SRP_OPT_IOC_GUID: | |
1343 | p = match_strdup(args); | |
1344 | target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1345 | kfree(p); | |
1346 | break; | |
1347 | ||
1348 | case SRP_OPT_DGID: | |
1349 | p = match_strdup(args); | |
1350 | if (strlen(p) != 32) { | |
1351 | printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); | |
1352 | goto out; | |
1353 | } | |
1354 | ||
1355 | for (i = 0; i < 16; ++i) { | |
1356 | strlcpy(dgid, p + i * 2, 3); | |
1357 | target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); | |
1358 | } | |
1359 | break; | |
1360 | ||
1361 | case SRP_OPT_PKEY: | |
1362 | if (match_hex(args, &token)) { | |
1363 | printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p); | |
1364 | goto out; | |
1365 | } | |
1366 | target->path.pkey = cpu_to_be16(token); | |
1367 | break; | |
1368 | ||
1369 | case SRP_OPT_SERVICE_ID: | |
1370 | p = match_strdup(args); | |
1371 | target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1372 | kfree(p); | |
1373 | break; | |
1374 | ||
1375 | case SRP_OPT_MAX_SECT: | |
1376 | if (match_int(args, &token)) { | |
1377 | printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p); | |
1378 | goto out; | |
1379 | } | |
1380 | target->scsi_host->max_sectors = token; | |
1381 | break; | |
1382 | ||
1383 | default: | |
1384 | printk(KERN_WARNING PFX "unknown parameter or missing value " | |
1385 | "'%s' in target creation request\n", p); | |
1386 | goto out; | |
1387 | } | |
1388 | } | |
1389 | ||
1390 | if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) | |
1391 | ret = 0; | |
1392 | else | |
1393 | for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) | |
1394 | if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && | |
1395 | !(srp_opt_tokens[i].token & opt_mask)) | |
1396 | printk(KERN_WARNING PFX "target creation request is " | |
1397 | "missing parameter '%s'\n", | |
1398 | srp_opt_tokens[i].pattern); | |
1399 | ||
1400 | out: | |
1401 | kfree(options); | |
1402 | return ret; | |
1403 | } | |
1404 | ||
1405 | static ssize_t srp_create_target(struct class_device *class_dev, | |
1406 | const char *buf, size_t count) | |
1407 | { | |
1408 | struct srp_host *host = | |
1409 | container_of(class_dev, struct srp_host, class_dev); | |
1410 | struct Scsi_Host *target_host; | |
1411 | struct srp_target_port *target; | |
1412 | int ret; | |
1413 | int i; | |
1414 | ||
1415 | target_host = scsi_host_alloc(&srp_template, | |
1416 | sizeof (struct srp_target_port)); | |
1417 | if (!target_host) | |
1418 | return -ENOMEM; | |
1419 | ||
1420 | target = host_to_target(target_host); | |
1421 | memset(target, 0, sizeof *target); | |
1422 | ||
1423 | target->scsi_host = target_host; | |
1424 | target->srp_host = host; | |
1425 | ||
1426 | INIT_WORK(&target->work, srp_reconnect_work, target); | |
1427 | ||
1428 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | |
1429 | target->req_ring[i].next = i + 1; | |
1430 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | |
1431 | INIT_LIST_HEAD(&target->req_queue); | |
1432 | ||
1433 | ret = srp_parse_options(buf, target); | |
1434 | if (ret) | |
1435 | goto err; | |
1436 | ||
1437 | ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid); | |
1438 | ||
1439 | printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x " | |
1440 | "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | |
1441 | (unsigned long long) be64_to_cpu(target->id_ext), | |
1442 | (unsigned long long) be64_to_cpu(target->ioc_guid), | |
1443 | be16_to_cpu(target->path.pkey), | |
1444 | (unsigned long long) be64_to_cpu(target->service_id), | |
1445 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]), | |
1446 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]), | |
1447 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]), | |
1448 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]), | |
1449 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]), | |
1450 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]), | |
1451 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]), | |
1452 | (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14])); | |
1453 | ||
1454 | ret = srp_create_target_ib(target); | |
1455 | if (ret) | |
1456 | goto err; | |
1457 | ||
1458 | target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target); | |
1459 | if (IS_ERR(target->cm_id)) { | |
1460 | ret = PTR_ERR(target->cm_id); | |
1461 | goto err_free; | |
1462 | } | |
1463 | ||
1464 | ret = srp_connect_target(target); | |
1465 | if (ret) { | |
1466 | printk(KERN_ERR PFX "Connection failed\n"); | |
1467 | goto err_cm_id; | |
1468 | } | |
1469 | ||
1470 | ret = srp_add_target(host, target); | |
1471 | if (ret) | |
1472 | goto err_disconnect; | |
1473 | ||
1474 | return count; | |
1475 | ||
1476 | err_disconnect: | |
1477 | srp_disconnect_target(target); | |
1478 | ||
1479 | err_cm_id: | |
1480 | ib_destroy_cm_id(target->cm_id); | |
1481 | ||
1482 | err_free: | |
1483 | srp_free_target_ib(target); | |
1484 | ||
1485 | err: | |
1486 | scsi_host_put(target_host); | |
1487 | ||
1488 | return ret; | |
1489 | } | |
1490 | ||
1491 | static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); | |
1492 | ||
1493 | static ssize_t show_ibdev(struct class_device *class_dev, char *buf) | |
1494 | { | |
1495 | struct srp_host *host = | |
1496 | container_of(class_dev, struct srp_host, class_dev); | |
1497 | ||
1498 | return sprintf(buf, "%s\n", host->dev->name); | |
1499 | } | |
1500 | ||
1501 | static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); | |
1502 | ||
1503 | static ssize_t show_port(struct class_device *class_dev, char *buf) | |
1504 | { | |
1505 | struct srp_host *host = | |
1506 | container_of(class_dev, struct srp_host, class_dev); | |
1507 | ||
1508 | return sprintf(buf, "%d\n", host->port); | |
1509 | } | |
1510 | ||
1511 | static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL); | |
1512 | ||
1513 | static struct srp_host *srp_add_port(struct ib_device *device, | |
1514 | __be64 node_guid, u8 port) | |
1515 | { | |
1516 | struct srp_host *host; | |
1517 | ||
1518 | host = kzalloc(sizeof *host, GFP_KERNEL); | |
1519 | if (!host) | |
1520 | return NULL; | |
1521 | ||
1522 | INIT_LIST_HEAD(&host->target_list); | |
1523 | init_MUTEX(&host->target_mutex); | |
1524 | init_completion(&host->released); | |
1525 | host->dev = device; | |
1526 | host->port = port; | |
1527 | ||
1528 | host->initiator_port_id[7] = port; | |
1529 | memcpy(host->initiator_port_id + 8, &node_guid, 8); | |
1530 | ||
1531 | host->pd = ib_alloc_pd(device); | |
1532 | if (IS_ERR(host->pd)) | |
1533 | goto err_free; | |
1534 | ||
1535 | host->mr = ib_get_dma_mr(host->pd, | |
1536 | IB_ACCESS_LOCAL_WRITE | | |
1537 | IB_ACCESS_REMOTE_READ | | |
1538 | IB_ACCESS_REMOTE_WRITE); | |
1539 | if (IS_ERR(host->mr)) | |
1540 | goto err_pd; | |
1541 | ||
1542 | host->class_dev.class = &srp_class; | |
1543 | host->class_dev.dev = device->dma_device; | |
1544 | snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d", | |
1545 | device->name, port); | |
1546 | ||
1547 | if (class_device_register(&host->class_dev)) | |
1548 | goto err_mr; | |
1549 | if (class_device_create_file(&host->class_dev, &class_device_attr_add_target)) | |
1550 | goto err_class; | |
1551 | if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev)) | |
1552 | goto err_class; | |
1553 | if (class_device_create_file(&host->class_dev, &class_device_attr_port)) | |
1554 | goto err_class; | |
1555 | ||
1556 | return host; | |
1557 | ||
1558 | err_class: | |
1559 | class_device_unregister(&host->class_dev); | |
1560 | ||
1561 | err_mr: | |
1562 | ib_dereg_mr(host->mr); | |
1563 | ||
1564 | err_pd: | |
1565 | ib_dealloc_pd(host->pd); | |
1566 | ||
1567 | err_free: | |
1568 | kfree(host); | |
1569 | ||
1570 | return NULL; | |
1571 | } | |
1572 | ||
1573 | static void srp_add_one(struct ib_device *device) | |
1574 | { | |
1575 | struct list_head *dev_list; | |
1576 | struct srp_host *host; | |
1577 | struct ib_device_attr *dev_attr; | |
1578 | int s, e, p; | |
1579 | ||
1580 | dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); | |
1581 | if (!dev_attr) | |
1582 | return; | |
1583 | ||
1584 | if (ib_query_device(device, dev_attr)) { | |
1585 | printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n", | |
1586 | device->name); | |
1587 | goto out; | |
1588 | } | |
1589 | ||
1590 | dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); | |
1591 | if (!dev_list) | |
1592 | goto out; | |
1593 | ||
1594 | INIT_LIST_HEAD(dev_list); | |
1595 | ||
1596 | if (device->node_type == IB_NODE_SWITCH) { | |
1597 | s = 0; | |
1598 | e = 0; | |
1599 | } else { | |
1600 | s = 1; | |
1601 | e = device->phys_port_cnt; | |
1602 | } | |
1603 | ||
1604 | for (p = s; p <= e; ++p) { | |
1605 | host = srp_add_port(device, dev_attr->node_guid, p); | |
1606 | if (host) | |
1607 | list_add_tail(&host->list, dev_list); | |
1608 | } | |
1609 | ||
1610 | ib_set_client_data(device, &srp_client, dev_list); | |
1611 | ||
1612 | out: | |
1613 | kfree(dev_attr); | |
1614 | } | |
1615 | ||
1616 | static void srp_remove_one(struct ib_device *device) | |
1617 | { | |
1618 | struct list_head *dev_list; | |
1619 | struct srp_host *host, *tmp_host; | |
1620 | LIST_HEAD(target_list); | |
1621 | struct srp_target_port *target, *tmp_target; | |
1622 | unsigned long flags; | |
1623 | ||
1624 | dev_list = ib_get_client_data(device, &srp_client); | |
1625 | ||
1626 | list_for_each_entry_safe(host, tmp_host, dev_list, list) { | |
1627 | class_device_unregister(&host->class_dev); | |
1628 | /* | |
1629 | * Wait for the sysfs entry to go away, so that no new | |
1630 | * target ports can be created. | |
1631 | */ | |
1632 | wait_for_completion(&host->released); | |
1633 | ||
1634 | /* | |
1635 | * Mark all target ports as removed, so we stop queueing | |
1636 | * commands and don't try to reconnect. | |
1637 | */ | |
1638 | down(&host->target_mutex); | |
1639 | list_for_each_entry_safe(target, tmp_target, | |
1640 | &host->target_list, list) { | |
1641 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | |
1642 | if (target->state != SRP_TARGET_REMOVED) | |
1643 | target->state = SRP_TARGET_REMOVED; | |
1644 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | |
1645 | } | |
1646 | up(&host->target_mutex); | |
1647 | ||
1648 | /* | |
1649 | * Wait for any reconnection tasks that may have | |
1650 | * started before we marked our target ports as | |
1651 | * removed, and any target port removal tasks. | |
1652 | */ | |
1653 | flush_scheduled_work(); | |
1654 | ||
1655 | list_for_each_entry_safe(target, tmp_target, | |
1656 | &host->target_list, list) { | |
1657 | scsi_remove_host(target->scsi_host); | |
1658 | srp_disconnect_target(target); | |
1659 | ib_destroy_cm_id(target->cm_id); | |
1660 | srp_free_target_ib(target); | |
1661 | scsi_host_put(target->scsi_host); | |
1662 | } | |
1663 | ||
1664 | ib_dereg_mr(host->mr); | |
1665 | ib_dealloc_pd(host->pd); | |
1666 | kfree(host); | |
1667 | } | |
1668 | ||
1669 | kfree(dev_list); | |
1670 | } | |
1671 | ||
1672 | static int __init srp_init_module(void) | |
1673 | { | |
1674 | int ret; | |
1675 | ||
1676 | ret = class_register(&srp_class); | |
1677 | if (ret) { | |
1678 | printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); | |
1679 | return ret; | |
1680 | } | |
1681 | ||
1682 | ret = ib_register_client(&srp_client); | |
1683 | if (ret) { | |
1684 | printk(KERN_ERR PFX "couldn't register IB client\n"); | |
1685 | class_unregister(&srp_class); | |
1686 | return ret; | |
1687 | } | |
1688 | ||
1689 | return 0; | |
1690 | } | |
1691 | ||
1692 | static void __exit srp_cleanup_module(void) | |
1693 | { | |
1694 | ib_unregister_client(&srp_client); | |
1695 | class_unregister(&srp_class); | |
1696 | } | |
1697 | ||
1698 | module_init(srp_init_module); | |
1699 | module_exit(srp_cleanup_module); |