f6646d71633dcb8cd5838e42637d965a4f23d8cf
[linux-2.6-block.git] / drivers / scsi / ibmvscsi / ibmvfc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kthread.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/pm.h>
21 #include <linux/stringify.h>
22 #include <linux/bsg-lib.h>
23 #include <asm/firmware.h>
24 #include <asm/irq.h>
25 #include <asm/vio.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/scsi_bsg_fc.h>
33 #include "ibmvfc.h"
34
/*
 * Module-wide tunables. Defaults come from ibmvfc.h; values are set via
 * the module parameters declared below. Some (init_timeout,
 * default_timeout, debug) are declared S_IWUSR and so may change at
 * runtime via sysfs.
 */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

/* Global list of adapter instances; presumably serialized by
 * ibmvfc_driver_lock — the list users are not visible in this chunk. */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
54
/* Module identification and parameter declarations. Each parameter's
 * default is stringified from its ibmvfc.h constant so the modinfo
 * description always matches the compiled-in default. */
MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
		 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
107
/*
 * Table mapping a (status, error) pair from a VIOS response to the
 * SCSI host byte to report, whether the command may be retried,
 * whether the failure is worth logging, and a description string.
 * Looked up via ibmvfc_get_err_index().
 */
static const struct {
	u16 status;	/* error class (fabric / VIOS / FC / FC-SCSI) */
	u16 error;	/* specific error code within that class */
	u8 result;	/* SCSI host byte (DID_*) to return to the midlayer */
	u8 retry;	/* non-zero if the command may be retried */
	int log;	/* non-zero if the error should be logged */
	char *name;	/* human-readable description */
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
157
/* Forward declarations for login / discovery state machine steps and
 * sub-CRQ (de)registration, defined later in this file. */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);

/* Fallback description when a lookup in cmd_status[] fails */
static const char *unknown_error = "unknown error";
170
171 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
172                           unsigned long length, unsigned long *cookie,
173                           unsigned long *irq)
174 {
175         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
176         long rc;
177
178         rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
179         *cookie = retbuf[0];
180         *irq = retbuf[1];
181
182         return rc;
183 }
184
185 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
186 {
187         u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
188
189         return (host_caps & cap_flags) ? 1 : 0;
190 }
191
192 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
193                                                    struct ibmvfc_cmd *vfc_cmd)
194 {
195         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
196                 return &vfc_cmd->v2.iu;
197         else
198                 return &vfc_cmd->v1.iu;
199 }
200
201 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
202                                                  struct ibmvfc_cmd *vfc_cmd)
203 {
204         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
205                 return &vfc_cmd->v2.rsp;
206         else
207                 return &vfc_cmd->v1.rsp;
208 }
209
210 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:		ibmvfc event struct
 *
 * Records the event in the host's trace buffer as it is started.
 * For SCSI commands the CDB opcode, target SCSI id, LUN, TMF flags
 * and transfer length are captured; for MADs only the opcode.
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	/* atomic increment + mask gives a lock-free wrapping buffer index */
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	/* Capture format-specific fields; unknown formats get only the
	 * common fields recorded above. */
	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}
246
/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:		ibmvfc event struct
 *
 * Records completion of an event in the host's trace buffer. Unlike
 * ibmvfc_trc_start(), this reads from evt->xfer_iu (the response the
 * VIOS wrote back) and additionally captures status/error and, for
 * SCSI commands, the FCP response fields.
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	/* same lock-free wrapping index scheme as ibmvfc_trc_start() */
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;

	}
}
289
290 #else
291 #define ibmvfc_trc_start(evt) do { } while (0)
292 #define ibmvfc_trc_end(evt) do { } while (0)
293 #endif
294
295 /**
296  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
297  * @status:             status / error class
298  * @error:              error
299  *
300  * Return value:
301  *      index into cmd_status / -EINVAL on failure
302  **/
303 static int ibmvfc_get_err_index(u16 status, u16 error)
304 {
305         int i;
306
307         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
308                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
309                     cmd_status[i].error == error)
310                         return i;
311
312         return -EINVAL;
313 }
314
315 /**
316  * ibmvfc_get_cmd_error - Find the error description for the fcp response
317  * @status:             status / error class
318  * @error:              error
319  *
320  * Return value:
321  *      error description string
322  **/
323 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
324 {
325         int rc = ibmvfc_get_err_index(status, error);
326         if (rc >= 0)
327                 return cmd_status[rc].name;
328         return unknown_error;
329 }
330
/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	/* If the response length field is flagged valid, only lengths of
	 * 0, 4 or 8 with a zero rsp_code are acceptable; anything else is
	 * reported as DID_ERROR. */
	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	/* Map (status, error) to a host byte via cmd_status[]; fall back
	 * to DID_ERROR for unrecognized pairs. The SCSI status byte from
	 * the response is always OR'd in. */
	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}
355
356 /**
357  * ibmvfc_retry_cmd - Determine if error status is retryable
358  * @status:             status / error class
359  * @error:              error
360  *
361  * Return value:
362  *      1 if error should be retried / 0 if it should not
363  **/
364 static int ibmvfc_retry_cmd(u16 status, u16 error)
365 {
366         int rc = ibmvfc_get_err_index(status, error);
367
368         if (rc >= 0)
369                 return cmd_status[rc].retry;
370         return 1;
371 }
372
/* Fallback string when an FC explain code is not in either table below */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC explain codes for extended link service (ELS) rejects */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};
397
/* FC explain codes for generic service (CT) rejects */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
420
421 /**
422  * ibmvfc_get_ls_explain - Return the FC Explain description text
423  * @status:     FC Explain status
424  *
425  * Returns:
426  *      error string
427  **/
428 static const char *ibmvfc_get_ls_explain(u16 status)
429 {
430         int i;
431
432         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
433                 if (ls_explain[i].fc_explain == status)
434                         return ls_explain[i].name;
435
436         return unknown_fc_explain;
437 }
438
439 /**
440  * ibmvfc_get_gs_explain - Return the FC Explain description text
441  * @status:     FC Explain status
442  *
443  * Returns:
444  *      error string
445  **/
446 static const char *ibmvfc_get_gs_explain(u16 status)
447 {
448         int i;
449
450         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
451                 if (gs_explain[i].fc_explain == status)
452                         return gs_explain[i].name;
453
454         return unknown_fc_explain;
455 }
456
/* Descriptions for FC type error codes reported by the VIOS */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback string when a code is not in fc_type[] */
static const char *unknown_fc_type = "unknown fc type";
470
471 /**
472  * ibmvfc_get_fc_type - Return the FC Type description text
473  * @status:     FC Type error status
474  *
475  * Returns:
476  *      error string
477  **/
478 static const char *ibmvfc_get_fc_type(u16 status)
479 {
480         int i;
481
482         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
483                 if (fc_type[i].fc_type == status)
484                         return fc_type[i].name;
485
486         return unknown_fc_type;
487 }
488
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Enforces the target teardown state machine: once a target has begun
 * logout/delete processing its action may only advance through the
 * transitions listed per-case below, never move backwards. From any
 * state not handled explicitly, the new action is accepted as-is.
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		/* may proceed to waiting for the logout or straight to delete */
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		/* logout completed: delete, optionally with another logout */
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		/* terminal state: no further transitions allowed */
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	/* Any teardown-phase action means this target must not be
	 * (re)added to the FC transport layer. */
	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}
548
549 /**
550  * ibmvfc_set_host_state - Set the state for the host
551  * @vhost:              ibmvfc host struct
552  * @state:              state to set host to
553  *
554  * Returns:
555  *      0 if state changed / non-zero if not changed
556  **/
557 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
558                                   enum ibmvfc_host_state state)
559 {
560         int rc = 0;
561
562         switch (vhost->state) {
563         case IBMVFC_HOST_OFFLINE:
564                 rc = -EINVAL;
565                 break;
566         default:
567                 vhost->state = state;
568                 break;
569         }
570
571         return rc;
572 }
573
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Enforces the host action state machine. Most actions are only
 * accepted from a specific predecessor action; RESET and REENABLE are
 * always accepted and, once set, can only be replaced by another
 * RESET/REENABLE (see the default case).
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* only after NPIV login has been sent */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		/* recovery actions are accepted unconditionally */
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		/* accepted unless a pending RESET/REENABLE would be lost */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}
633
/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:		ibmvfc host struct
 *
 * If the host is idle and active, kicks off a new QUERY pass with
 * requests blocked; otherwise just flags that a re-init is needed so
 * the work thread picks it up later. Always wakes the work thread.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}
654
655 /**
656  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
657  * @tgt:                ibmvfc target struct
658  **/
659 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
660 {
661         if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
662                 tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
663                 tgt->init_retries = 0;
664         }
665         wake_up(&tgt->vhost->work_wait_q);
666 }
667
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 * Blocks new requests, schedules every known target for deletion,
 * moves the host to @state / TGT_DEL, records the link-down event for
 * logging, and wakes the work thread.
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
689
/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Kicks off NPIV login. Gives up and takes the adapter offline once
 * IBMVFC_MAX_HOST_INIT_RETRIES consecutive attempts have failed.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	/* Still waiting on a previous init attempt: count it as a retry */
	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		/* reset the async event queue before logging in again */
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		/* After a partition migration targets only need a new login;
		 * otherwise existing targets are torn down and rediscovered. */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (vhost->client_migrated)
				tgt->need_login = 1;
			else
				ibmvfc_del_tgt(tgt);
		}

		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}
727
728 /**
729  * ibmvfc_send_crq - Send a CRQ
730  * @vhost:      ibmvfc host struct
731  * @word1:      the first 64 bits of the data
732  * @word2:      the second 64 bits of the data
733  *
734  * Return value:
735  *      0 on success / other on failure
736  **/
737 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
738 {
739         struct vio_dev *vdev = to_vio_dev(vhost->dev);
740         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
741 }
742
743 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
744                                u64 word2, u64 word3, u64 word4)
745 {
746         struct vio_dev *vdev = to_vio_dev(vhost->dev);
747
748         return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
749                                   word1, word2, word3, word4);
750 }
751
752 /**
753  * ibmvfc_send_crq_init - Send a CRQ init message
754  * @vhost:      ibmvfc host struct
755  *
756  * Return value:
757  *      0 on success / other on failure
758  **/
759 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
760 {
761         ibmvfc_dbg(vhost, "Sending CRQ init\n");
762         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
763 }
764
765 /**
766  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
767  * @vhost:      ibmvfc host struct
768  *
769  * Return value:
770  *      0 on success / other on failure
771  **/
772 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
773 {
774         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
775         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
776 }
777
/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost:      ibmvfc host who owns the event pool
 * @queue:      ibmvfc queue struct
 *
 * Allocates one event array and one DMA-coherent block of transfer IUs
 * sized to the queue's total depth, then links every event onto the
 * queue's free list.
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	/* Queues configured with no depth get no event pool */
	if (!queue->total_depth)
		return 0;

	pool->size = queue->total_depth;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	/* One transfer IU per event, carved out of a single coherent
	 * allocation; iu_token is the bus address of the first IU. */
	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	/* Credits are split between normal events and those reserved for
	 * driver-internal management commands */
	queue->evt_free = queue->evt_depth;
	queue->reserved_free = queue->reserved_depth;
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		/*
		 * evt->active states
		 *  1 = in flight
		 *  0 = being completed
		 * -1 = free/freed
		 */
		atomic_set(&evt->active, -1);
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		/* Bus address of this event's transfer IU within iu_storage */
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}
838
839 /**
840  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
841  * @vhost:      ibmvfc host who owns the event pool
842  * @queue:      ibmvfc queue struct
843  *
844  **/
845 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
846                                    struct ibmvfc_queue *queue)
847 {
848         int i;
849         struct ibmvfc_event_pool *pool = &queue->evt_pool;
850
851         ENTER;
852         for (i = 0; i < pool->size; ++i) {
853                 list_del(&pool->events[i].queue_list);
854                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
855                 if (pool->events[i].ext_list)
856                         dma_pool_free(vhost->sg_pool,
857                                       pool->events[i].ext_list,
858                                       pool->events[i].ext_list_token);
859         }
860
861         kfree(pool->events);
862         dma_free_coherent(vhost->dev,
863                           pool->size * sizeof(*pool->iu_storage),
864                           pool->iu_storage, pool->iu_token);
865         LEAVE;
866 }
867
868 /**
869  * ibmvfc_free_queue - Deallocate queue
870  * @vhost:      ibmvfc host struct
871  * @queue:      ibmvfc queue struct
872  *
873  * Unmaps dma and deallocates page for messages
874  **/
875 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
876                               struct ibmvfc_queue *queue)
877 {
878         struct device *dev = vhost->dev;
879
880         dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
881         free_page((unsigned long)queue->msgs.handle);
882         queue->msgs.handle = NULL;
883
884         ibmvfc_free_event_pool(vhost, queue);
885 }
886
887 /**
888  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
889  * @vhost:      ibmvfc host struct
890  *
891  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
892  * the crq with the hypervisor.
893  **/
894 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
895 {
896         long rc = 0;
897         struct vio_dev *vdev = to_vio_dev(vhost->dev);
898         struct ibmvfc_queue *crq = &vhost->crq;
899
900         ibmvfc_dbg(vhost, "Releasing CRQ\n");
901         free_irq(vdev->irq, vhost);
902         tasklet_kill(&vhost->tasklet);
903         do {
904                 if (rc)
905                         msleep(100);
906                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
907         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
908
909         vhost->state = IBMVFC_NO_CRQ;
910         vhost->logged_in = 0;
911
912         ibmvfc_free_queue(vhost, crq);
913 }
914
915 /**
916  * ibmvfc_reenable_crq_queue - reenables the CRQ
917  * @vhost:      ibmvfc host struct
918  *
919  * Return value:
920  *      0 on success / other on failure
921  **/
922 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
923 {
924         int rc = 0;
925         struct vio_dev *vdev = to_vio_dev(vhost->dev);
926         unsigned long flags;
927
928         ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
929
930         /* Re-enable the CRQ */
931         do {
932                 if (rc)
933                         msleep(100);
934                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
935         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
936
937         if (rc)
938                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
939
940         spin_lock_irqsave(vhost->host->host_lock, flags);
941         spin_lock(vhost->crq.q_lock);
942         vhost->do_enquiry = 1;
943         vhost->using_channels = 0;
944         spin_unlock(vhost->crq.q_lock);
945         spin_unlock_irqrestore(vhost->host->host_lock, flags);
946
947         ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
948
949         return rc;
950 }
951
/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:      ibmvfc host struct
 *
 * Deregisters the sub-CRQs, closes the main CRQ with the hypervisor,
 * resets the driver's connection state and the queue page, re-registers
 * the CRQ, and finally re-registers the sub-CRQs.
 *
 * Return value:
 *      0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	/* Close the CRQ, retrying every 100ms while the hypervisor is busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Lock order: host_lock outside the CRQ's q_lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	return rc;
}
1003
1004 /**
1005  * ibmvfc_valid_event - Determines if event is valid.
1006  * @pool:       event_pool that contains the event
1007  * @evt:        ibmvfc event to be checked for validity
1008  *
1009  * Return value:
1010  *      1 if event is valid / 0 if event is not valid
1011  **/
1012 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
1013                               struct ibmvfc_event *evt)
1014 {
1015         int index = evt - pool->events;
1016         if (index < 0 || index >= pool->size)   /* outside of bounds */
1017                 return 0;
1018         if (evt != pool->events + index)        /* unaligned */
1019                 return 0;
1020         return 1;
1021 }
1022
/**
 * ibmvfc_free_event - Free the specified event
 * @evt:        ibmvfc_event to be freed
 *
 * Returns the event to its queue's free list and credits the matching
 * (reserved or normal) free counter.
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
	unsigned long flags;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	/* free must transition 0 -> 1; anything else is a double free */
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	/* active must transition 0 -> -1; landing on exactly 0 means the
	 * event was still marked in flight (see states in
	 * ibmvfc_init_event_pool) */
	BUG_ON(atomic_dec_and_test(&evt->active));

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->free);
	if (evt->reserved) {
		evt->reserved = 0;
		evt->queue->reserved_free++;
	} else {
		evt->queue->evt_free++;
	}
	/* Wake any error handler waiting on this event's completion */
	if (evt->eh_comp)
		complete(evt->eh_comp);
	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}
1049
1050 /**
1051  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1052  * @evt:        ibmvfc event struct
1053  *
1054  * This function does not setup any error status, that must be done
1055  * before this function gets called.
1056  **/
1057 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1058 {
1059         struct scsi_cmnd *cmnd = evt->cmnd;
1060
1061         if (cmnd) {
1062                 scsi_dma_unmap(cmnd);
1063                 scsi_done(cmnd);
1064         }
1065
1066         ibmvfc_free_event(evt);
1067 }
1068
1069 /**
1070  * ibmvfc_complete_purge - Complete failed command list
1071  * @purge_list:         list head of failed commands
1072  *
1073  * This function runs completions on commands to fail as a result of a
1074  * host reset or platform migration.
1075  **/
1076 static void ibmvfc_complete_purge(struct list_head *purge_list)
1077 {
1078         struct ibmvfc_event *evt, *pos;
1079
1080         list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1081                 list_del(&evt->queue_list);
1082                 ibmvfc_trc_end(evt);
1083                 evt->done(evt);
1084         }
1085 }
1086
1087 /**
1088  * ibmvfc_fail_request - Fail request with specified error code
1089  * @evt:                ibmvfc event struct
1090  * @error_code: error code to fail request with
1091  *
1092  * Return value:
1093  *      none
1094  **/
1095 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1096 {
1097         /*
1098          * Anything we are failing should still be active. Otherwise, it
1099          * implies we already got a response for the command and are doing
1100          * something bad like double completing it.
1101          */
1102         BUG_ON(!atomic_dec_and_test(&evt->active));
1103         if (evt->cmnd) {
1104                 evt->cmnd->result = (error_code << 16);
1105                 evt->done = ibmvfc_scsi_eh_done;
1106         } else
1107                 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1108
1109         del_timer(&evt->timer);
1110 }
1111
/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost:              ibmvfc host struct
 * @error_code: error code to fail requests with
 *
 * Fails every event on the primary CRQ's sent list and on each active
 * sub-CRQ's sent list, then splices them all onto vhost->purge for
 * completion by ibmvfc_complete_purge().
 *
 * Return value:
 *      none
 **/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
	struct ibmvfc_event *evt, *pos;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int hwqs = 0;
	int i;

	/* Sub-CRQs carry commands only when channels are in use */
	if (vhost->using_channels)
		hwqs = vhost->scsi_scrqs.active_queues;

	ibmvfc_dbg(vhost, "Purging all requests\n");
	spin_lock_irqsave(&vhost->crq.l_lock, flags);
	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
		ibmvfc_fail_request(evt, error_code);
	list_splice_init(&vhost->crq.sent, &vhost->purge);
	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

	for (i = 0; i < hwqs; i++) {
		/* Lock order: the queue's q_lock outside its list l_lock */
		spin_lock_irqsave(queues[i].q_lock, flags);
		spin_lock(&queues[i].l_lock);
		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
			ibmvfc_fail_request(evt, error_code);
		list_splice_init(&queues[i].sent, &vhost->purge);
		spin_unlock(&queues[i].l_lock);
		spin_unlock_irqrestore(queues[i].q_lock, flags);
	}
}
1148
/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:      struct ibmvfc host to reset
 *
 * Fails all outstanding requests with DID_ERROR, takes the link down,
 * and schedules a CRQ reset via the host action state machine.
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
1159
1160 /**
1161  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1162  * @vhost:      struct ibmvfc host to reset
1163  **/
1164 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1165 {
1166         if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1167             !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1168                 scsi_block_requests(vhost->host);
1169                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1170                 vhost->job_step = ibmvfc_npiv_logout;
1171                 wake_up(&vhost->work_wait_q);
1172         } else
1173                 ibmvfc_hard_reset_host(vhost);
1174 }
1175
1176 /**
1177  * ibmvfc_reset_host - Reset the connection to the server
1178  * @vhost:      ibmvfc host struct
1179  **/
1180 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1181 {
1182         unsigned long flags;
1183
1184         spin_lock_irqsave(vhost->host->host_lock, flags);
1185         __ibmvfc_reset_host(vhost);
1186         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1187 }
1188
1189 /**
1190  * ibmvfc_retry_host_init - Retry host initialization if allowed
1191  * @vhost:      ibmvfc host struct
1192  *
1193  * Returns: 1 if init will be retried / 0 if not
1194  *
1195  **/
1196 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1197 {
1198         int retry = 0;
1199
1200         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1201                 vhost->delay_init = 1;
1202                 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1203                         dev_err(vhost->dev,
1204                                 "Host initialization retries exceeded. Taking adapter offline\n");
1205                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1206                 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1207                         __ibmvfc_reset_host(vhost);
1208                 else {
1209                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1210                         retry = 1;
1211                 }
1212         }
1213
1214         wake_up(&vhost->work_wait_q);
1215         return retry;
1216 }
1217
1218 /**
1219  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1220  * @starget:    scsi target struct
1221  *
1222  * Return value:
1223  *      ibmvfc_target struct / NULL if not found
1224  **/
1225 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1226 {
1227         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1228         struct ibmvfc_host *vhost = shost_priv(shost);
1229         struct ibmvfc_target *tgt;
1230
1231         list_for_each_entry(tgt, &vhost->targets, queue)
1232                 if (tgt->target_id == starget->id) {
1233                         kref_get(&tgt->kref);
1234                         return tgt;
1235                 }
1236         return NULL;
1237 }
1238
1239 /**
1240  * ibmvfc_get_target - Find the specified scsi_target
1241  * @starget:    scsi target struct
1242  *
1243  * Return value:
1244  *      ibmvfc_target struct / NULL if not found
1245  **/
1246 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1247 {
1248         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1249         struct ibmvfc_target *tgt;
1250         unsigned long flags;
1251
1252         spin_lock_irqsave(shost->host_lock, flags);
1253         tgt = __ibmvfc_get_target(starget);
1254         spin_unlock_irqrestore(shost->host_lock, flags);
1255         return tgt;
1256 }
1257
1258 /**
1259  * ibmvfc_get_host_speed - Get host port speed
1260  * @shost:              scsi host struct
1261  *
1262  * Return value:
1263  *      none
1264  **/
1265 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1266 {
1267         struct ibmvfc_host *vhost = shost_priv(shost);
1268         unsigned long flags;
1269
1270         spin_lock_irqsave(shost->host_lock, flags);
1271         if (vhost->state == IBMVFC_ACTIVE) {
1272                 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1273                 case 1:
1274                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1275                         break;
1276                 case 2:
1277                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1278                         break;
1279                 case 4:
1280                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1281                         break;
1282                 case 8:
1283                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1284                         break;
1285                 case 10:
1286                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1287                         break;
1288                 case 16:
1289                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1290                         break;
1291                 default:
1292                         ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1293                                    be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1294                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1295                         break;
1296                 }
1297         } else
1298                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1299         spin_unlock_irqrestore(shost->host_lock, flags);
1300 }
1301
1302 /**
1303  * ibmvfc_get_host_port_state - Get host port state
1304  * @shost:              scsi host struct
1305  *
1306  * Return value:
1307  *      none
1308  **/
1309 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1310 {
1311         struct ibmvfc_host *vhost = shost_priv(shost);
1312         unsigned long flags;
1313
1314         spin_lock_irqsave(shost->host_lock, flags);
1315         switch (vhost->state) {
1316         case IBMVFC_INITIALIZING:
1317         case IBMVFC_ACTIVE:
1318                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1319                 break;
1320         case IBMVFC_LINK_DOWN:
1321                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1322                 break;
1323         case IBMVFC_LINK_DEAD:
1324         case IBMVFC_HOST_OFFLINE:
1325                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1326                 break;
1327         case IBMVFC_HALTED:
1328                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1329                 break;
1330         case IBMVFC_NO_CRQ:
1331                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1332                 break;
1333         default:
1334                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1335                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1336                 break;
1337         }
1338         spin_unlock_irqrestore(shost->host_lock, flags);
1339 }
1340
1341 /**
1342  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1343  * @rport:              rport struct
1344  * @timeout:    timeout value
1345  *
1346  * Return value:
1347  *      none
1348  **/
1349 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1350 {
1351         if (timeout)
1352                 rport->dev_loss_tmo = timeout;
1353         else
1354                 rport->dev_loss_tmo = 1;
1355 }
1356
/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:               kref struct
 *
 * kref release callback invoked when the last reference to an
 * ibmvfc_target is dropped.
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}
1367
1368 /**
1369  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1370  * @starget:    scsi target struct
1371  *
1372  * Return value:
1373  *      none
1374  **/
1375 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1376 {
1377         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1378         fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1379         if (tgt)
1380                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1381 }
1382
1383 /**
1384  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1385  * @starget:    scsi target struct
1386  *
1387  * Return value:
1388  *      none
1389  **/
1390 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1391 {
1392         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1393         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1394         if (tgt)
1395                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1396 }
1397
1398 /**
1399  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1400  * @starget:    scsi target struct
1401  *
1402  * Return value:
1403  *      none
1404  **/
1405 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1406 {
1407         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1408         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1409         if (tgt)
1410                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1411 }
1412
1413 /**
1414  * ibmvfc_wait_while_resetting - Wait while the host resets
1415  * @vhost:              ibmvfc host struct
1416  *
1417  * Return value:
1418  *      0 on success / other on failure
1419  **/
1420 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1421 {
1422         long timeout = wait_event_timeout(vhost->init_wait_q,
1423                                           ((vhost->state == IBMVFC_ACTIVE ||
1424                                             vhost->state == IBMVFC_HOST_OFFLINE ||
1425                                             vhost->state == IBMVFC_LINK_DEAD) &&
1426                                            vhost->action == IBMVFC_HOST_ACTION_NONE),
1427                                           (init_timeout * HZ));
1428
1429         return timeout ? 0 : -EIO;
1430 }
1431
1432 /**
1433  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1434  * @shost:              scsi host struct
1435  *
1436  * Return value:
1437  *      0 on success / other on failure
1438  **/
1439 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1440 {
1441         struct ibmvfc_host *vhost = shost_priv(shost);
1442
1443         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1444         ibmvfc_reset_host(vhost);
1445         return ibmvfc_wait_while_resetting(vhost);
1446 }
1447
1448 /**
1449  * ibmvfc_gather_partition_info - Gather info about the LPAR
1450  * @vhost:      ibmvfc host struct
1451  *
1452  * Return value:
1453  *      none
1454  **/
1455 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1456 {
1457         struct device_node *rootdn;
1458         const char *name;
1459         const unsigned int *num;
1460
1461         rootdn = of_find_node_by_path("/");
1462         if (!rootdn)
1463                 return;
1464
1465         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1466         if (name)
1467                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1468         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1469         if (num)
1470                 vhost->partition_number = *num;
1471         of_node_put(rootdn);
1472 }
1473
/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:      ibmvfc host struct
 *
 * Fills in vhost->login_info, the buffer sent to the server during NPIV
 * login, advertising the client's limits, capabilities, and identity.
 *
 * Return value:
 *      none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;
	u16 max_cmds;

	/* Base command budget: the SCSI queue depth plus internal requests;
	 * each desired sub-CRQ channel adds its own budget when MQ is on. */
	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
	if (mq_enabled)
		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
			vhost->scsi_scrqs.desired_queues;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	/* max sectors converted to bytes */
	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	login_info->max_cmds = cpu_to_be32(max_cmds);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);

	if (vhost->mq_enabled || vhost->using_channels)
		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);

	/* Tell the server where async events should be delivered */
	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(async_crq->size *
					    sizeof(*async_crq->msgs.async));
	/* NOTE(review): strncpy() does not NUL-terminate when the source is
	 * IBMVFC_MAX_NAME bytes or longer -- presumably the server treats
	 * these as fixed-width, zero-padded fields rather than C strings;
	 * confirm before changing to a terminating copy. */
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
1524
1525 /**
1526  * __ibmvfc_get_event - Gets the next free event in pool
1527  * @queue:      ibmvfc queue struct
1528  * @reserved:   event is for a reserved management command
1529  *
1530  * Returns a free event from the pool.
1531  **/
1532 static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
1533 {
1534         struct ibmvfc_event *evt = NULL;
1535         unsigned long flags;
1536
1537         spin_lock_irqsave(&queue->l_lock, flags);
1538         if (reserved && queue->reserved_free) {
1539                 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1540                 evt->reserved = 1;
1541                 queue->reserved_free--;
1542         } else if (queue->evt_free) {
1543                 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1544                 queue->evt_free--;
1545         } else {
1546                 goto out;
1547         }
1548
1549         atomic_set(&evt->free, 0);
1550         list_del(&evt->queue_list);
1551 out:
1552         spin_unlock_irqrestore(&queue->l_lock, flags);
1553         return evt;
1554 }
1555
/* Wrappers for __ibmvfc_get_event(): a "reserved" event draws from the
 * pool portion set aside for reserved management commands. */
#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
1558
1559 /**
1560  * ibmvfc_locked_done - Calls evt completion with host_lock held
1561  * @evt:        ibmvfc evt to complete
1562  *
1563  * All non-scsi command completion callbacks have the expectation that the
1564  * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1565  * MAD evt with the host_lock.
1566  **/
1567 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1568 {
1569         unsigned long flags;
1570
1571         spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1572         evt->_done(evt);
1573         spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1574 }
1575
1576 /**
1577  * ibmvfc_init_event - Initialize fields in an event struct that are always
1578  *                              required.
1579  * @evt:        The event
1580  * @done:       Routine to call when the event is responded to
1581  * @format:     SRP or MAD format
1582  **/
1583 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1584                               void (*done) (struct ibmvfc_event *), u8 format)
1585 {
1586         evt->cmnd = NULL;
1587         evt->sync_iu = NULL;
1588         evt->eh_comp = NULL;
1589         evt->crq.format = format;
1590         if (format == IBMVFC_CMD_FORMAT)
1591                 evt->done = done;
1592         else {
1593                 evt->_done = done;
1594                 evt->done = ibmvfc_locked_done;
1595         }
1596         evt->hwq = 0;
1597 }
1598
1599 /**
1600  * ibmvfc_map_sg_list - Initialize scatterlist
1601  * @scmd:       scsi command struct
1602  * @nseg:       number of scatterlist segments
1603  * @md: memory descriptor list to initialize
1604  **/
1605 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1606                                struct srp_direct_buf *md)
1607 {
1608         int i;
1609         struct scatterlist *sg;
1610
1611         scsi_for_each_sg(scmd, sg, nseg, i) {
1612                 md[i].va = cpu_to_be64(sg_dma_address(sg));
1613                 md[i].len = cpu_to_be32(sg_dma_len(sg));
1614                 md[i].key = 0;
1615         }
1616 }
1617
/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd:               struct scsi_cmnd with the scatterlist
 * @evt:                ibmvfc event struct
 * @vfc_cmd:    vfc_cmd that contains the memory descriptor
 * @dev:                device for which to map dma memory
 *
 * Single-segment transfers use the command's inline descriptor; larger
 * scatterlists go through a per-event external descriptor list allocated
 * from the host's DMA pool.
 *
 * Returns:
 *      0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{

	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		/* No data to transfer */
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
		return 0;
	} else if (unlikely(sg_mapped < 0)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
		return sg_mapped;
	}

	/* Record the transfer direction in both the command flags and the
	 * FCP command IU */
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
		iu->add_cdb_len |= IBMVFC_WRDATA;
	} else {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
		iu->add_cdb_len |= IBMVFC_RDDATA;
	}

	/* A single segment fits in the inline descriptor */
	if (sg_mapped == 1) {
		ibmvfc_map_sg_list(scmd, sg_mapped, data);
		return 0;
	}

	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);

	/* Allocate the external descriptor list lazily; it stays attached
	 * to the event and is released when the event pool is freed */
	if (!evt->ext_list) {
		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
					       &evt->ext_list_token);

		if (!evt->ext_list) {
			scsi_dma_unmap(scmd);
			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
			return -ENOMEM;
		}
	}

	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

	/* Point the inline descriptor at the external list */
	data->va = cpu_to_be64(evt->ext_list_token);
	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
	data->key = 0;
	return 0;
}
1685
1686 /**
1687  * ibmvfc_timeout - Internal command timeout handler
1688  * @t:  struct ibmvfc_event that timed out
1689  *
1690  * Called when an internally generated command times out
1691  **/
1692 static void ibmvfc_timeout(struct timer_list *t)
1693 {
1694         struct ibmvfc_event *evt = from_timer(evt, t, timer);
1695         struct ibmvfc_host *vhost = evt->vhost;
1696         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1697         ibmvfc_reset_host(vhost);
1698 }
1699
1700 /**
1701  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1702  * @evt:                event to be sent
1703  * @vhost:              ibmvfc host struct
1704  * @timeout:    timeout in seconds - 0 means do not time command
1705  *
1706  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1707  **/
1708 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1709                              struct ibmvfc_host *vhost, unsigned long timeout)
1710 {
1711         __be64 *crq_as_u64 = (__be64 *) &evt->crq;
1712         unsigned long flags;
1713         int rc;
1714
1715         /* Copy the IU into the transfer area */
1716         *evt->xfer_iu = evt->iu;
1717         if (evt->crq.format == IBMVFC_CMD_FORMAT)
1718                 evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1719         else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1720                 evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1721         else
1722                 BUG();
1723
1724         timer_setup(&evt->timer, ibmvfc_timeout, 0);
1725
1726         if (timeout) {
1727                 evt->timer.expires = jiffies + (timeout * HZ);
1728                 add_timer(&evt->timer);
1729         }
1730
1731         spin_lock_irqsave(&evt->queue->l_lock, flags);
1732         list_add_tail(&evt->queue_list, &evt->queue->sent);
1733         atomic_set(&evt->active, 1);
1734
1735         mb();
1736
1737         if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1738                 rc = ibmvfc_send_sub_crq(vhost,
1739                                          evt->queue->vios_cookie,
1740                                          be64_to_cpu(crq_as_u64[0]),
1741                                          be64_to_cpu(crq_as_u64[1]),
1742                                          0, 0);
1743         else
1744                 rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1745                                      be64_to_cpu(crq_as_u64[1]));
1746
1747         if (rc) {
1748                 atomic_set(&evt->active, 0);
1749                 list_del(&evt->queue_list);
1750                 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1751                 del_timer(&evt->timer);
1752
1753                 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1754                  * Firmware will send a CRQ with a transport event (0xFF) to
1755                  * tell this client what has happened to the transport. This
1756                  * will be handled in ibmvfc_handle_crq()
1757                  */
1758                 if (rc == H_CLOSED) {
1759                         if (printk_ratelimit())
1760                                 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1761                         if (evt->cmnd)
1762                                 scsi_dma_unmap(evt->cmnd);
1763                         ibmvfc_free_event(evt);
1764                         return SCSI_MLQUEUE_HOST_BUSY;
1765                 }
1766
1767                 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1768                 if (evt->cmnd) {
1769                         evt->cmnd->result = DID_ERROR << 16;
1770                         evt->done = ibmvfc_scsi_eh_done;
1771                 } else
1772                         evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1773
1774                 evt->done(evt);
1775         } else {
1776                 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1777                 ibmvfc_trc_start(evt);
1778         }
1779
1780         return 0;
1781 }
1782
1783 /**
1784  * ibmvfc_log_error - Log an error for the failed command if appropriate
1785  * @evt:        ibmvfc event to log
1786  *
1787  **/
1788 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1789 {
1790         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1791         struct ibmvfc_host *vhost = evt->vhost;
1792         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1793         struct scsi_cmnd *cmnd = evt->cmnd;
1794         const char *err = unknown_error;
1795         int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1796         int logerr = 0;
1797         int rsp_code = 0;
1798
1799         if (index >= 0) {
1800                 logerr = cmd_status[index].log;
1801                 err = cmd_status[index].name;
1802         }
1803
1804         if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1805                 return;
1806
1807         if (rsp->flags & FCP_RSP_LEN_VALID)
1808                 rsp_code = rsp->data.info.rsp_code;
1809
1810         scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1811                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1812                     cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1813                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1814 }
1815
1816 /**
1817  * ibmvfc_relogin - Log back into the specified device
1818  * @sdev:       scsi device struct
1819  *
1820  **/
1821 static void ibmvfc_relogin(struct scsi_device *sdev)
1822 {
1823         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1824         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1825         struct ibmvfc_target *tgt;
1826         unsigned long flags;
1827
1828         spin_lock_irqsave(vhost->host->host_lock, flags);
1829         list_for_each_entry(tgt, &vhost->targets, queue) {
1830                 if (rport == tgt->rport) {
1831                         ibmvfc_del_tgt(tgt);
1832                         break;
1833                 }
1834         }
1835
1836         ibmvfc_reinit_host(vhost);
1837         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1838 }
1839
1840 /**
1841  * ibmvfc_scsi_done - Handle responses from commands
1842  * @evt:        ibmvfc event to be handled
1843  *
1844  * Used as a callback when sending scsi cmds.
1845  **/
1846 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1847 {
1848         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1849         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1850         struct scsi_cmnd *cmnd = evt->cmnd;
1851         u32 rsp_len = 0;
1852         u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1853
1854         if (cmnd) {
1855                 if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1856                         scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1857                 else if (rsp->flags & FCP_RESID_UNDER)
1858                         scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1859                 else
1860                         scsi_set_resid(cmnd, 0);
1861
1862                 if (vfc_cmd->status) {
1863                         cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1864
1865                         if (rsp->flags & FCP_RSP_LEN_VALID)
1866                                 rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1867                         if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1868                                 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1869                         if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1870                                 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1871                         if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1872                             (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1873                                 ibmvfc_relogin(cmnd->device);
1874
1875                         if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1876                                 cmnd->result = (DID_ERROR << 16);
1877
1878                         ibmvfc_log_error(evt);
1879                 }
1880
1881                 if (!cmnd->result &&
1882                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1883                         cmnd->result = (DID_ERROR << 16);
1884
1885                 scsi_dma_unmap(cmnd);
1886                 scsi_done(cmnd);
1887         }
1888
1889         ibmvfc_free_event(evt);
1890 }
1891
1892 /**
1893  * ibmvfc_host_chkready - Check if the host can accept commands
1894  * @vhost:       struct ibmvfc host
1895  *
1896  * Returns:
1897  *      1 if host can accept command / 0 if not
1898  **/
1899 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1900 {
1901         int result = 0;
1902
1903         switch (vhost->state) {
1904         case IBMVFC_LINK_DEAD:
1905         case IBMVFC_HOST_OFFLINE:
1906                 result = DID_NO_CONNECT << 16;
1907                 break;
1908         case IBMVFC_NO_CRQ:
1909         case IBMVFC_INITIALIZING:
1910         case IBMVFC_HALTED:
1911         case IBMVFC_LINK_DOWN:
1912                 result = DID_REQUEUE << 16;
1913                 break;
1914         case IBMVFC_ACTIVE:
1915                 result = 0;
1916                 break;
1917         }
1918
1919         return result;
1920 }
1921
1922 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1923 {
1924         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1925         struct ibmvfc_host *vhost = evt->vhost;
1926         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1927         struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1928         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1929         size_t offset;
1930
1931         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1932         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1933                 offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1934                 vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1935         } else
1936                 offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1937         vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1938         vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1939         vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1940         vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1941         vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1942         vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1943         vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1944         int_to_scsilun(sdev->lun, &iu->lun);
1945
1946         return vfc_cmd;
1947 }
1948
1949 /**
1950  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1951  * @shost:      scsi host struct
1952  * @cmnd:       struct scsi_cmnd to be executed
1953  *
1954  * Returns:
1955  *      0 on success / other on failure
1956  **/
1957 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1958 {
1959         struct ibmvfc_host *vhost = shost_priv(shost);
1960         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1961         struct ibmvfc_cmd *vfc_cmd;
1962         struct ibmvfc_fcp_cmd_iu *iu;
1963         struct ibmvfc_event *evt;
1964         u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
1965         u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1966         u16 scsi_channel;
1967         int rc;
1968
1969         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1970             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1971                 cmnd->result = rc;
1972                 scsi_done(cmnd);
1973                 return 0;
1974         }
1975
1976         cmnd->result = (DID_OK << 16);
1977         if (vhost->using_channels) {
1978                 scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1979                 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1980                 if (!evt)
1981                         return SCSI_MLQUEUE_HOST_BUSY;
1982
1983                 evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1984         } else {
1985                 evt = ibmvfc_get_event(&vhost->crq);
1986                 if (!evt)
1987                         return SCSI_MLQUEUE_HOST_BUSY;
1988         }
1989
1990         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1991         evt->cmnd = cmnd;
1992
1993         vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1994         iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1995
1996         iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1997         memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1998
1999         if (cmnd->flags & SCMD_TAGGED) {
2000                 vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
2001                 iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
2002         }
2003
2004         vfc_cmd->correlation = cpu_to_be64((u64)evt);
2005
2006         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
2007                 return ibmvfc_send_event(evt, vhost, 0);
2008
2009         ibmvfc_free_event(evt);
2010         if (rc == -ENOMEM)
2011                 return SCSI_MLQUEUE_HOST_BUSY;
2012
2013         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2014                 scmd_printk(KERN_ERR, cmnd,
2015                             "Failed to map DMA buffer for command. rc=%d\n", rc);
2016
2017         cmnd->result = DID_ERROR << 16;
2018         scsi_done(cmnd);
2019         return 0;
2020 }
2021
2022 /**
2023  * ibmvfc_sync_completion - Signal that a synchronous command has completed
2024  * @evt:        ibmvfc event struct
2025  *
2026  **/
2027 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
2028 {
2029         /* copy the response back */
2030         if (evt->sync_iu)
2031                 *evt->sync_iu = *evt->xfer_iu;
2032
2033         complete(&evt->comp);
2034 }
2035
2036 /**
2037  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
2038  * @evt:        struct ibmvfc_event
2039  *
2040  **/
2041 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
2042 {
2043         struct ibmvfc_host *vhost = evt->vhost;
2044
2045         ibmvfc_free_event(evt);
2046         vhost->aborting_passthru = 0;
2047         dev_info(vhost->dev, "Passthru command cancelled\n");
2048 }
2049
2050 /**
2051  * ibmvfc_bsg_timeout - Handle a BSG timeout
2052  * @job:        struct bsg_job that timed out
2053  *
2054  * Returns:
2055  *      0 on success / other on failure
2056  **/
2057 static int ibmvfc_bsg_timeout(struct bsg_job *job)
2058 {
2059         struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2060         unsigned long port_id = (unsigned long)job->dd_data;
2061         struct ibmvfc_event *evt;
2062         struct ibmvfc_tmf *tmf;
2063         unsigned long flags;
2064         int rc;
2065
2066         ENTER;
2067         spin_lock_irqsave(vhost->host->host_lock, flags);
2068         if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2069                 __ibmvfc_reset_host(vhost);
2070                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2071                 return 0;
2072         }
2073
2074         vhost->aborting_passthru = 1;
2075         evt = ibmvfc_get_reserved_event(&vhost->crq);
2076         if (!evt) {
2077                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2078                 return -ENOMEM;
2079         }
2080
2081         ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2082
2083         tmf = &evt->iu.tmf;
2084         memset(tmf, 0, sizeof(*tmf));
2085         tmf->common.version = cpu_to_be32(1);
2086         tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2087         tmf->common.length = cpu_to_be16(sizeof(*tmf));
2088         tmf->scsi_id = cpu_to_be64(port_id);
2089         tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2090         tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2091         rc = ibmvfc_send_event(evt, vhost, default_timeout);
2092
2093         if (rc != 0) {
2094                 vhost->aborting_passthru = 0;
2095                 dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2096                 rc = -EIO;
2097         } else
2098                 dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2099                          port_id);
2100
2101         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2102
2103         LEAVE;
2104         return rc;
2105 }
2106
2107 /**
2108  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2109  * @vhost:              struct ibmvfc_host to send command
2110  * @port_id:    port ID to send command
2111  *
2112  * Returns:
2113  *      0 on success / other on failure
2114  **/
2115 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2116 {
2117         struct ibmvfc_port_login *plogi;
2118         struct ibmvfc_target *tgt;
2119         struct ibmvfc_event *evt;
2120         union ibmvfc_iu rsp_iu;
2121         unsigned long flags;
2122         int rc = 0, issue_login = 1;
2123
2124         ENTER;
2125         spin_lock_irqsave(vhost->host->host_lock, flags);
2126         list_for_each_entry(tgt, &vhost->targets, queue) {
2127                 if (tgt->scsi_id == port_id) {
2128                         issue_login = 0;
2129                         break;
2130                 }
2131         }
2132
2133         if (!issue_login)
2134                 goto unlock_out;
2135         if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2136                 goto unlock_out;
2137
2138         evt = ibmvfc_get_reserved_event(&vhost->crq);
2139         if (!evt) {
2140                 rc = -ENOMEM;
2141                 goto unlock_out;
2142         }
2143         ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2144         plogi = &evt->iu.plogi;
2145         memset(plogi, 0, sizeof(*plogi));
2146         plogi->common.version = cpu_to_be32(1);
2147         plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2148         plogi->common.length = cpu_to_be16(sizeof(*plogi));
2149         plogi->scsi_id = cpu_to_be64(port_id);
2150         evt->sync_iu = &rsp_iu;
2151         init_completion(&evt->comp);
2152
2153         rc = ibmvfc_send_event(evt, vhost, default_timeout);
2154         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2155
2156         if (rc)
2157                 return -EIO;
2158
2159         wait_for_completion(&evt->comp);
2160
2161         if (rsp_iu.plogi.common.status)
2162                 rc = -EIO;
2163
2164         spin_lock_irqsave(vhost->host->host_lock, flags);
2165         ibmvfc_free_event(evt);
2166 unlock_out:
2167         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2168         LEAVE;
2169         return rc;
2170 }
2171
2172 /**
2173  * ibmvfc_bsg_request - Handle a BSG request
2174  * @job:        struct bsg_job to be executed
2175  *
2176  * Returns:
2177  *      0 on success / other on failure
2178  **/
2179 static int ibmvfc_bsg_request(struct bsg_job *job)
2180 {
2181         struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2182         struct fc_rport *rport = fc_bsg_to_rport(job);
2183         struct ibmvfc_passthru_mad *mad;
2184         struct ibmvfc_event *evt;
2185         union ibmvfc_iu rsp_iu;
2186         unsigned long flags, port_id = -1;
2187         struct fc_bsg_request *bsg_request = job->request;
2188         struct fc_bsg_reply *bsg_reply = job->reply;
2189         unsigned int code = bsg_request->msgcode;
2190         int rc = 0, req_seg, rsp_seg, issue_login = 0;
2191         u32 fc_flags, rsp_len;
2192
2193         ENTER;
2194         bsg_reply->reply_payload_rcv_len = 0;
2195         if (rport)
2196                 port_id = rport->port_id;
2197
2198         switch (code) {
2199         case FC_BSG_HST_ELS_NOLOGIN:
2200                 port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2201                         (bsg_request->rqst_data.h_els.port_id[1] << 8) |
2202                         bsg_request->rqst_data.h_els.port_id[2];
2203                 fallthrough;
2204         case FC_BSG_RPT_ELS:
2205                 fc_flags = IBMVFC_FC_ELS;
2206                 break;
2207         case FC_BSG_HST_CT:
2208                 issue_login = 1;
2209                 port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2210                         (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2211                         bsg_request->rqst_data.h_ct.port_id[2];
2212                 fallthrough;
2213         case FC_BSG_RPT_CT:
2214                 fc_flags = IBMVFC_FC_CT_IU;
2215                 break;
2216         default:
2217                 return -ENOTSUPP;
2218         }
2219
2220         if (port_id == -1)
2221                 return -EINVAL;
2222         if (!mutex_trylock(&vhost->passthru_mutex))
2223                 return -EBUSY;
2224
2225         job->dd_data = (void *)port_id;
2226         req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2227                              job->request_payload.sg_cnt, DMA_TO_DEVICE);
2228
2229         if (!req_seg) {
2230                 mutex_unlock(&vhost->passthru_mutex);
2231                 return -ENOMEM;
2232         }
2233
2234         rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2235                              job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2236
2237         if (!rsp_seg) {
2238                 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2239                              job->request_payload.sg_cnt, DMA_TO_DEVICE);
2240                 mutex_unlock(&vhost->passthru_mutex);
2241                 return -ENOMEM;
2242         }
2243
2244         if (req_seg > 1 || rsp_seg > 1) {
2245                 rc = -EINVAL;
2246                 goto out;
2247         }
2248
2249         if (issue_login)
2250                 rc = ibmvfc_bsg_plogi(vhost, port_id);
2251
2252         spin_lock_irqsave(vhost->host->host_lock, flags);
2253
2254         if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2255             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2256                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2257                 goto out;
2258         }
2259
2260         evt = ibmvfc_get_reserved_event(&vhost->crq);
2261         if (!evt) {
2262                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2263                 rc = -ENOMEM;
2264                 goto out;
2265         }
2266         ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2267         mad = &evt->iu.passthru;
2268
2269         memset(mad, 0, sizeof(*mad));
2270         mad->common.version = cpu_to_be32(1);
2271         mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2272         mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2273
2274         mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2275                 offsetof(struct ibmvfc_passthru_mad, iu));
2276         mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2277
2278         mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2279         mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2280         mad->iu.flags = cpu_to_be32(fc_flags);
2281         mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2282
2283         mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2284         mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2285         mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2286         mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2287         mad->iu.scsi_id = cpu_to_be64(port_id);
2288         mad->iu.tag = cpu_to_be64((u64)evt);
2289         rsp_len = be32_to_cpu(mad->iu.rsp.len);
2290
2291         evt->sync_iu = &rsp_iu;
2292         init_completion(&evt->comp);
2293         rc = ibmvfc_send_event(evt, vhost, 0);
2294         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2295
2296         if (rc) {
2297                 rc = -EIO;
2298                 goto out;
2299         }
2300
2301         wait_for_completion(&evt->comp);
2302
2303         if (rsp_iu.passthru.common.status)
2304                 rc = -EIO;
2305         else
2306                 bsg_reply->reply_payload_rcv_len = rsp_len;
2307
2308         spin_lock_irqsave(vhost->host->host_lock, flags);
2309         ibmvfc_free_event(evt);
2310         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2311         bsg_reply->result = rc;
2312         bsg_job_done(job, bsg_reply->result,
2313                        bsg_reply->reply_payload_rcv_len);
2314         rc = 0;
2315 out:
2316         dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2317                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
2318         dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2319                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2320         mutex_unlock(&vhost->passthru_mutex);
2321         LEAVE;
2322         return rc;
2323 }
2324
2325 /**
2326  * ibmvfc_reset_device - Reset the device with the specified reset type
2327  * @sdev:       scsi device to reset
2328  * @type:       reset type
2329  * @desc:       reset type description for log messages
2330  *
2331  * Returns:
2332  *      0 on success / other on failure
2333  **/
2334 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2335 {
2336         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2337         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2338         struct ibmvfc_cmd *tmf;
2339         struct ibmvfc_event *evt = NULL;
2340         union ibmvfc_iu rsp_iu;
2341         struct ibmvfc_fcp_cmd_iu *iu;
2342         struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2343         int rsp_rc = -EBUSY;
2344         unsigned long flags;
2345         int rsp_code = 0;
2346
2347         spin_lock_irqsave(vhost->host->host_lock, flags);
2348         if (vhost->state == IBMVFC_ACTIVE) {
2349                 if (vhost->using_channels)
2350                         evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2351                 else
2352                         evt = ibmvfc_get_event(&vhost->crq);
2353
2354                 if (!evt) {
2355                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2356                         return -ENOMEM;
2357                 }
2358
2359                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2360                 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2361                 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2362
2363                 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2364                 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2365                         tmf->target_wwpn = cpu_to_be64(rport->port_name);
2366                 iu->tmf_flags = type;
2367                 evt->sync_iu = &rsp_iu;
2368
2369                 init_completion(&evt->comp);
2370                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2371         }
2372         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2373
2374         if (rsp_rc != 0) {
2375                 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2376                             desc, rsp_rc);
2377                 return -EIO;
2378         }
2379
2380         sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2381         wait_for_completion(&evt->comp);
2382
2383         if (rsp_iu.cmd.status)
2384                 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2385
2386         if (rsp_code) {
2387                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2388                         rsp_code = fc_rsp->data.info.rsp_code;
2389
2390                 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2391                             "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2392                             ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2393                             be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2394                             fc_rsp->scsi_status);
2395                 rsp_rc = -EIO;
2396         } else
2397                 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2398
2399         spin_lock_irqsave(vhost->host->host_lock, flags);
2400         ibmvfc_free_event(evt);
2401         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2402         return rsp_rc;
2403 }
2404
2405 /**
2406  * ibmvfc_match_rport - Match function for specified remote port
2407  * @evt:        ibmvfc event struct
2408  * @rport:      device to match
2409  *
2410  * Returns:
2411  *      1 if event matches rport / 0 if event does not match rport
2412  **/
2413 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2414 {
2415         struct fc_rport *cmd_rport;
2416
2417         if (evt->cmnd) {
2418                 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2419                 if (cmd_rport == rport)
2420                         return 1;
2421         }
2422         return 0;
2423 }
2424
2425 /**
2426  * ibmvfc_match_target - Match function for specified target
2427  * @evt:        ibmvfc event struct
2428  * @device:     device to match (starget)
2429  *
2430  * Returns:
2431  *      1 if event matches starget / 0 if event does not match starget
2432  **/
2433 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2434 {
2435         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2436                 return 1;
2437         return 0;
2438 }
2439
2440 /**
2441  * ibmvfc_match_lun - Match function for specified LUN
2442  * @evt:        ibmvfc event struct
2443  * @device:     device to match (sdev)
2444  *
2445  * Returns:
2446  *      1 if event matches sdev / 0 if event does not match sdev
2447  **/
2448 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2449 {
2450         if (evt->cmnd && evt->cmnd->device == device)
2451                 return 1;
2452         return 0;
2453 }
2454
2455 /**
2456  * ibmvfc_event_is_free - Check if event is free or not
2457  * @evt:        ibmvfc event struct
2458  *
2459  * Returns:
2460  *      true / false
2461  **/
2462 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2463 {
2464         struct ibmvfc_event *loop_evt;
2465
2466         list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2467                 if (loop_evt == evt)
2468                         return true;
2469
2470         return false;
2471 }
2472
2473 /**
2474  * ibmvfc_wait_for_ops - Wait for ops to complete
2475  * @vhost:      ibmvfc host struct
2476  * @device:     device to match (starget or sdev)
2477  * @match:      match function
2478  *
2479  * Returns:
2480  *      SUCCESS / FAILED
2481  **/
2482 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2483                                int (*match) (struct ibmvfc_event *, void *))
2484 {
2485         struct ibmvfc_event *evt;
2486         DECLARE_COMPLETION_ONSTACK(comp);
2487         int wait, i, q_index, q_size;
2488         unsigned long flags;
2489         signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2490         struct ibmvfc_queue *queues;
2491
2492         ENTER;
2493         if (vhost->mq_enabled && vhost->using_channels) {
2494                 queues = vhost->scsi_scrqs.scrqs;
2495                 q_size = vhost->scsi_scrqs.active_queues;
2496         } else {
2497                 queues = &vhost->crq;
2498                 q_size = 1;
2499         }
2500
2501         do {
2502                 wait = 0;
2503                 spin_lock_irqsave(vhost->host->host_lock, flags);
2504                 for (q_index = 0; q_index < q_size; q_index++) {
2505                         spin_lock(&queues[q_index].l_lock);
2506                         for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2507                                 evt = &queues[q_index].evt_pool.events[i];
2508                                 if (!ibmvfc_event_is_free(evt)) {
2509                                         if (match(evt, device)) {
2510                                                 evt->eh_comp = &comp;
2511                                                 wait++;
2512                                         }
2513                                 }
2514                         }
2515                         spin_unlock(&queues[q_index].l_lock);
2516                 }
2517                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2518
2519                 if (wait) {
2520                         timeout = wait_for_completion_timeout(&comp, timeout);
2521
2522                         if (!timeout) {
2523                                 wait = 0;
2524                                 spin_lock_irqsave(vhost->host->host_lock, flags);
2525                                 for (q_index = 0; q_index < q_size; q_index++) {
2526                                         spin_lock(&queues[q_index].l_lock);
2527                                         for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2528                                                 evt = &queues[q_index].evt_pool.events[i];
2529                                                 if (!ibmvfc_event_is_free(evt)) {
2530                                                         if (match(evt, device)) {
2531                                                                 evt->eh_comp = NULL;
2532                                                                 wait++;
2533                                                         }
2534                                                 }
2535                                         }
2536                                         spin_unlock(&queues[q_index].l_lock);
2537                                 }
2538                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2539                                 if (wait)
2540                                         dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2541                                 LEAVE;
2542                                 return wait ? FAILED : SUCCESS;
2543                         }
2544                 }
2545         } while (wait);
2546
2547         LEAVE;
2548         return SUCCESS;
2549 }
2550
/**
 * ibmvfc_init_tmf - Allocate and initialize a task management MAD event
 * @queue:	ibmvfc queue to allocate the event from
 * @sdev:	scsi device the TMF is directed at
 * @type:	TMF flags (IBMVFC_TMF_*)
 *
 * Fills out a TMF MAD addressed to the rport/LUN backing @sdev. Both
 * callers in this file invoke it with the host lock held.
 *
 * Returns:
 *	initialized event / NULL if no reserved event was available
 **/
static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
					    struct scsi_device *sdev,
					    int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;

	evt = ibmvfc_get_reserved_event(queue);
	if (!evt)
		return NULL;
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	/* Version 2 of the MAD additionally carries the target WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(rport->port_name);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &tmf->lun);
	/* Drop the suppress-ABTS flag if the VIOS doesn't support it */
	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
	/* When not fully active, only the suppress-ABTS bit is passed through */
	if (vhost->state == IBMVFC_ACTIVE)
		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
	else
		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);

	init_completion(&evt->comp);

	return evt;
}
2591
/**
 * ibmvfc_cancel_all_mq - Cancel outstanding commands on all Sub-CRQ channels
 * @sdev:	scsi device to cancel commands
 * @type:	type of error recovery being performed
 *
 * Sends a cancel TMF on every active Sub-CRQ that currently has a command
 * outstanding to @sdev, then waits for and checks each cancel response.
 *
 * Returns:
 *	0 on success / -ENOMEM or -EIO on failure
 **/
static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt, *temp;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int num_hwq, i;
	int fail = 0;
	LIST_HEAD(cancelq);
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	num_hwq = vhost->scsi_scrqs.active_queues;
	for (i = 0; i < num_hwq; i++) {
		spin_lock(queues[i].q_lock);
		spin_lock(&queues[i].l_lock);
		found_evt = NULL;
		/* Only send a cancel on queues with commands out to this device */
		list_for_each_entry(evt, &queues[i].sent, queue_list) {
			if (evt->cmnd && evt->cmnd->device == sdev) {
				found_evt = evt;
				break;
			}
		}
		spin_unlock(&queues[i].l_lock);

		if (found_evt && vhost->logged_in) {
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			if (!evt) {
				spin_unlock(queues[i].q_lock);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				return -ENOMEM;
			}
			/* Response lands in this queue's dedicated cancel_rsp slot */
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (list_empty(&cancelq)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	/* Wait for every cancel sent above and collect any hard failure */
	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
			/* Host adapter most likely going through reset, return success to
			 * the caller will wait for the command being cancelled to get returned
			 */
				break;
			default:
				fail = 1;
				break;
			}
		}
	}

	if (fail)
		return -EIO;

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	LEAVE;
	return 0;
}
2671
2672 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2673 {
2674         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2675         struct ibmvfc_event *evt, *found_evt;
2676         union ibmvfc_iu rsp;
2677         int rsp_rc = -EBUSY;
2678         unsigned long flags;
2679         u16 status;
2680
2681         ENTER;
2682         found_evt = NULL;
2683         spin_lock_irqsave(vhost->host->host_lock, flags);
2684         spin_lock(&vhost->crq.l_lock);
2685         list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2686                 if (evt->cmnd && evt->cmnd->device == sdev) {
2687                         found_evt = evt;
2688                         break;
2689                 }
2690         }
2691         spin_unlock(&vhost->crq.l_lock);
2692
2693         if (!found_evt) {
2694                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2695                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2696                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2697                 return 0;
2698         }
2699
2700         if (vhost->logged_in) {
2701                 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2702                 evt->sync_iu = &rsp;
2703                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2704         }
2705
2706         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2707
2708         if (rsp_rc != 0) {
2709                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2710                 /* If failure is received, the host adapter is most likely going
2711                  through reset, return success so the caller will wait for the command
2712                  being cancelled to get returned */
2713                 return 0;
2714         }
2715
2716         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2717
2718         wait_for_completion(&evt->comp);
2719         status = be16_to_cpu(rsp.mad_common.status);
2720         spin_lock_irqsave(vhost->host->host_lock, flags);
2721         ibmvfc_free_event(evt);
2722         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2723
2724         if (status != IBMVFC_MAD_SUCCESS) {
2725                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2726                 switch (status) {
2727                 case IBMVFC_MAD_DRIVER_FAILED:
2728                 case IBMVFC_MAD_CRQ_ERROR:
2729                         /* Host adapter most likely going through reset, return success to
2730                          the caller will wait for the command being cancelled to get returned */
2731                         return 0;
2732                 default:
2733                         return -EIO;
2734                 };
2735         }
2736
2737         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2738         return 0;
2739 }
2740
2741 /**
2742  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2743  * @sdev:       scsi device to cancel commands
2744  * @type:       type of error recovery being performed
2745  *
2746  * This sends a cancel to the VIOS for the specified device. This does
2747  * NOT send any abort to the actual device. That must be done separately.
2748  *
2749  * Returns:
2750  *      0 on success / other on failure
2751  **/
2752 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2753 {
2754         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2755
2756         if (vhost->mq_enabled && vhost->using_channels)
2757                 return ibmvfc_cancel_all_mq(sdev, type);
2758         else
2759                 return ibmvfc_cancel_all_sq(sdev, type);
2760 }
2761
2762 /**
2763  * ibmvfc_match_key - Match function for specified cancel key
2764  * @evt:        ibmvfc event struct
2765  * @key:        cancel key to match
2766  *
2767  * Returns:
2768  *      1 if event matches key / 0 if event does not match key
2769  **/
2770 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2771 {
2772         unsigned long cancel_key = (unsigned long)key;
2773
2774         if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2775             be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2776                 return 1;
2777         return 0;
2778 }
2779
/**
 * ibmvfc_match_evt - Match function for specified event
 * @evt:	ibmvfc event struct
 * @match:	event to match
 *
 * Returns:
 *	1 if event matches key / 0 if event does not match key
 **/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
	return evt == match ? 1 : 0;
}
2794
/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev:	scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rc, rsp_rc = -EBUSY;
	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
	int rsp_code = 0;

	/* Nothing to do if no command is outstanding to this device */
	found_evt = NULL;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(&vhost->crq.l_lock);
	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}
	spin_unlock(&vhost->crq.l_lock);

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	/* Only send the abort if the host is fully active */
	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(&vhost->crq);
		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		evt->sync_iu = &rsp_iu;

		/* Stash the event pointer as the correlation token */
		tmf->correlation = cpu_to_be64((u64)evt);

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	timeout = wait_for_completion_timeout(&evt->comp, timeout);

	if (!timeout) {
		/*
		 * The abort timed out: try to cancel it, and escalate to a
		 * host reset (then a hard reset) if that fails too.
		 */
		rc = ibmvfc_cancel_all(sdev, 0);
		if (!rc) {
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
			if (rc == SUCCESS)
				rc = 0;
		}

		if (rc) {
			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
			ibmvfc_reset_host(vhost);
			rsp_rc = -EIO;
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);

			if (rc == SUCCESS)
				rsp_rc = 0;

			/* Ensure the abort event itself completed before we free it */
			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
			if (rc != SUCCESS) {
				spin_lock_irqsave(vhost->host->host_lock, flags);
				ibmvfc_hard_reset_host(vhost);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				rsp_rc = 0;
			}

			goto out;
		}
	}

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

out:
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2919
2920 /**
2921  * ibmvfc_eh_abort_handler - Abort a command
2922  * @cmd:        scsi command to abort
2923  *
2924  * Returns:
2925  *      SUCCESS / FAST_IO_FAIL / FAILED
2926  **/
2927 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2928 {
2929         struct scsi_device *sdev = cmd->device;
2930         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2931         int cancel_rc, block_rc;
2932         int rc = FAILED;
2933
2934         ENTER;
2935         block_rc = fc_block_scsi_eh(cmd);
2936         ibmvfc_wait_while_resetting(vhost);
2937         if (block_rc != FAST_IO_FAIL) {
2938                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2939                 ibmvfc_abort_task_set(sdev);
2940         } else
2941                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2942
2943         if (!cancel_rc)
2944                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2945
2946         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2947                 rc = FAST_IO_FAIL;
2948
2949         LEAVE;
2950         return rc;
2951 }
2952
2953 /**
2954  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2955  * @cmd:        scsi command struct
2956  *
2957  * Returns:
2958  *      SUCCESS / FAST_IO_FAIL / FAILED
2959  **/
2960 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2961 {
2962         struct scsi_device *sdev = cmd->device;
2963         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2964         int cancel_rc, block_rc, reset_rc = 0;
2965         int rc = FAILED;
2966
2967         ENTER;
2968         block_rc = fc_block_scsi_eh(cmd);
2969         ibmvfc_wait_while_resetting(vhost);
2970         if (block_rc != FAST_IO_FAIL) {
2971                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2972                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2973         } else
2974                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2975
2976         if (!cancel_rc && !reset_rc)
2977                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2978
2979         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2980                 rc = FAST_IO_FAIL;
2981
2982         LEAVE;
2983         return rc;
2984 }
2985
2986 /**
2987  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2988  * @sdev:       scsi device struct
2989  * @data:       return code
2990  *
2991  **/
2992 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2993 {
2994         unsigned long *rc = data;
2995         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2996 }
2997
2998 /**
2999  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
3000  * @sdev:       scsi device struct
3001  * @data:       return code
3002  *
3003  **/
3004 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
3005 {
3006         unsigned long *rc = data;
3007         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
3008 }
3009
3010 /**
3011  * ibmvfc_eh_target_reset_handler - Reset the target
3012  * @cmd:        scsi command struct
3013  *
3014  * Returns:
3015  *      SUCCESS / FAST_IO_FAIL / FAILED
3016  **/
3017 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
3018 {
3019         struct scsi_device *sdev = cmd->device;
3020         struct ibmvfc_host *vhost = shost_priv(sdev->host);
3021         struct scsi_target *starget = scsi_target(sdev);
3022         int block_rc;
3023         int reset_rc = 0;
3024         int rc = FAILED;
3025         unsigned long cancel_rc = 0;
3026
3027         ENTER;
3028         block_rc = fc_block_scsi_eh(cmd);
3029         ibmvfc_wait_while_resetting(vhost);
3030         if (block_rc != FAST_IO_FAIL) {
3031                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
3032                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
3033         } else
3034                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
3035
3036         if (!cancel_rc && !reset_rc)
3037                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
3038
3039         if (block_rc == FAST_IO_FAIL && rc != FAILED)
3040                 rc = FAST_IO_FAIL;
3041
3042         LEAVE;
3043         return rc;
3044 }
3045
3046 /**
3047  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
3048  * @cmd:        struct scsi_cmnd having problems
3049  *
3050  **/
3051 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
3052 {
3053         int rc;
3054         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
3055
3056         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
3057         rc = ibmvfc_issue_fc_host_lip(vhost->host);
3058
3059         return rc ? FAILED : SUCCESS;
3060 }
3061
/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport:		rport struct
 *
 * Cancels (ABTS suppressed) all commands outstanding to every scsi device
 * backed by @rport, waits for them to come back, and retries a previously
 * failed implicit logout for the matching target if one is pending.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	struct ibmvfc_target *tgt;
	unsigned long rc, flags;
	unsigned int found;

	ENTER;
	/* Cancel on every scsi device that belongs to this rport */
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	/* If the commands never came back, bounce the link to flush them */
	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	found = 0;
	/* Locate the driver target matching this rport's port id */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == rport->port_id) {
			found++;
			break;
		}
	}

	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
		/*
		 * If we get here, that means we previously attempted to send
		 * an implicit logout to the target but it failed, most likely
		 * due to I/O being pending, so we need to send it again
		 */
		ibmvfc_del_tgt(tgt);
		ibmvfc_reinit_host(vhost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	LEAVE;
}
3114
/* Description and log level for each async event the adapter reports */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};

/* Fallback descriptor for async events not listed in ae_desc */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
3134
3135 /**
3136  * ibmvfc_get_ae_desc - Get text description for async event
3137  * @ae: async event
3138  *
3139  **/
3140 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3141 {
3142         int i;
3143
3144         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3145                 if (ae_desc[i].ae == ae)
3146                         return &ae_desc[i];
3147
3148         return &unknown_ae;
3149 }
3150
/* Text suffixes for each link state reported in async events */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
3160
3161 /**
3162  * ibmvfc_get_link_state - Get text description for link state
3163  * @state:      link state
3164  *
3165  **/
3166 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3167 {
3168         int i;
3169
3170         for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3171                 if (link_desc[i].state == state)
3172                         return link_desc[i].desc;
3173
3174         return "";
3175 }
3176
3177 /**
3178  * ibmvfc_handle_async - Handle an async event from the adapter
3179  * @crq:        crq to process
3180  * @vhost:      ibmvfc host struct
3181  *
3182  **/
3183 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3184                                 struct ibmvfc_host *vhost)
3185 {
3186         const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3187         struct ibmvfc_target *tgt;
3188
3189         ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3190                    " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3191                    be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3192                    ibmvfc_get_link_state(crq->link_state));
3193
3194         switch (be64_to_cpu(crq->event)) {
3195         case IBMVFC_AE_RESUME:
3196                 switch (crq->link_state) {
3197                 case IBMVFC_AE_LS_LINK_DOWN:
3198                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3199                         break;
3200                 case IBMVFC_AE_LS_LINK_DEAD:
3201                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3202                         break;
3203                 case IBMVFC_AE_LS_LINK_UP:
3204                 case IBMVFC_AE_LS_LINK_BOUNCED:
3205                 default:
3206                         vhost->events_to_log |= IBMVFC_AE_LINKUP;
3207                         vhost->delay_init = 1;
3208                         __ibmvfc_reset_host(vhost);
3209                         break;
3210                 }
3211
3212                 break;
3213         case IBMVFC_AE_LINK_UP:
3214                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
3215                 vhost->delay_init = 1;
3216                 __ibmvfc_reset_host(vhost);
3217                 break;
3218         case IBMVFC_AE_SCN_FABRIC:
3219         case IBMVFC_AE_SCN_DOMAIN:
3220                 vhost->events_to_log |= IBMVFC_AE_RSCN;
3221                 if (vhost->state < IBMVFC_HALTED) {
3222                         vhost->delay_init = 1;
3223                         __ibmvfc_reset_host(vhost);
3224                 }
3225                 break;
3226         case IBMVFC_AE_SCN_NPORT:
3227         case IBMVFC_AE_SCN_GROUP:
3228                 vhost->events_to_log |= IBMVFC_AE_RSCN;
3229                 ibmvfc_reinit_host(vhost);
3230                 break;
3231         case IBMVFC_AE_ELS_LOGO:
3232         case IBMVFC_AE_ELS_PRLO:
3233         case IBMVFC_AE_ELS_PLOGI:
3234                 list_for_each_entry(tgt, &vhost->targets, queue) {
3235                         if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3236                                 break;
3237                         if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3238                                 continue;
3239                         if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3240                                 continue;
3241                         if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3242                                 continue;
3243                         if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3244                                 tgt->logo_rcvd = 1;
3245                         if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3246                                 ibmvfc_del_tgt(tgt);
3247                                 ibmvfc_reinit_host(vhost);
3248                         }
3249                 }
3250                 break;
3251         case IBMVFC_AE_LINK_DOWN:
3252         case IBMVFC_AE_ADAPTER_FAILED:
3253                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3254                 break;
3255         case IBMVFC_AE_LINK_DEAD:
3256                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3257                 break;
3258         case IBMVFC_AE_HALT:
3259                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
3260                 break;
3261         default:
3262                 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3263                 break;
3264         }
3265 }
3266
/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 * @evt_doneq:	Event done queue
 *
 * Called with the host lock held from the tasklet. Initialization and
 * transport events are handled inline; validated command responses are
 * moved onto @evt_doneq so completion handlers run after the lock drops.
 *
**/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
			      struct list_head *evt_doneq)
{
	long rc;
	/* ioba carries back the correlation token we sent: the event pointer */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
			vhost->client_migrated = 1;

			/* Migration is recoverable: requeue rather than fail I/O */
			scsi_block_requests(vhost->host);
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
			wake_up(&vhost->work_wait_q);
		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		} else {
			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
		/* NOTE(review): ioba is logged without be64_to_cpu() here */
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	/* atomic_dec_if_positive guards against completing the same event twice */
	if (unlikely(atomic_dec_if_positive(&evt->active))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	spin_lock(&evt->queue->l_lock);
	list_move_tail(&evt->queue_list, evt_doneq);
	spin_unlock(&evt->queue->l_lock);
}
3353
3354 /**
3355  * ibmvfc_scan_finished - Check if the device scan is done.
3356  * @shost:      scsi host struct
3357  * @time:       current elapsed time
3358  *
3359  * Returns:
3360  *      0 if scan is not done / 1 if scan is done
3361  **/
3362 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3363 {
3364         unsigned long flags;
3365         struct ibmvfc_host *vhost = shost_priv(shost);
3366         int done = 0;
3367
3368         spin_lock_irqsave(shost->host_lock, flags);
3369         if (!vhost->scan_timeout)
3370                 done = 1;
3371         else if (time >= (vhost->scan_timeout * HZ)) {
3372                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3373                          "continuing initialization\n", vhost->scan_timeout);
3374                 done = 1;
3375         }
3376
3377         if (vhost->scan_complete) {
3378                 vhost->scan_timeout = init_timeout;
3379                 done = 1;
3380         }
3381         spin_unlock_irqrestore(shost->host_lock, flags);
3382         return done;
3383 }
3384
3385 /**
3386  * ibmvfc_slave_alloc - Setup the device's task set value
3387  * @sdev:       struct scsi_device device to configure
3388  *
3389  * Set the device's task set value so that error handling works as
3390  * expected.
3391  *
3392  * Returns:
3393  *      0 on success / -ENXIO if device does not exist
3394  **/
3395 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3396 {
3397         struct Scsi_Host *shost = sdev->host;
3398         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3399         struct ibmvfc_host *vhost = shost_priv(shost);
3400         unsigned long flags = 0;
3401
3402         if (!rport || fc_remote_port_chkready(rport))
3403                 return -ENXIO;
3404
3405         spin_lock_irqsave(shost->host_lock, flags);
3406         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3407         spin_unlock_irqrestore(shost->host_lock, flags);
3408         return 0;
3409 }
3410
3411 /**
3412  * ibmvfc_target_alloc - Setup the target's task set value
3413  * @starget:    struct scsi_target
3414  *
3415  * Set the target's task set value so that error handling works as
3416  * expected.
3417  *
3418  * Returns:
3419  *      0 on success / -ENXIO if device does not exist
3420  **/
3421 static int ibmvfc_target_alloc(struct scsi_target *starget)
3422 {
3423         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3424         struct ibmvfc_host *vhost = shost_priv(shost);
3425         unsigned long flags = 0;
3426
3427         spin_lock_irqsave(shost->host_lock, flags);
3428         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3429         spin_unlock_irqrestore(shost->host_lock, flags);
3430         return 0;
3431 }
3432
3433 /**
3434  * ibmvfc_slave_configure - Configure the device
3435  * @sdev:       struct scsi_device device to configure
3436  *
3437  * Enable allow_restart for a device if it is a disk. Adjust the
3438  * queue_depth here also.
3439  *
3440  * Returns:
3441  *      0
3442  **/
3443 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3444 {
3445         struct Scsi_Host *shost = sdev->host;
3446         unsigned long flags = 0;
3447
3448         spin_lock_irqsave(shost->host_lock, flags);
3449         if (sdev->type == TYPE_DISK) {
3450                 sdev->allow_restart = 1;
3451                 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3452         }
3453         spin_unlock_irqrestore(shost->host_lock, flags);
3454         return 0;
3455 }
3456
3457 /**
3458  * ibmvfc_change_queue_depth - Change the device's queue depth
3459  * @sdev:       scsi device struct
3460  * @qdepth:     depth to set
3461  *
3462  * Return value:
3463  *      actual depth set
3464  **/
3465 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3466 {
3467         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3468                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3469
3470         return scsi_change_queue_depth(sdev, qdepth);
3471 }
3472
3473 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3474                                                  struct device_attribute *attr, char *buf)
3475 {
3476         struct Scsi_Host *shost = class_to_shost(dev);
3477         struct ibmvfc_host *vhost = shost_priv(shost);
3478
3479         return snprintf(buf, PAGE_SIZE, "%s\n",
3480                         vhost->login_buf->resp.partition_name);
3481 }
3482
3483 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3484                                             struct device_attribute *attr, char *buf)
3485 {
3486         struct Scsi_Host *shost = class_to_shost(dev);
3487         struct ibmvfc_host *vhost = shost_priv(shost);
3488
3489         return snprintf(buf, PAGE_SIZE, "%s\n",
3490                         vhost->login_buf->resp.device_name);
3491 }
3492
3493 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3494                                          struct device_attribute *attr, char *buf)
3495 {
3496         struct Scsi_Host *shost = class_to_shost(dev);
3497         struct ibmvfc_host *vhost = shost_priv(shost);
3498
3499         return snprintf(buf, PAGE_SIZE, "%s\n",
3500                         vhost->login_buf->resp.port_loc_code);
3501 }
3502
3503 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3504                                          struct device_attribute *attr, char *buf)
3505 {
3506         struct Scsi_Host *shost = class_to_shost(dev);
3507         struct ibmvfc_host *vhost = shost_priv(shost);
3508
3509         return snprintf(buf, PAGE_SIZE, "%s\n",
3510                         vhost->login_buf->resp.drc_name);
3511 }
3512
3513 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3514                                              struct device_attribute *attr, char *buf)
3515 {
3516         struct Scsi_Host *shost = class_to_shost(dev);
3517         struct ibmvfc_host *vhost = shost_priv(shost);
3518         return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3519 }
3520
3521 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3522                                              struct device_attribute *attr, char *buf)
3523 {
3524         struct Scsi_Host *shost = class_to_shost(dev);
3525         struct ibmvfc_host *vhost = shost_priv(shost);
3526         return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3527 }
3528
3529 /**
3530  * ibmvfc_show_log_level - Show the adapter's error logging level
3531  * @dev:        class device struct
3532  * @attr:       unused
3533  * @buf:        buffer
3534  *
3535  * Return value:
3536  *      number of bytes printed to buffer
3537  **/
3538 static ssize_t ibmvfc_show_log_level(struct device *dev,
3539                                      struct device_attribute *attr, char *buf)
3540 {
3541         struct Scsi_Host *shost = class_to_shost(dev);
3542         struct ibmvfc_host *vhost = shost_priv(shost);
3543         unsigned long flags = 0;
3544         int len;
3545
3546         spin_lock_irqsave(shost->host_lock, flags);
3547         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3548         spin_unlock_irqrestore(shost->host_lock, flags);
3549         return len;
3550 }
3551
/**
 * ibmvfc_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @attr:	unused
 * @buf:	buffer containing the new level as a decimal string
 * @count:	buffer size
 *
 * NOTE(review): uses the deprecated simple_strtoul() (no error reporting)
 * and returns strlen(buf) rather than @count; both are long-standing
 * userspace-visible behaviors left unchanged.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_store_log_level(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	vhost->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return strlen(buf);
}
3575
3576 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3577                                          struct device_attribute *attr, char *buf)
3578 {
3579         struct Scsi_Host *shost = class_to_shost(dev);
3580         struct ibmvfc_host *vhost = shost_priv(shost);
3581         struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3582         unsigned long flags = 0;
3583         int len;
3584
3585         spin_lock_irqsave(shost->host_lock, flags);
3586         len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
3587         spin_unlock_irqrestore(shost->host_lock, flags);
3588         return len;
3589 }
3590
/*
 * sysfs store handler: set the desired number of SCSI channels, clamped to
 * the host's hardware queue count, then hard reset the host so the new
 * channel setup is renegotiated. The reset is initiated while still
 * holding the host lock.
 * NOTE(review): uses deprecated simple_strtoul() and returns strlen(buf)
 * rather than @count, matching ibmvfc_store_log_level().
 */
static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
	unsigned long flags = 0;
	unsigned int channels;

	spin_lock_irqsave(shost->host_lock, flags);
	channels = simple_strtoul(buf, NULL, 10);
	scsi->desired_queues = min(channels, shost->nr_hw_queues);
	ibmvfc_hard_reset_host(vhost);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return strlen(buf);
}
3608
/* sysfs host attribute definitions; show handlers above, writable
 * attributes are paired with their store handlers.
 */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3619
3620 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Copies up to @count bytes of the trace buffer starting at @off into
 * @buf, clamping the read to the end of the buffer. The copy is done
 * under the host lock so a snapshot is consistent with concurrent
 * trace writers.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* Reads past the end of the trace buffer return 0 (EOF) */
	if (off > size)
		return 0;
	/* Clamp the read so it never runs off the end of the buffer */
	if (off + count > size) {
		size -= off;
		count = size;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
3656
/* Read-only sysfs binary attribute exposing the adapter trace buffer;
 * serviced by ibmvfc_read_trace(). .size of 0 leaves the size unspecified.
 */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
3665 #endif
3666
/* All host sysfs attributes, published via the attribute group below */
static struct attribute *ibmvfc_host_attrs[] = {
	&dev_attr_partition_name.attr,
	&dev_attr_device_name.attr,
	&dev_attr_port_loc_code.attr,
	&dev_attr_drc_name.attr,
	&dev_attr_npiv_version.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_log_level.attr,
	&dev_attr_nr_scsi_channels.attr,
	NULL
};

ATTRIBUTE_GROUPS(ibmvfc_host);
3680
/* SCSI midlayer host template: queuecommand entry point, error handler
 * callbacks, device setup hooks, and static host limits for this driver.
 */
static const struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_groups = ibmvfc_host_groups,
	.track_queue_depth = 1,
};
3704
/**
 * ibmvfc_next_async_crq - Returns the next entry in async queue
 * @vhost:	ibmvfc host struct
 *
 * Checks the valid bit (0x80) of the current entry; if set, advances the
 * ring cursor (wrapping at queue size) and returns the entry.
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct ibmvfc_async_crq *crq;

	crq = &async_crq->msgs.async[async_crq->cur];
	if (crq->valid & 0x80) {
		if (++async_crq->cur == async_crq->size)
			async_crq->cur = 0;
		/* Read barrier before the caller consumes the entry contents;
		 * same pattern as ibmvfc_next_crq()/ibmvfc_next_scrq().
		 */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3727
/**
 * ibmvfc_next_crq - Returns the next entry in message queue
 * @vhost:	ibmvfc host struct
 *
 * Checks the valid bit (0x80) of the current entry; if set, advances the
 * ring cursor (wrapping at queue size) and returns the entry.
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq;

	crq = &queue->msgs.crq[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
		/* Read barrier before the caller consumes the entry contents */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3750
/**
 * ibmvfc_interrupt - Interrupt handler
 * @irq:		number of irq to handle, not used
 * @dev_instance: ibmvfc_host that received interrupt
 *
 * Masks further VIO interrupts and defers all queue processing to
 * ibmvfc_tasklet(), which re-enables interrupts when the queues drain.
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
{
	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vio_disable_interrupts(to_vio_dev(vhost->dev));
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return IRQ_HANDLED;
}
3770
/**
 * ibmvfc_tasklet - Interrupt handler tasklet
 * @data:		ibmvfc host struct
 *
 * Drains the async event queue and the CRQ under the host and queue
 * locks, then re-enables the VIO interrupt and re-checks both queues to
 * close the race with a message arriving after the final drain. Command
 * completions collected on evt_doneq are run after the locks are dropped.
 *
 * Returns:
 *	Nothing
 **/
static void ibmvfc_tasklet(void *data)
{
	struct ibmvfc_host *vhost = data;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq *crq;
	struct ibmvfc_async_crq *async;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	while (!done) {
		/* Pull all the valid messages off the async CRQ */
		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		}

		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		}

		/* Re-enable interrupts, then re-check for late arrivals; only
		 * finish once both queues are empty after enabling.
		 */
		vio_enable_interrupts(vdev);
		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* Run completion handlers without the host lock held */
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}
3831
3832 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3833 {
3834         struct device *dev = scrq->vhost->dev;
3835         struct vio_dev *vdev = to_vio_dev(dev);
3836         unsigned long rc;
3837         int irq_action = H_ENABLE_VIO_INTERRUPT;
3838
3839         if (!enable)
3840                 irq_action = H_DISABLE_VIO_INTERRUPT;
3841
3842         rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3843                                 scrq->hw_irq, 0, 0);
3844
3845         if (rc)
3846                 dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3847                         enable ? "enable" : "disable", scrq->hwq_id, rc);
3848
3849         return rc;
3850 }
3851
3852 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3853                                struct list_head *evt_doneq)
3854 {
3855         struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3856
3857         switch (crq->valid) {
3858         case IBMVFC_CRQ_CMD_RSP:
3859                 break;
3860         case IBMVFC_CRQ_XPORT_EVENT:
3861                 return;
3862         default:
3863                 dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3864                 return;
3865         }
3866
3867         /* The only kind of payload CRQs we should get are responses to
3868          * things we send. Make sure this response is to something we
3869          * actually sent
3870          */
3871         if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3872                 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3873                         crq->ioba);
3874                 return;
3875         }
3876
3877         if (unlikely(atomic_dec_if_positive(&evt->active))) {
3878                 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3879                         crq->ioba);
3880                 return;
3881         }
3882
3883         spin_lock(&evt->queue->l_lock);
3884         list_move_tail(&evt->queue_list, evt_doneq);
3885         spin_unlock(&evt->queue->l_lock);
3886 }
3887
/* Return the next valid entry in a sub-CRQ ring, advancing the cursor
 * (with wrap), or NULL if the current entry's valid bit (0x80) is clear.
 */
static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;

	crq = &scrq->msgs.scrq[scrq->cur].crq;
	if (crq->valid & 0x80) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
		/* Read barrier before the caller consumes the entry contents */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3902
/* Drain all pending entries from a sub-CRQ. After each drain pass the
 * interrupt is re-enabled and the queue is re-checked to close the race
 * with an entry arriving just before the enable; completions collected on
 * evt_doneq run after the queue lock is dropped.
 */
static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(scrq->q_lock, flags);
	while (!done) {
		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		}

		/* Re-enable the irq and re-check for a late arrival */
		ibmvfc_toggle_scrq_irq(scrq, 1);
		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_toggle_scrq_irq(scrq, 0);
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}
	spin_unlock_irqrestore(scrq->q_lock, flags);

	/* Run completion handlers without the queue lock held */
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}
3937
/**
 * ibmvfc_interrupt_scsi - Interrupt handler for a SCSI sub-CRQ
 * @irq:		number of irq to handle, not used
 * @scrq_instance:	ibmvfc_queue that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
{
	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;

	/* Mask the sub-CRQ irq and process entries; ibmvfc_drain_sub_crq()
	 * re-enables the irq once the queue is empty.
	 */
	ibmvfc_toggle_scrq_irq(scrq, 0);
	ibmvfc_drain_sub_crq(scrq);

	return IRQ_HANDLED;
}
3947
/**
 * ibmvfc_init_tgt - Set the next init job step for the target
 * @tgt:		ibmvfc target struct
 * @job_step:	job step to perform
 *
 * Moves the target to the INIT action, records @job_step, and wakes the
 * host work thread to run it.
 *
 **/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
			    void (*job_step) (struct ibmvfc_target *))
{
	/* NOTE(review): ibmvfc_set_tgt_action() appears to return 0 when the
	 * transition to INIT is accepted; the job step is only set then.
	 */
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
		tgt->job_step = job_step;
	wake_up(&tgt->vhost->work_wait_q);
}
3961
3962 /**
3963  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3964  * @tgt:                ibmvfc target struct
3965  * @job_step:   initialization job step
3966  *
3967  * Returns: 1 if step will be retried / 0 if not
3968  *
3969  **/
3970 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3971                                   void (*job_step) (struct ibmvfc_target *))
3972 {
3973         if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3974                 ibmvfc_del_tgt(tgt);
3975                 wake_up(&tgt->vhost->work_wait_q);
3976                 return 0;
3977         } else
3978                 ibmvfc_init_tgt(tgt, job_step);
3979         return 1;
3980 }
3981
/* Defined in FC-LS */
static const struct {
	int code;	/* PRLI response code (bits 11:8 of the PRLI flags word) */
	int retry;	/* non-zero: retry the PRLI for this code */
	int logged_in;	/* non-zero: treat the target as logged in */
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3998
3999 /**
4000  * ibmvfc_get_prli_rsp - Find PRLI response index
4001  * @flags:      PRLI response flags
4002  *
4003  **/
4004 static int ibmvfc_get_prli_rsp(u16 flags)
4005 {
4006         int i;
4007         int code = (flags & 0x0f00) >> 8;
4008
4009         for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
4010                 if (prli_rsp[i].code == code)
4011                         return i;
4012
4013         return 0;
4014 }
4015
4016 /**
4017  * ibmvfc_tgt_prli_done - Completion handler for Process Login
4018  * @evt:        ibmvfc event struct
4019  *
4020  **/
4021 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
4022 {
4023         struct ibmvfc_target *tgt = evt->tgt;
4024         struct ibmvfc_host *vhost = evt->vhost;
4025         struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
4026         struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
4027         u32 status = be16_to_cpu(rsp->common.status);
4028         int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
4029
4030         vhost->discovery_threads--;
4031         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4032         switch (status) {
4033         case IBMVFC_MAD_SUCCESS:
4034                 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
4035                         parms->type, parms->flags, parms->service_parms);
4036
4037                 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
4038                         index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
4039                         if (prli_rsp[index].logged_in) {
4040                                 if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
4041                                         tgt->need_login = 0;
4042                                         tgt->ids.roles = 0;
4043                                         if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
4044                                                 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4045                                         if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
4046                                                 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4047                                         tgt->add_rport = 1;
4048                                 } else
4049                                         ibmvfc_del_tgt(tgt);
4050                         } else if (prli_rsp[index].retry)
4051                                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4052                         else
4053                                 ibmvfc_del_tgt(tgt);
4054                 } else
4055                         ibmvfc_del_tgt(tgt);
4056                 break;
4057         case IBMVFC_MAD_DRIVER_FAILED:
4058                 break;
4059         case IBMVFC_MAD_CRQ_ERROR:
4060                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4061                 break;
4062         case IBMVFC_MAD_FAILED:
4063         default:
4064                 if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
4065                      be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
4066                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4067                 else if (tgt->logo_rcvd)
4068                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4069                 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4070                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4071                 else
4072                         ibmvfc_del_tgt(tgt);
4073
4074                 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
4075                         ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4076                         be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
4077                 break;
4078         }
4079
4080         kref_put(&tgt->kref, ibmvfc_release_tgt);
4081         ibmvfc_free_event(evt);
4082         wake_up(&vhost->work_wait_q);
4083 }
4084
4085 /**
4086  * ibmvfc_tgt_send_prli - Send a process login
4087  * @tgt:        ibmvfc target struct
4088  *
4089  **/
4090 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4091 {
4092         struct ibmvfc_process_login *prli;
4093         struct ibmvfc_host *vhost = tgt->vhost;
4094         struct ibmvfc_event *evt;
4095
4096         if (vhost->discovery_threads >= disc_threads)
4097                 return;
4098
4099         kref_get(&tgt->kref);
4100         evt = ibmvfc_get_reserved_event(&vhost->crq);
4101         if (!evt) {
4102                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4103                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4104                 __ibmvfc_reset_host(vhost);
4105                 return;
4106         }
4107         vhost->discovery_threads++;
4108         ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4109         evt->tgt = tgt;
4110         prli = &evt->iu.prli;
4111         memset(prli, 0, sizeof(*prli));
4112         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4113                 prli->common.version = cpu_to_be32(2);
4114                 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4115         } else {
4116                 prli->common.version = cpu_to_be32(1);
4117         }
4118         prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4119         prli->common.length = cpu_to_be16(sizeof(*prli));
4120         prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4121
4122         prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4123         prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4124         prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4125         prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4126
4127         if (cls3_error)
4128                 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4129
4130         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4131         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4132                 vhost->discovery_threads--;
4133                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4134                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4135         } else
4136                 tgt_dbg(tgt, "Sent process login\n");
4137 }
4138
4139 /**
4140  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4141  * @evt:        ibmvfc event struct
4142  *
4143  **/
4144 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4145 {
4146         struct ibmvfc_target *tgt = evt->tgt;
4147         struct ibmvfc_host *vhost = evt->vhost;
4148         struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4149         u32 status = be16_to_cpu(rsp->common.status);
4150         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4151
4152         vhost->discovery_threads--;
4153         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4154         switch (status) {
4155         case IBMVFC_MAD_SUCCESS:
4156                 tgt_dbg(tgt, "Port Login succeeded\n");
4157                 if (tgt->ids.port_name &&
4158                     tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4159                         vhost->reinit = 1;
4160                         tgt_dbg(tgt, "Port re-init required\n");
4161                         break;
4162                 }
4163                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4164                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4165                 tgt->ids.port_id = tgt->scsi_id;
4166                 memcpy(&tgt->service_parms, &rsp->service_parms,
4167                        sizeof(tgt->service_parms));
4168                 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4169                        sizeof(tgt->service_parms_change));
4170                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4171                 break;
4172         case IBMVFC_MAD_DRIVER_FAILED:
4173                 break;
4174         case IBMVFC_MAD_CRQ_ERROR:
4175                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4176                 break;
4177         case IBMVFC_MAD_FAILED:
4178         default:
4179                 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4180                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4181                 else
4182                         ibmvfc_del_tgt(tgt);
4183
4184                 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4185                         ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4186                                              be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4187                         ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4188                         ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4189                 break;
4190         }
4191
4192         kref_put(&tgt->kref, ibmvfc_release_tgt);
4193         ibmvfc_free_event(evt);
4194         wake_up(&vhost->work_wait_q);
4195 }
4196
4197 /**
4198  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4199  * @tgt:        ibmvfc target struct
4200  *
4201  **/
4202 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4203 {
4204         struct ibmvfc_port_login *plogi;
4205         struct ibmvfc_host *vhost = tgt->vhost;
4206         struct ibmvfc_event *evt;
4207
4208         if (vhost->discovery_threads >= disc_threads)
4209                 return;
4210
4211         kref_get(&tgt->kref);
4212         tgt->logo_rcvd = 0;
4213         evt = ibmvfc_get_reserved_event(&vhost->crq);
4214         if (!evt) {
4215                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4216                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4217                 __ibmvfc_reset_host(vhost);
4218                 return;
4219         }
4220         vhost->discovery_threads++;
4221         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4222         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4223         evt->tgt = tgt;
4224         plogi = &evt->iu.plogi;
4225         memset(plogi, 0, sizeof(*plogi));
4226         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4227                 plogi->common.version = cpu_to_be32(2);
4228                 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4229         } else {
4230                 plogi->common.version = cpu_to_be32(1);
4231         }
4232         plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4233         plogi->common.length = cpu_to_be16(sizeof(*plogi));
4234         plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4235
4236         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4237                 vhost->discovery_threads--;
4238                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4239                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4240         } else
4241                 tgt_dbg(tgt, "Sent port login\n");
4242 }
4243
4244 /**
4245  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4246  * @evt:        ibmvfc event struct
4247  *
4248  **/
4249 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4250 {
4251         struct ibmvfc_target *tgt = evt->tgt;
4252         struct ibmvfc_host *vhost = evt->vhost;
4253         struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4254         u32 status = be16_to_cpu(rsp->common.status);
4255
4256         vhost->discovery_threads--;
4257         ibmvfc_free_event(evt);
4258         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4259
4260         switch (status) {
4261         case IBMVFC_MAD_SUCCESS:
4262                 tgt_dbg(tgt, "Implicit Logout succeeded\n");
4263                 break;
4264         case IBMVFC_MAD_DRIVER_FAILED:
4265                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4266                 wake_up(&vhost->work_wait_q);
4267                 return;
4268         case IBMVFC_MAD_FAILED:
4269         default:
4270                 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4271                 break;
4272         }
4273
4274         ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4275         kref_put(&tgt->kref, ibmvfc_release_tgt);
4276         wake_up(&vhost->work_wait_q);
4277 }
4278
4279 /**
4280  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4281  * @tgt:                ibmvfc target struct
4282  * @done:               Routine to call when the event is responded to
4283  *
4284  * Returns:
4285  *      Allocated and initialized ibmvfc_event struct
4286  **/
4287 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4288                                                                  void (*done) (struct ibmvfc_event *))
4289 {
4290         struct ibmvfc_implicit_logout *mad;
4291         struct ibmvfc_host *vhost = tgt->vhost;
4292         struct ibmvfc_event *evt;
4293
4294         kref_get(&tgt->kref);
4295         evt = ibmvfc_get_reserved_event(&vhost->crq);
4296         if (!evt)
4297                 return NULL;
4298         ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4299         evt->tgt = tgt;
4300         mad = &evt->iu.implicit_logout;
4301         memset(mad, 0, sizeof(*mad));
4302         mad->common.version = cpu_to_be32(1);
4303         mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4304         mad->common.length = cpu_to_be16(sizeof(*mad));
4305         mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4306         return evt;
4307 }
4308
4309 /**
4310  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4311  * @tgt:                ibmvfc target struct
4312  *
4313  **/
4314 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4315 {
4316         struct ibmvfc_host *vhost = tgt->vhost;
4317         struct ibmvfc_event *evt;
4318
4319         if (vhost->discovery_threads >= disc_threads)
4320                 return;
4321
4322         vhost->discovery_threads++;
4323         evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4324                                                    ibmvfc_tgt_implicit_logout_done);
4325         if (!evt) {
4326                 vhost->discovery_threads--;
4327                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4328                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4329                 __ibmvfc_reset_host(vhost);
4330                 return;
4331         }
4332
4333         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4334         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4335                 vhost->discovery_threads--;
4336                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4337                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4338         } else
4339                 tgt_dbg(tgt, "Sent Implicit Logout\n");
4340 }
4341
4342 /**
4343  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4344  * @evt:        ibmvfc event struct
4345  *
4346  **/
4347 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4348 {
4349         struct ibmvfc_target *tgt = evt->tgt;
4350         struct ibmvfc_host *vhost = evt->vhost;
4351         struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4352         u32 status = be16_to_cpu(mad->common.status);
4353
4354         vhost->discovery_threads--;
4355         ibmvfc_free_event(evt);
4356
4357         /*
4358          * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4359          * driver in which case we need to free up all the targets. If we are
4360          * not unloading, we will still go through a hard reset to get out of
4361          * offline state, so there is no need to track the old targets in that
4362          * case.
4363          */
4364         if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4365                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4366         else
4367                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4368
4369         tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4370         kref_put(&tgt->kref, ibmvfc_release_tgt);
4371         wake_up(&vhost->work_wait_q);
4372 }
4373
4374 /**
4375  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4376  * @tgt:                ibmvfc target struct
4377  *
4378  **/
4379 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4380 {
4381         struct ibmvfc_host *vhost = tgt->vhost;
4382         struct ibmvfc_event *evt;
4383
4384         if (!vhost->logged_in) {
4385                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4386                 return;
4387         }
4388
4389         if (vhost->discovery_threads >= disc_threads)
4390                 return;
4391
4392         vhost->discovery_threads++;
4393         evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4394                                                    ibmvfc_tgt_implicit_logout_and_del_done);
4395
4396         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4397         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4398                 vhost->discovery_threads--;
4399                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4400                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4401         } else
4402                 tgt_dbg(tgt, "Sent Implicit Logout\n");
4403 }
4404
4405 /**
4406  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4407  * @evt:        ibmvfc event struct
4408  *
4409  **/
4410 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4411 {
4412         struct ibmvfc_target *tgt = evt->tgt;
4413         struct ibmvfc_host *vhost = evt->vhost;
4414         struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4415         u32 status = be16_to_cpu(rsp->common.status);
4416         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4417
4418         vhost->discovery_threads--;
4419         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4420         switch (status) {
4421         case IBMVFC_MAD_SUCCESS:
4422                 tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
4423                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4424                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4425                 tgt->scsi_id = tgt->new_scsi_id;
4426                 tgt->ids.port_id = tgt->scsi_id;
4427                 memcpy(&tgt->service_parms, &rsp->service_parms,
4428                        sizeof(tgt->service_parms));
4429                 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4430                        sizeof(tgt->service_parms_change));
4431                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4432                 break;
4433         case IBMVFC_MAD_DRIVER_FAILED:
4434                 break;
4435         case IBMVFC_MAD_CRQ_ERROR:
4436                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4437                 break;
4438         case IBMVFC_MAD_FAILED:
4439         default:
4440                 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4441
4442                 tgt_log(tgt, level,
4443                         "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4444                         tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4445                         status);
4446                 break;
4447         }
4448
4449         kref_put(&tgt->kref, ibmvfc_release_tgt);
4450         ibmvfc_free_event(evt);
4451         wake_up(&vhost->work_wait_q);
4452 }
4453
4454
4455 /**
4456  * ibmvfc_tgt_move_login - Initiate a move login for specified target
4457  * @tgt:                ibmvfc target struct
4458  *
4459  **/
4460 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4461 {
4462         struct ibmvfc_host *vhost = tgt->vhost;
4463         struct ibmvfc_move_login *move;
4464         struct ibmvfc_event *evt;
4465
4466         if (vhost->discovery_threads >= disc_threads)
4467                 return;
4468
4469         kref_get(&tgt->kref);
4470         evt = ibmvfc_get_reserved_event(&vhost->crq);
4471         if (!evt) {
4472                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4473                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4474                 __ibmvfc_reset_host(vhost);
4475                 return;
4476         }
4477         vhost->discovery_threads++;
4478         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4479         ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4480         evt->tgt = tgt;
4481         move = &evt->iu.move_login;
4482         memset(move, 0, sizeof(*move));
4483         move->common.version = cpu_to_be32(1);
4484         move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4485         move->common.length = cpu_to_be16(sizeof(*move));
4486
4487         move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4488         move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4489         move->wwpn = cpu_to_be64(tgt->wwpn);
4490         move->node_name = cpu_to_be64(tgt->ids.node_name);
4491
4492         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4493                 vhost->discovery_threads--;
4494                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4495                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4496         } else
4497                 tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4498 }
4499
4500 /**
4501  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4502  * @mad:        ibmvfc passthru mad struct
4503  * @tgt:        ibmvfc target struct
4504  *
4505  * Returns:
4506  *      1 if PLOGI needed / 0 if PLOGI not needed
4507  **/
4508 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4509                                     struct ibmvfc_target *tgt)
4510 {
4511         if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4512                 return 1;
4513         if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4514                 return 1;
4515         if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4516                 return 1;
4517         return 0;
4518 }
4519
4520 /**
4521  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4522  * @evt:        ibmvfc event struct
4523  *
4524  **/
4525 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4526 {
4527         struct ibmvfc_target *tgt = evt->tgt;
4528         struct ibmvfc_host *vhost = evt->vhost;
4529         struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4530         u32 status = be16_to_cpu(mad->common.status);
4531         u8 fc_reason, fc_explain;
4532
4533         vhost->discovery_threads--;
4534         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4535         del_timer(&tgt->timer);
4536
4537         switch (status) {
4538         case IBMVFC_MAD_SUCCESS:
4539                 tgt_dbg(tgt, "ADISC succeeded\n");
4540                 if (ibmvfc_adisc_needs_plogi(mad, tgt))
4541                         ibmvfc_del_tgt(tgt);
4542                 break;
4543         case IBMVFC_MAD_DRIVER_FAILED:
4544                 break;
4545         case IBMVFC_MAD_FAILED:
4546         default:
4547                 ibmvfc_del_tgt(tgt);
4548                 fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4549                 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4550                 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4551                          ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4552                          be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4553                          ibmvfc_get_fc_type(fc_reason), fc_reason,
4554                          ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4555                 break;
4556         }
4557
4558         kref_put(&tgt->kref, ibmvfc_release_tgt);
4559         ibmvfc_free_event(evt);
4560         wake_up(&vhost->work_wait_q);
4561 }
4562
4563 /**
4564  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4565  * @evt:                ibmvfc event struct
4566  *
4567  **/
4568 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4569 {
4570         struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4571
4572         memset(mad, 0, sizeof(*mad));
4573         mad->common.version = cpu_to_be32(1);
4574         mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4575         mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4576         mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4577                 offsetof(struct ibmvfc_passthru_mad, iu));
4578         mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4579         mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4580         mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4581         mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4582                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4583                 offsetof(struct ibmvfc_passthru_fc_iu, payload));
4584         mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4585         mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4586                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4587                 offsetof(struct ibmvfc_passthru_fc_iu, response));
4588         mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4589 }
4590
4591 /**
4592  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4593  * @evt:                ibmvfc event struct
4594  *
4595  * Just cleanup this event struct. Everything else is handled by
4596  * the ADISC completion handler. If the ADISC never actually comes
4597  * back, we still have the timer running on the ADISC event struct
4598  * which will fire and cause the CRQ to get reset.
4599  *
4600  **/
4601 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4602 {
4603         struct ibmvfc_host *vhost = evt->vhost;
4604         struct ibmvfc_target *tgt = evt->tgt;
4605
4606         tgt_dbg(tgt, "ADISC cancel complete\n");
4607         vhost->abort_threads--;
4608         ibmvfc_free_event(evt);
4609         kref_put(&tgt->kref, ibmvfc_release_tgt);
4610         wake_up(&vhost->work_wait_q);
4611 }
4612
4613 /**
4614  * ibmvfc_adisc_timeout - Handle an ADISC timeout
4615  * @t:          ibmvfc target struct
4616  *
4617  * If an ADISC times out, send a cancel. If the cancel times
4618  * out, reset the CRQ. When the ADISC comes back as cancelled,
4619  * log back into the target.
4620  **/
4621 static void ibmvfc_adisc_timeout(struct timer_list *t)
4622 {
4623         struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4624         struct ibmvfc_host *vhost = tgt->vhost;
4625         struct ibmvfc_event *evt;
4626         struct ibmvfc_tmf *tmf;
4627         unsigned long flags;
4628         int rc;
4629
4630         tgt_dbg(tgt, "ADISC timeout\n");
4631         spin_lock_irqsave(vhost->host->host_lock, flags);
4632         if (vhost->abort_threads >= disc_threads ||
4633             tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4634             vhost->state != IBMVFC_INITIALIZING ||
4635             vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4636                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4637                 return;
4638         }
4639
4640         vhost->abort_threads++;
4641         kref_get(&tgt->kref);
4642         evt = ibmvfc_get_reserved_event(&vhost->crq);
4643         if (!evt) {
4644                 tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
4645                 vhost->abort_threads--;
4646                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4647                 __ibmvfc_reset_host(vhost);
4648                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4649                 return;
4650         }
4651         ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4652
4653         evt->tgt = tgt;
4654         tmf = &evt->iu.tmf;
4655         memset(tmf, 0, sizeof(*tmf));
4656         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4657                 tmf->common.version = cpu_to_be32(2);
4658                 tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4659         } else {
4660                 tmf->common.version = cpu_to_be32(1);
4661         }
4662         tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4663         tmf->common.length = cpu_to_be16(sizeof(*tmf));
4664         tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4665         tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4666
4667         rc = ibmvfc_send_event(evt, vhost, default_timeout);
4668
4669         if (rc) {
4670                 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4671                 vhost->abort_threads--;
4672                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4673                 __ibmvfc_reset_host(vhost);
4674         } else
4675                 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4676         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4677 }
4678
4679 /**
4680  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4681  * @tgt:                ibmvfc target struct
4682  *
4683  * When sending an ADISC we end up with two timers running. The
4684  * first timer is the timer in the ibmvfc target struct. If this
4685  * fires, we send a cancel to the target. The second timer is the
4686  * timer on the ibmvfc event for the ADISC, which is longer. If that
4687  * fires, it means the ADISC timed out and our attempt to cancel it
4688  * also failed, so we need to reset the CRQ.
4689  **/
4690 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4691 {
4692         struct ibmvfc_passthru_mad *mad;
4693         struct ibmvfc_host *vhost = tgt->vhost;
4694         struct ibmvfc_event *evt;
4695
4696         if (vhost->discovery_threads >= disc_threads)
4697                 return;
4698
4699         kref_get(&tgt->kref);
4700         evt = ibmvfc_get_reserved_event(&vhost->crq);
4701         if (!evt) {
4702                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4703                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4704                 __ibmvfc_reset_host(vhost);
4705                 return;
4706         }
4707         vhost->discovery_threads++;
4708         ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4709         evt->tgt = tgt;
4710
4711         ibmvfc_init_passthru(evt);
4712         mad = &evt->iu.passthru;
4713         mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4714         mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4715         mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4716
4717         mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4718         memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4719                sizeof(vhost->login_buf->resp.port_name));
4720         memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4721                sizeof(vhost->login_buf->resp.node_name));
4722         mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4723
4724         if (timer_pending(&tgt->timer))
4725                 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4726         else {
4727                 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4728                 add_timer(&tgt->timer);
4729         }
4730
4731         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4732         if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4733                 vhost->discovery_threads--;
4734                 del_timer(&tgt->timer);
4735                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4736                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4737         } else
4738                 tgt_dbg(tgt, "Sent ADISC\n");
4739 }
4740
/**
 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
 * @evt:	ibmvfc event struct
 *
 * Releases the discovery thread slot taken by the sender, then either
 * re-initializes the target via ADISC, retries the query, or schedules
 * the target for deletion, depending on the MAD status. Always drops
 * the target reference taken by the sender, frees the event, and wakes
 * the work thread.
 **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		/* SCSI ID for this WWPN changed: the tracked target is stale */
		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
			ibmvfc_del_tgt(tgt);
		else
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Port name no longer registered with the fabric: drop the target */
		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_del_tgt(tgt);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			/* retry; the return value adjusts the failure message's log level */
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
			status);
		break;
	}

	/* rsp points into the event, so free only after the last use above */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4793
/**
 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
 * @tgt:	ibmvfc target struct
 *
 * Sends a Query Target MAD for @tgt if a discovery thread slot is
 * available. A target reference and a discovery thread slot are held
 * for the duration of the command and released by the completion
 * handler (or here, on send failure).
 **/
static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
{
	struct ibmvfc_query_tgt *query_tgt;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrent discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		/* No reserved event available: drop the ref and reset the host */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
		return;
	}
	vhost->discovery_threads++;
	evt->tgt = tgt;
	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
	query_tgt = &evt->iu.query_tgt;
	memset(query_tgt, 0, sizeof(*query_tgt));
	query_tgt->common.version = cpu_to_be32(1);
	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo the accounting done above */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Query Target\n");
}
4834
4835 /**
4836  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4837  * @vhost:              ibmvfc host struct
4838  * @target:             Holds SCSI ID to allocate target forand the WWPN
4839  *
4840  * Returns:
4841  *      0 on success / other on failure
4842  **/
4843 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4844                                struct ibmvfc_discover_targets_entry *target)
4845 {
4846         struct ibmvfc_target *stgt = NULL;
4847         struct ibmvfc_target *wtgt = NULL;
4848         struct ibmvfc_target *tgt;
4849         unsigned long flags;
4850         u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4851         u64 wwpn = be64_to_cpu(target->wwpn);
4852
4853         /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4854         spin_lock_irqsave(vhost->host->host_lock, flags);
4855         list_for_each_entry(tgt, &vhost->targets, queue) {
4856                 if (tgt->wwpn == wwpn) {
4857                         wtgt = tgt;
4858                         break;
4859                 }
4860         }
4861
4862         list_for_each_entry(tgt, &vhost->targets, queue) {
4863                 if (tgt->scsi_id == scsi_id) {
4864                         stgt = tgt;
4865                         break;
4866                 }
4867         }
4868
4869         if (wtgt && !stgt) {
4870                 /*
4871                  * A WWPN target has moved and we still are tracking the old
4872                  * SCSI ID.  The only way we should be able to get here is if
4873                  * we attempted to send an implicit logout for the old SCSI ID
4874                  * and it failed for some reason, such as there being I/O
4875                  * pending to the target. In this case, we will have already
4876                  * deleted the rport from the FC transport so we do a move
4877                  * login, which works even with I/O pending, however, if
4878                  * there is still I/O pending, it will stay outstanding, so
4879                  * we only do this if fast fail is disabled for the rport,
4880                  * otherwise we let terminate_rport_io clean up the port
4881                  * before we login at the new location.
4882                  */
4883                 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4884                         if (wtgt->move_login) {
4885                                 /*
4886                                  * Do a move login here. The old target is no longer
4887                                  * known to the transport layer We don't use the
4888                                  * normal ibmvfc_set_tgt_action to set this, as we
4889                                  * don't normally want to allow this state change.
4890                                  */
4891                                 wtgt->new_scsi_id = scsi_id;
4892                                 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4893                                 wtgt->init_retries = 0;
4894                                 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4895                         }
4896                         goto unlock_out;
4897                 } else {
4898                         tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4899                                 wtgt->action, wtgt->rport);
4900                 }
4901         } else if (stgt) {
4902                 if (tgt->need_login)
4903                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4904                 goto unlock_out;
4905         }
4906         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4907
4908         tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4909         memset(tgt, 0, sizeof(*tgt));
4910         tgt->scsi_id = scsi_id;
4911         tgt->wwpn = wwpn;
4912         tgt->vhost = vhost;
4913         tgt->need_login = 1;
4914         timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4915         kref_init(&tgt->kref);
4916         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4917         spin_lock_irqsave(vhost->host->host_lock, flags);
4918         tgt->cancel_key = vhost->task_set++;
4919         list_add_tail(&tgt->queue, &vhost->targets);
4920
4921 unlock_out:
4922         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4923         return 0;
4924 }
4925
4926 /**
4927  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4928  * @vhost:              ibmvfc host struct
4929  *
4930  * Returns:
4931  *      0 on success / other on failure
4932  **/
4933 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4934 {
4935         int i, rc;
4936
4937         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4938                 rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4939
4940         return rc;
4941 }
4942
/**
 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
 * @evt:	ibmvfc event struct
 *
 * On success records the number of discovered targets and advances the
 * host to target allocation; on failure retries host init or takes the
 * link down. Always frees the event and wakes the work thread.
 **/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
	u32 mad_status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
		/* num_written: count of discovery entries reported back */
		vhost->num_targets = be32_to_cpu(rsp->num_written);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
		break;
	case IBMVFC_MAD_FAILED:
		/* retry; the return value adjusts the failure message's log level */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	default:
		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	}

	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4978
/**
 * ibmvfc_discover_targets - Send Discover Targets MAD
 * @vhost:	ibmvfc host struct
 *
 * Requests that the list of visible targets be written into the host's
 * discovery DMA buffer. Completion is handled by
 * ibmvfc_discover_targets_done().
 **/
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
	struct ibmvfc_discover_targets *mad;
	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	if (!evt) {
		ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.discover_targets;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
	mad->common.length = cpu_to_be16(sizeof(*mad));
	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
	/* Request entries that carry both the port ID and the WWPN */
	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Sent discover targets\n");
	else
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
5013
/**
 * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
 * @evt:	ibmvfc event struct
 *
 * Records the number of sub-CRQ channels granted and their VIOS
 * handles, then moves the host on to the query step.
 **/
static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
	struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;
	int flags, active_queues, i;

	/*
	 * The response is in vhost->channel_setup_buf (a separate DMA
	 * buffer), so the event can be freed before parsing it.
	 */
	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
		flags = be32_to_cpu(setup->flags);
		vhost->do_enquiry = 0;
		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
		scrqs->active_queues = active_queues;

		if (flags & IBMVFC_CHANNELS_CANCELED) {
			ibmvfc_dbg(vhost, "Channels Canceled\n");
			vhost->using_channels = 0;
		} else {
			if (active_queues)
				vhost->using_channels = 1;
			/* Stash the VIOS-side handle for each granted channel */
			for (i = 0; i < active_queues; i++)
				scrqs->scrqs[i].vios_cookie =
					be64_to_cpu(setup->channel_handles[i]);

			ibmvfc_dbg(vhost, "Using %u channels\n",
				   vhost->scsi_scrqs.active_queues);
		}
		break;
	case IBMVFC_MAD_FAILED:
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Channel Setup failed\n");
		fallthrough;
	case IBMVFC_MAD_DRIVER_FAILED:
		return;
	default:
		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
			mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		return;
	}

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	wake_up(&vhost->work_wait_q);
}
5063
/**
 * ibmvfc_channel_setup - Send a Channel Setup MAD
 * @vhost:	ibmvfc host struct
 *
 * Offers the host's sub-CRQ cookies (bounded by what the enquiry said
 * the other end supports), or requests channel cancellation when no
 * channels are wanted. Completion is handled by
 * ibmvfc_channel_setup_done().
 **/
static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
{
	struct ibmvfc_channel_setup_mad *mad;
	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
	struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
	unsigned int num_channels =
		min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;
	int i;

	if (!evt) {
		ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	/* The setup payload goes in a separate DMA buffer referenced by the MAD */
	memset(setup_buf, 0, sizeof(*setup_buf));
	if (num_channels == 0)
		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
	else {
		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
		for (i = 0; i < num_channels; i++)
			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
	}

	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.channel_setup;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
	mad->common.length = cpu_to_be16(sizeof(*mad));
	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Sent channel setup\n");
	else
		/* NOTE(review): send failure here uses LINK_DOWN, not LINK_DEAD
		 * as the other senders do — confirm this asymmetry is intended.
		 */
		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
}
5106
/**
 * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
 * @evt:	ibmvfc event struct
 *
 * On success records how many SCSI sub-queue channels the other end
 * supports and proceeds to channel setup; otherwise retries host init
 * or takes the link down.
 **/
static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
	u32 mad_status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
		/* rsp points into the event, so free only after reading it */
		ibmvfc_free_event(evt);
		break;
	case IBMVFC_MAD_FAILED:
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
		fallthrough;
	case IBMVFC_MAD_DRIVER_FAILED:
		ibmvfc_free_event(evt);
		return;
	default:
		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
			mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_free_event(evt);
		return;
	}

	ibmvfc_channel_setup(vhost);
}
5137
/**
 * ibmvfc_channel_enquiry - Send a Channel Enquiry MAD
 * @vhost:	ibmvfc host struct
 *
 * Asks how many SCSI sub-queue channels are supported. Completion is
 * handled by ibmvfc_channel_enquiry_done().
 **/
static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
{
	struct ibmvfc_channel_enquiry *mad;
	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	if (!evt) {
		ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.channel_enquiry;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
	mad->common.length = cpu_to_be16(sizeof(*mad));

	/* Migration capability flags, gated by module parameters */
	if (mig_channels_only)
		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
	if (mig_no_less_channels)
		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Send channel enquiry\n");
	else
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
5169
5170 /**
5171  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5172  * @evt:        ibmvfc event struct
5173  *
5174  **/
5175 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5176 {
5177         struct ibmvfc_host *vhost = evt->vhost;
5178         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5179         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5180         unsigned int npiv_max_sectors;
5181         int level = IBMVFC_DEFAULT_LOG_LEVEL;
5182
5183         switch (mad_status) {
5184         case IBMVFC_MAD_SUCCESS:
5185                 ibmvfc_free_event(evt);
5186                 break;
5187         case IBMVFC_MAD_FAILED:
5188                 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5189                         level += ibmvfc_retry_host_init(vhost);
5190                 else
5191                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5192                 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5193                            ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5194                                                 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5195                 ibmvfc_free_event(evt);
5196                 return;
5197         case IBMVFC_MAD_CRQ_ERROR:
5198                 ibmvfc_retry_host_init(vhost);
5199                 fallthrough;
5200         case IBMVFC_MAD_DRIVER_FAILED:
5201                 ibmvfc_free_event(evt);
5202                 return;
5203         default:
5204                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5205                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5206                 ibmvfc_free_event(evt);
5207                 return;
5208         }
5209
5210         vhost->client_migrated = 0;
5211
5212         if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5213                 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5214                         rsp->flags);
5215                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5216                 wake_up(&vhost->work_wait_q);
5217                 return;
5218         }
5219
5220         if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5221                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5222                         rsp->max_cmds);
5223                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5224                 wake_up(&vhost->work_wait_q);
5225                 return;
5226         }
5227
5228         vhost->logged_in = 1;
5229         npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5230         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5231                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5232                  rsp->drc_name, npiv_max_sectors);
5233
5234         fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5235         fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5236         fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5237         fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5238         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5239         fc_host_supported_classes(vhost->host) = 0;
5240         if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5241                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5242         if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5243                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5244         if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5245                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5246         fc_host_maxframe_size(vhost->host) =
5247                 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5248
5249         vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5250         vhost->host->max_sectors = npiv_max_sectors;
5251
5252         if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5253                 ibmvfc_channel_enquiry(vhost);
5254         } else {
5255                 vhost->do_enquiry = 0;
5256                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5257                 wake_up(&vhost->work_wait_q);
5258         }
5259 }
5260
/**
 * ibmvfc_npiv_login - Sends NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Gathers partition/login info, copies it into the login DMA buffer,
 * and sends the NPIV Login MAD. Completion is handled by
 * ibmvfc_npiv_login_done().
 **/
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login_mad *mad;
	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);

	if (!evt) {
		ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	ibmvfc_gather_partition_info(vhost);
	ibmvfc_set_login_info(vhost);
	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);

	/* Login parameters are passed via the DMA buffer referenced by the MAD */
	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
	mad = &evt->iu.npiv_login;
	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Sent NPIV login\n");
	else
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
5297
5298 /**
5299  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5300  * @evt:                ibmvfc event struct
5301  *
5302  **/
5303 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5304 {
5305         struct ibmvfc_host *vhost = evt->vhost;
5306         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5307
5308         ibmvfc_free_event(evt);
5309
5310         switch (mad_status) {
5311         case IBMVFC_MAD_SUCCESS:
5312                 if (list_empty(&vhost->crq.sent) &&
5313                     vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5314                         ibmvfc_init_host(vhost);
5315                         return;
5316                 }
5317                 break;
5318         case IBMVFC_MAD_FAILED:
5319         case IBMVFC_MAD_NOT_SUPPORTED:
5320         case IBMVFC_MAD_CRQ_ERROR:
5321         case IBMVFC_MAD_DRIVER_FAILED:
5322         default:
5323                 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5324                 break;
5325         }
5326
5327         ibmvfc_hard_reset_host(vhost);
5328 }
5329
/**
 * ibmvfc_npiv_logout - Issue an NPIV Logout
 * @vhost:		ibmvfc host struct
 *
 * Completion is handled by ibmvfc_npiv_logout_done().
 **/
static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_logout_mad *mad;
	struct ibmvfc_event *evt;

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);

	mad = &evt->iu.npiv_logout;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));

	/* Completion handler checks for this state before re-initializing */
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
	else
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
5362
5363 /**
5364  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5365  * @vhost:              ibmvfc host struct
5366  *
5367  * Returns:
5368  *      1 if work to do / 0 if not
5369  **/
5370 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5371 {
5372         struct ibmvfc_target *tgt;
5373
5374         list_for_each_entry(tgt, &vhost->targets, queue) {
5375                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5376                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5377                         return 1;
5378         }
5379
5380         return 0;
5381 }
5382
5383 /**
5384  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5385  * @vhost:              ibmvfc host struct
5386  *
5387  * Returns:
5388  *      1 if work to do / 0 if not
5389  **/
5390 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5391 {
5392         struct ibmvfc_target *tgt;
5393
5394         list_for_each_entry(tgt, &vhost->targets, queue) {
5395                 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5396                     tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5397                         return 1;
5398         }
5399         return 0;
5400 }
5401
/**
 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
 * @vhost:		ibmvfc host struct
 *
 * Lockless variant; ibmvfc_work_to_do() wraps this with the host lock.
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (kthread_should_stop())
		return 1;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* Idle or *_WAIT states: nothing for the work thread */
		return 0;
	case IBMVFC_HOST_ACTION_TGT_INIT:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		/* All discovery thread slots in use: nothing can be started */
		if (vhost->discovery_threads == disc_threads)
			return 0;
		/* Work if any target still needs init started... */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
				return 1;
		/* ...else idle while any init is still in flight */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		if (vhost->discovery_threads == disc_threads)
			return 0;
		/* Same two-pass scan as above, for rport logout states */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
				return 1;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
	case IBMVFC_HOST_ACTION_QUERY:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		break;
	}

	return 1;
}
5454
5455 /**
5456  * ibmvfc_work_to_do - Is there task level work to do?
5457  * @vhost:              ibmvfc host struct
5458  *
5459  * Returns:
5460  *      1 if work to do / 0 if not
5461  **/
5462 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5463 {
5464         unsigned long flags;
5465         int rc;
5466
5467         spin_lock_irqsave(vhost->host->host_lock, flags);
5468         rc = __ibmvfc_work_to_do(vhost);
5469         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5470         return rc;
5471 }
5472
5473 /**
5474  * ibmvfc_log_ae - Log async events if necessary
5475  * @vhost:              ibmvfc host struct
5476  * @events:             events to log
5477  *
5478  **/
5479 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5480 {
5481         if (events & IBMVFC_AE_RSCN)
5482                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5483         if ((events & IBMVFC_AE_LINKDOWN) &&
5484             vhost->state >= IBMVFC_HALTED)
5485                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5486         if ((events & IBMVFC_AE_LINKUP) &&
5487             vhost->state == IBMVFC_INITIALIZING)
5488                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5489 }
5490
5491 /**
5492  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5493  * @tgt:                ibmvfc target struct
5494  *
5495  **/
5496 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5497 {
5498         struct ibmvfc_host *vhost = tgt->vhost;
5499         struct fc_rport *rport;
5500         unsigned long flags;
5501
5502         tgt_dbg(tgt, "Adding rport\n");
5503         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5504         spin_lock_irqsave(vhost->host->host_lock, flags);
5505
5506         if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5507                 tgt_dbg(tgt, "Deleting rport\n");
5508                 list_del(&tgt->queue);
5509                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5510                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5511                 fc_remote_port_delete(rport);
5512                 del_timer_sync(&tgt->timer);
5513                 kref_put(&tgt->kref, ibmvfc_release_tgt);
5514                 return;
5515         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5516                 tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5517                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5518                 tgt->rport = NULL;
5519                 tgt->init_retries = 0;
5520                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5521                 fc_remote_port_delete(rport);
5522                 return;
5523         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5524                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5525                 return;
5526         }
5527
5528         if (rport) {
5529                 tgt_dbg(tgt, "rport add succeeded\n");
5530                 tgt->rport = rport;
5531                 rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5532                 rport->supported_classes = 0;
5533                 tgt->target_id = rport->scsi_target_id;
5534                 if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5535                         rport->supported_classes |= FC_COS_CLASS1;
5536                 if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5537                         rport->supported_classes |= FC_COS_CLASS2;
5538                 if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5539                         rport->supported_classes |= FC_COS_CLASS3;
5540                 if (rport->rqst_q)
5541                         blk_queue_max_segments(rport->rqst_q, 1);
5542         } else
5543                 tgt_dbg(tgt, "rport add failed\n");
5544         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5545 }
5546
5547 /**
5548  * ibmvfc_do_work - Do task level work
5549  * @vhost:              ibmvfc host struct
5550  *
5551  **/
5552 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5553 {
5554         struct ibmvfc_target *tgt;
5555         unsigned long flags;
5556         struct fc_rport *rport;
5557         LIST_HEAD(purge);
5558         int rc;
5559
5560         ibmvfc_log_ae(vhost, vhost->events_to_log);
5561         spin_lock_irqsave(vhost->host->host_lock, flags);
5562         vhost->events_to_log = 0;
5563         switch (vhost->action) {
5564         case IBMVFC_HOST_ACTION_NONE:
5565         case IBMVFC_HOST_ACTION_LOGO_WAIT:
5566         case IBMVFC_HOST_ACTION_INIT_WAIT:
5567                 break;
5568         case IBMVFC_HOST_ACTION_RESET:
5569                 list_splice_init(&vhost->purge, &purge);
5570                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5571                 ibmvfc_complete_purge(&purge);
5572                 rc = ibmvfc_reset_crq(vhost);
5573
5574                 spin_lock_irqsave(vhost->host->host_lock, flags);
5575                 if (!rc || rc == H_CLOSED)
5576                         vio_enable_interrupts(to_vio_dev(vhost->dev));
5577                 if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5578                         /*
5579                          * The only action we could have changed to would have
5580                          * been reenable, in which case, we skip the rest of
5581                          * this path and wait until we've done the re-enable
5582                          * before sending the crq init.
5583                          */
5584                         vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5585
5586                         if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5587                             (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5588                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5589                                 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5590                         }
5591                 }
5592                 break;
5593         case IBMVFC_HOST_ACTION_REENABLE:
5594                 list_splice_init(&vhost->purge, &purge);
5595                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5596                 ibmvfc_complete_purge(&purge);
5597                 rc = ibmvfc_reenable_crq_queue(vhost);
5598
5599                 spin_lock_irqsave(vhost->host->host_lock, flags);
5600                 if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5601                         /*
5602                          * The only action we could have changed to would have
5603                          * been reset, in which case, we skip the rest of this
5604                          * path and wait until we've done the reset before
5605                          * sending the crq init.
5606                          */
5607                         vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5608                         if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5609                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5610                                 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5611                         }
5612                 }
5613                 break;
5614         case IBMVFC_HOST_ACTION_LOGO:
5615                 vhost->job_step(vhost);
5616                 break;
5617         case IBMVFC_HOST_ACTION_INIT:
5618                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5619                 if (vhost->delay_init) {
5620                         vhost->delay_init = 0;
5621                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5622                         ssleep(15);
5623                         return;
5624                 } else
5625                         vhost->job_step(vhost);
5626                 break;
5627         case IBMVFC_HOST_ACTION_QUERY:
5628                 list_for_each_entry(tgt, &vhost->targets, queue)
5629                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5630                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5631                 break;
5632         case IBMVFC_HOST_ACTION_QUERY_TGTS:
5633                 list_for_each_entry(tgt, &vhost->targets, queue) {
5634                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5635                                 tgt->job_step(tgt);
5636                                 break;
5637                         }
5638                 }
5639
5640                 if (!ibmvfc_dev_init_to_do(vhost))
5641                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5642                 break;
5643         case IBMVFC_HOST_ACTION_TGT_DEL:
5644         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5645                 list_for_each_entry(tgt, &vhost->targets, queue) {
5646                         if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5647                                 tgt->job_step(tgt);
5648                                 break;
5649                         }
5650                 }
5651
5652                 if (ibmvfc_dev_logo_to_do(vhost)) {
5653                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5654                         return;
5655                 }
5656
5657                 list_for_each_entry(tgt, &vhost->targets, queue) {
5658                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5659                                 tgt_dbg(tgt, "Deleting rport\n");
5660                                 rport = tgt->rport;
5661                                 tgt->rport = NULL;
5662                                 list_del(&tgt->queue);
5663                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5664                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5665                                 if (rport)
5666                                         fc_remote_port_delete(rport);
5667                                 del_timer_sync(&tgt->timer);
5668                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
5669                                 return;
5670                         } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5671                                 tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5672                                 rport = tgt->rport;
5673                                 tgt->rport = NULL;
5674                                 tgt->init_retries = 0;
5675                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5676
5677                                 /*
5678                                  * If fast fail is enabled, we wait for it to fire and then clean up
5679                                  * the old port, since we expect the fast fail timer to clean up the
5680                                  * outstanding I/O faster than waiting for normal command timeouts.
5681                                  * However, if fast fail is disabled, any I/O outstanding to the
5682                                  * rport LUNs will stay outstanding indefinitely, since the EH handlers
5683                                  * won't get invoked for I/O's timing out. If this is a NPIV failover
5684                                  * scenario, the better alternative is to use the move login.
5685                                  */
5686                                 if (rport && rport->fast_io_fail_tmo == -1)
5687                                         tgt->move_login = 1;
5688                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5689                                 if (rport)
5690                                         fc_remote_port_delete(rport);
5691                                 return;
5692                         }
5693                 }
5694
5695                 if (vhost->state == IBMVFC_INITIALIZING) {
5696                         if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5697                                 if (vhost->reinit) {
5698                                         vhost->reinit = 0;
5699                                         scsi_block_requests(vhost->host);
5700                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5701                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5702                                 } else {
5703                                         ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5704                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5705                                         wake_up(&vhost->init_wait_q);
5706                                         schedule_work(&vhost->rport_add_work_q);
5707                                         vhost->init_retries = 0;
5708                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5709                                         scsi_unblock_requests(vhost->host);
5710                                 }
5711
5712                                 return;
5713                         } else {
5714                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5715                                 vhost->job_step = ibmvfc_discover_targets;
5716                         }
5717                 } else {
5718                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5719                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5720                         scsi_unblock_requests(vhost->host);
5721                         wake_up(&vhost->init_wait_q);
5722                         return;
5723                 }
5724                 break;
5725         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5726                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5727                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5728                 ibmvfc_alloc_targets(vhost);
5729                 spin_lock_irqsave(vhost->host->host_lock, flags);
5730                 break;
5731         case IBMVFC_HOST_ACTION_TGT_INIT:
5732                 list_for_each_entry(tgt, &vhost->targets, queue) {
5733                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5734                                 tgt->job_step(tgt);
5735                                 break;
5736                         }
5737                 }
5738
5739                 if (!ibmvfc_dev_init_to_do(vhost))
5740                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5741                 break;
5742         default:
5743                 break;
5744         }
5745
5746         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5747 }
5748
5749 /**
5750  * ibmvfc_work - Do task level work
5751  * @data:               ibmvfc host struct
5752  *
5753  * Returns:
5754  *      zero
5755  **/
5756 static int ibmvfc_work(void *data)
5757 {
5758         struct ibmvfc_host *vhost = data;
5759         int rc;
5760
5761         set_user_nice(current, MIN_NICE);
5762
5763         while (1) {
5764                 rc = wait_event_interruptible(vhost->work_wait_q,
5765                                               ibmvfc_work_to_do(vhost));
5766
5767                 BUG_ON(rc);
5768
5769                 if (kthread_should_stop())
5770                         break;
5771
5772                 ibmvfc_do_work(vhost);
5773         }
5774
5775         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5776         return 0;
5777 }
5778
5779 /**
5780  * ibmvfc_alloc_queue - Allocate queue
5781  * @vhost:      ibmvfc host struct
5782  * @queue:      ibmvfc queue to allocate
5783  * @fmt:        queue format to allocate
5784  *
5785  * Returns:
5786  *      0 on success / non-zero on failure
5787  **/
5788 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5789                               struct ibmvfc_queue *queue,
5790                               enum ibmvfc_msg_fmt fmt)
5791 {
5792         struct device *dev = vhost->dev;
5793         size_t fmt_size;
5794
5795         ENTER;
5796         spin_lock_init(&queue->_lock);
5797         queue->q_lock = &queue->_lock;
5798
5799         switch (fmt) {
5800         case IBMVFC_CRQ_FMT:
5801                 fmt_size = sizeof(*queue->msgs.crq);
5802                 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
5803                 queue->evt_depth = scsi_qdepth;
5804                 queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
5805                 break;
5806         case IBMVFC_ASYNC_FMT:
5807                 fmt_size = sizeof(*queue->msgs.async);
5808                 break;
5809         case IBMVFC_SUB_CRQ_FMT:
5810                 fmt_size = sizeof(*queue->msgs.scrq);
5811                 /* We need one extra event for Cancel Commands */
5812                 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5813                 queue->evt_depth = scsi_qdepth;
5814                 queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5815                 break;
5816         default:
5817                 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5818                 return -EINVAL;
5819         }
5820
5821         queue->fmt = fmt;
5822         if (ibmvfc_init_event_pool(vhost, queue)) {
5823                 dev_err(dev, "Couldn't initialize event pool.\n");
5824                 return -ENOMEM;
5825         }
5826
5827         queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5828         if (!queue->msgs.handle)
5829                 return -ENOMEM;
5830
5831         queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5832                                           DMA_BIDIRECTIONAL);
5833
5834         if (dma_mapping_error(dev, queue->msg_token)) {
5835                 free_page((unsigned long)queue->msgs.handle);
5836                 queue->msgs.handle = NULL;
5837                 return -ENOMEM;
5838         }
5839
5840         queue->cur = 0;
5841         queue->size = PAGE_SIZE / fmt_size;
5842
5843         queue->vhost = vhost;
5844         return 0;
5845 }
5846
5847 /**
5848  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5849  * @vhost:      ibmvfc host struct
5850  *
5851  * Allocates a page for messages, maps it for dma, and registers
5852  * the crq with the hypervisor.
5853  *
5854  * Return value:
5855  *      zero on success / other on failure
5856  **/
5857 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5858 {
5859         int rc, retrc = -ENOMEM;
5860         struct device *dev = vhost->dev;
5861         struct vio_dev *vdev = to_vio_dev(dev);
5862         struct ibmvfc_queue *crq = &vhost->crq;
5863
5864         ENTER;
5865         if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5866                 return -ENOMEM;
5867
5868         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5869                                         crq->msg_token, PAGE_SIZE);
5870
5871         if (rc == H_RESOURCE)
5872                 /* maybe kexecing and resource is busy. try a reset */
5873                 retrc = rc = ibmvfc_reset_crq(vhost);
5874
5875         if (rc == H_CLOSED)
5876                 dev_warn(dev, "Partner adapter not ready\n");
5877         else if (rc) {
5878                 dev_warn(dev, "Error %d opening adapter\n", rc);
5879                 goto reg_crq_failed;
5880         }
5881
5882         retrc = 0;
5883
5884         tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5885
5886         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5887                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5888                 goto req_irq_failed;
5889         }
5890
5891         if ((rc = vio_enable_interrupts(vdev))) {
5892                 dev_err(dev, "Error %d enabling interrupts\n", rc);
5893                 goto req_irq_failed;
5894         }
5895
5896         LEAVE;
5897         return retrc;
5898
5899 req_irq_failed:
5900         tasklet_kill(&vhost->tasklet);
5901         do {
5902                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5903         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5904 reg_crq_failed:
5905         ibmvfc_free_queue(vhost, crq);
5906         return retrc;
5907 }
5908
/**
 * ibmvfc_register_channel - Register a sub-CRQ channel with the hypervisor
 * @vhost:	ibmvfc host struct
 * @channels:	ibmvfc channels container
 * @index:	index of the sub-CRQ to register
 *
 * Registers the sub-CRQ via h_reg_sub_crq, maps and requests its
 * interrupt, and records the hardware queue id.
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
				   struct ibmvfc_channels *channels,
				   int index)
{
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *scrq = &channels->scrqs[index];
	int rc = -ENOMEM;

	ENTER;

	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
			   &scrq->cookie, &scrq->hw_irq);

	/* H_CLOSED indicates successful register, but no CRQ partner */
	if (rc && rc != H_CLOSED) {
		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
		if (rc == H_PARAMETER)
			dev_warn_once(dev, "Firmware may not support MQ\n");
		goto reg_failed;
	}

	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

	if (!scrq->irq) {
		rc = -EINVAL;
		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
		goto irq_failed;
	}

	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
		 vdev->unit_address, index);
	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);

	if (rc) {
		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
		irq_dispose_mapping(scrq->irq);
		goto irq_failed;
	}

	scrq->hwq_id = index;

	LEAVE;
	return 0;

irq_failed:
	/* Retry the free hcall while the hypervisor reports busy */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	LEAVE;
	return rc;
}
5962
5963 static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
5964                                       struct ibmvfc_channels *channels,
5965                                       int index)
5966 {
5967         struct device *dev = vhost->dev;
5968         struct vio_dev *vdev = to_vio_dev(dev);
5969         struct ibmvfc_queue *scrq = &channels->scrqs[index];
5970         long rc;
5971
5972         ENTER;
5973
5974         free_irq(scrq->irq, scrq);
5975         irq_dispose_mapping(scrq->irq);
5976         scrq->irq = 0;
5977
5978         do {
5979                 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5980                                         scrq->cookie);
5981         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5982
5983         if (rc)
5984                 dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
5985
5986         /* Clean out the queue */
5987         memset(scrq->msgs.crq, 0, PAGE_SIZE);
5988         scrq->cur = 0;
5989
5990         LEAVE;
5991 }
5992
5993 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
5994                                 struct ibmvfc_channels *channels)
5995 {
5996         int i, j;
5997
5998         ENTER;
5999         if (!vhost->mq_enabled || !channels->scrqs)
6000                 return;
6001
6002         for (i = 0; i < channels->max_queues; i++) {
6003                 if (ibmvfc_register_channel(vhost, channels, i)) {
6004                         for (j = i; j > 0; j--)
6005                                 ibmvfc_deregister_channel(vhost, channels, j - 1);
6006                         vhost->do_enquiry = 0;
6007                         return;
6008                 }
6009         }
6010
6011         LEAVE;
6012 }
6013
6014 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
6015                                   struct ibmvfc_channels *channels)
6016 {
6017         int i;
6018
6019         ENTER;
6020         if (!vhost->mq_enabled || !channels->scrqs)
6021                 return;
6022
6023         for (i = 0; i < channels->max_queues; i++)
6024                 ibmvfc_deregister_channel(vhost, channels, i);
6025
6026         LEAVE;
6027 }
6028
6029 static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
6030                                  struct ibmvfc_channels *channels)
6031 {
6032         struct ibmvfc_queue *scrq;
6033         int i, j;
6034         int rc = 0;
6035
6036         channels->scrqs = kcalloc(channels->max_queues,
6037                                   sizeof(*channels->scrqs),
6038                                   GFP_KERNEL);
6039         if (!channels->scrqs)
6040                 return -ENOMEM;
6041
6042         for (i = 0; i < channels->max_queues; i++) {
6043                 scrq = &channels->scrqs[i];
6044                 rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
6045                 if (rc) {
6046                         for (j = i; j > 0; j--) {
6047                                 scrq = &channels->scrqs[j - 1];
6048                                 ibmvfc_free_queue(vhost, scrq);
6049                         }
6050                         kfree(channels->scrqs);
6051                         channels->scrqs = NULL;
6052                         channels->active_queues = 0;
6053                         return rc;
6054                 }
6055         }
6056
6057         return rc;
6058 }
6059
6060 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
6061 {
6062         ENTER;
6063         if (!vhost->mq_enabled)
6064                 return;
6065
6066         if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
6067                 vhost->do_enquiry = 0;
6068                 vhost->mq_enabled = 0;
6069                 return;
6070         }
6071
6072         ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
6073
6074         LEAVE;
6075 }
6076
6077 static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
6078                                     struct ibmvfc_channels *channels)
6079 {
6080         struct ibmvfc_queue *scrq;
6081         int i;
6082
6083         if (channels->scrqs) {
6084                 for (i = 0; i < channels->max_queues; i++) {
6085                         scrq = &channels->scrqs[i];
6086                         ibmvfc_free_queue(vhost, scrq);
6087                 }
6088
6089                 kfree(channels->scrqs);
6090                 channels->scrqs = NULL;
6091                 channels->active_queues = 0;
6092         }
6093 }
6094
6095 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
6096 {
6097         ENTER;
6098         if (!vhost->scsi_scrqs.scrqs)
6099                 return;
6100
6101         ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
6102
6103         ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
6104         LEAVE;
6105 }
6106
6107 /**
6108  * ibmvfc_free_mem - Free memory for vhost
6109  * @vhost:      ibmvfc host struct
6110  *
6111  * Return value:
6112  *      none
6113  **/
6114 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
6115 {
6116         struct ibmvfc_queue *async_q = &vhost->async_crq;
6117
6118         ENTER;
6119         mempool_destroy(vhost->tgt_pool);
6120         kfree(vhost->trace);
6121         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
6122                           vhost->disc_buf_dma);
6123         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
6124                           vhost->login_buf, vhost->login_buf_dma);
6125         dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
6126                           vhost->channel_setup_buf, vhost->channel_setup_dma);
6127         dma_pool_destroy(vhost->sg_pool);
6128         ibmvfc_free_queue(vhost, async_q);
6129         LEAVE;
6130 }
6131
6132 /**
6133  * ibmvfc_alloc_mem - Allocate memory for vhost
6134  * @vhost:      ibmvfc host struct
6135  *
6136  * Return value:
6137  *      0 on success / non-zero on failure
6138  **/
6139 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
6140 {
6141         struct ibmvfc_queue *async_q = &vhost->async_crq;
6142         struct device *dev = vhost->dev;
6143
6144         ENTER;
6145         if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
6146                 dev_err(dev, "Couldn't allocate/map async queue.\n");
6147                 goto nomem;
6148         }
6149
6150         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
6151                                          SG_ALL * sizeof(struct srp_direct_buf),
6152                                          sizeof(struct srp_direct_buf), 0);
6153
6154         if (!vhost->sg_pool) {
6155                 dev_err(dev, "Failed to allocate sg pool\n");
6156                 goto unmap_async_crq;
6157         }
6158
6159         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
6160                                               &vhost->login_buf_dma, GFP_KERNEL);
6161
6162         if (!vhost->login_buf) {
6163                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
6164                 goto free_sg_pool;
6165         }
6166
6167         vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
6168         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
6169                                              &vhost->disc_buf_dma, GFP_KERNEL);
6170
6171         if (!vhost->disc_buf) {
6172                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
6173                 goto free_login_buffer;
6174         }
6175
6176         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
6177                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
6178         atomic_set(&vhost->trace_index, -1);
6179
6180         if (!vhost->trace)
6181                 goto free_disc_buffer;
6182
6183         vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
6184                                                       sizeof(struct ibmvfc_target));
6185
6186         if (!vhost->tgt_pool) {
6187                 dev_err(dev, "Couldn't allocate target memory pool\n");
6188                 goto free_trace;
6189         }
6190
6191         vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
6192                                                       &vhost->channel_setup_dma,
6193                                                       GFP_KERNEL);
6194
6195         if (!vhost->channel_setup_buf) {
6196                 dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
6197                 goto free_tgt_pool;
6198         }
6199
6200         LEAVE;
6201         return 0;
6202
6203 free_tgt_pool:
6204         mempool_destroy(vhost->tgt_pool);
6205 free_trace:
6206         kfree(vhost->trace);
6207 free_disc_buffer:
6208         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
6209                           vhost->disc_buf_dma);
6210 free_login_buffer:
6211         dma_free_coherent(dev, sizeof(*vhost->login_buf),
6212                           vhost->login_buf, vhost->login_buf_dma);
6213 free_sg_pool:
6214         dma_pool_destroy(vhost->sg_pool);
6215 unmap_async_crq:
6216         ibmvfc_free_queue(vhost, async_q);
6217 nomem:
6218         LEAVE;
6219         return -ENOMEM;
6220 }
6221
6222 /**
6223  * ibmvfc_rport_add_thread - Worker thread for rport adds
6224  * @work:       work struct
6225  *
6226  **/
6227 static void ibmvfc_rport_add_thread(struct work_struct *work)
6228 {
6229         struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
6230                                                  rport_add_work_q);
6231         struct ibmvfc_target *tgt;
6232         struct fc_rport *rport;
6233         unsigned long flags;
6234         int did_work;
6235
6236         ENTER;
6237         spin_lock_irqsave(vhost->host->host_lock, flags);
6238         do {
6239                 did_work = 0;
6240                 if (vhost->state != IBMVFC_ACTIVE)
6241                         break;
6242
6243                 list_for_each_entry(tgt, &vhost->targets, queue) {
6244                         if (tgt->add_rport) {
6245                                 did_work = 1;
6246                                 tgt->add_rport = 0;
6247                                 kref_get(&tgt->kref);
6248                                 rport = tgt->rport;
6249                                 if (!rport) {
6250                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6251                                         ibmvfc_tgt_add_rport(tgt);
6252                                 } else if (get_device(&rport->dev)) {
6253                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6254                                         tgt_dbg(tgt, "Setting rport roles\n");
6255                                         fc_remote_port_rolechg(rport, tgt->ids.roles);
6256                                         put_device(&rport->dev);
6257                                 } else {
6258                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6259                                 }
6260
6261                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
6262                                 spin_lock_irqsave(vhost->host->host_lock, flags);
6263                                 break;
6264                         }
6265                 }
6266         } while(did_work);
6267
6268         if (vhost->state == IBMVFC_ACTIVE)
6269                 vhost->scan_complete = 1;
6270         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6271         LEAVE;
6272 }
6273
6274 /**
6275  * ibmvfc_probe - Adapter hot plug add entry point
6276  * @vdev:       vio device struct
6277  * @id: vio device id struct
6278  *
6279  * Return value:
6280  *      0 on success / non-zero on failure
6281  **/
6282 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6283 {
6284         struct ibmvfc_host *vhost;
6285         struct Scsi_Host *shost;
6286         struct device *dev = &vdev->dev;
6287         int rc = -ENOMEM;
6288         unsigned int online_cpus = num_online_cpus();
6289         unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
6290
6291         ENTER;
6292         shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6293         if (!shost) {
6294                 dev_err(dev, "Couldn't allocate host data\n");
6295                 goto out;
6296         }
6297
6298         shost->transportt = ibmvfc_transport_template;
6299         shost->can_queue = scsi_qdepth;
6300         shost->max_lun = max_lun;
6301         shost->max_id = max_targets;
6302         shost->max_sectors = IBMVFC_MAX_SECTORS;
6303         shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6304         shost->unique_id = shost->host_no;
6305         shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6306
6307         vhost = shost_priv(shost);
6308         INIT_LIST_HEAD(&vhost->targets);
6309         INIT_LIST_HEAD(&vhost->purge);
6310         sprintf(vhost->name, IBMVFC_NAME);
6311         vhost->host = shost;
6312         vhost->dev = dev;
6313         vhost->partition_number = -1;
6314         vhost->log_level = log_level;
6315         vhost->task_set = 1;
6316
6317         vhost->mq_enabled = mq_enabled;
6318         vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
6319         vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
6320         vhost->using_channels = 0;
6321         vhost->do_enquiry = 1;
6322         vhost->scan_timeout = 0;
6323
6324         strcpy(vhost->partition_name, "UNKNOWN");
6325         init_waitqueue_head(&vhost->work_wait_q);
6326         init_waitqueue_head(&vhost->init_wait_q);
6327         INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6328         mutex_init(&vhost->passthru_mutex);
6329
6330         if ((rc = ibmvfc_alloc_mem(vhost)))
6331                 goto free_scsi_host;
6332
6333         vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6334                                          shost->host_no);
6335
6336         if (IS_ERR(vhost->work_thread)) {
6337                 dev_err(dev, "Couldn't create kernel thread: %ld\n",
6338                         PTR_ERR(vhost->work_thread));
6339                 rc = PTR_ERR(vhost->work_thread);
6340                 goto free_host_mem;
6341         }
6342
6343         if ((rc = ibmvfc_init_crq(vhost))) {
6344                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6345                 goto kill_kthread;
6346         }
6347
6348         if ((rc = scsi_add_host(shost, dev)))
6349                 goto release_crq;
6350
6351         fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6352
6353         if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6354                                            &ibmvfc_trace_attr))) {
6355                 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6356                 goto remove_shost;
6357         }
6358
6359         ibmvfc_init_sub_crqs(vhost);
6360
6361         if (shost_to_fc_host(shost)->rqst_q)
6362                 blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
6363         dev_set_drvdata(dev, vhost);
6364         spin_lock(&ibmvfc_driver_lock);
6365         list_add_tail(&vhost->queue, &ibmvfc_head);
6366         spin_unlock(&ibmvfc_driver_lock);
6367
6368         ibmvfc_send_crq_init(vhost);
6369         scsi_scan_host(shost);
6370         return 0;
6371
6372 remove_shost:
6373         scsi_remove_host(shost);
6374 release_crq:
6375         ibmvfc_release_crq_queue(vhost);
6376 kill_kthread:
6377         kthread_stop(vhost->work_thread);
6378 free_host_mem:
6379         ibmvfc_free_mem(vhost);
6380 free_scsi_host:
6381         scsi_host_put(shost);
6382 out:
6383         LEAVE;
6384         return rc;
6385 }
6386
6387 /**
6388  * ibmvfc_remove - Adapter hot plug remove entry point
6389  * @vdev:       vio device struct
6390  *
6391  * Return value:
6392  *      0
6393  **/
6394 static void ibmvfc_remove(struct vio_dev *vdev)
6395 {
6396         struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6397         LIST_HEAD(purge);
6398         unsigned long flags;
6399
6400         ENTER;
6401         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6402
6403         spin_lock_irqsave(vhost->host->host_lock, flags);
6404         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6405         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6406
6407         ibmvfc_wait_while_resetting(vhost);
6408         kthread_stop(vhost->work_thread);
6409         fc_remove_host(vhost->host);
6410         scsi_remove_host(vhost->host);
6411
6412         spin_lock_irqsave(vhost->host->host_lock, flags);
6413         ibmvfc_purge_requests(vhost, DID_ERROR);
6414         list_splice_init(&vhost->purge, &purge);
6415         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6416         ibmvfc_complete_purge(&purge);
6417         ibmvfc_release_sub_crqs(vhost);
6418         ibmvfc_release_crq_queue(vhost);
6419
6420         ibmvfc_free_mem(vhost);
6421         spin_lock(&ibmvfc_driver_lock);
6422         list_del(&vhost->queue);
6423         spin_unlock(&ibmvfc_driver_lock);
6424         scsi_host_put(vhost->host);
6425         LEAVE;
6426 }
6427
6428 /**
6429  * ibmvfc_resume - Resume from suspend
6430  * @dev:        device struct
6431  *
6432  * We may have lost an interrupt across suspend/resume, so kick the
6433  * interrupt handler
6434  *
6435  */
6436 static int ibmvfc_resume(struct device *dev)
6437 {
6438         unsigned long flags;
6439         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6440         struct vio_dev *vdev = to_vio_dev(dev);
6441
6442         spin_lock_irqsave(vhost->host->host_lock, flags);
6443         vio_disable_interrupts(vdev);
6444         tasklet_schedule(&vhost->tasklet);
6445         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6446         return 0;
6447 }
6448
6449 /**
6450  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6451  * @vdev:       vio device struct
6452  *
6453  * Return value:
6454  *      Number of bytes the driver will need to DMA map at the same time in
6455  *      order to perform well.
6456  */
6457 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6458 {
6459         unsigned long pool_dma;
6460
6461         pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
6462         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6463 }
6464
/* VIO bus device IDs this driver binds to */
static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }	/* terminator */
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6470
/* Power management callbacks; only a resume handler is provided */
static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};
6474
/* VIO bus driver registration for the virtual FC client adapter */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};
6483
/*
 * FC transport class template: callbacks plus the show_* flags that
 * control which host/rport/starget sysfs attributes are exposed.
 */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	/* BSG passthru support */
	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
6520
6521 /**
6522  * ibmvfc_module_init - Initialize the ibmvfc module
6523  *
6524  * Return value:
6525  *      0 on success / other on failure
6526  **/
6527 static int __init ibmvfc_module_init(void)
6528 {
6529         int rc;
6530
6531         if (!firmware_has_feature(FW_FEATURE_VIO))
6532                 return -ENODEV;
6533
6534         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6535                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6536
6537         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6538         if (!ibmvfc_transport_template)
6539                 return -ENOMEM;
6540
6541         rc = vio_register_driver(&ibmvfc_driver);
6542         if (rc)
6543                 fc_release_transport(ibmvfc_transport_template);
6544         return rc;
6545 }
6546
6547 /**
6548  * ibmvfc_module_exit - Teardown the ibmvfc module
6549  *
6550  * Return value:
6551  *      nothing
6552  **/
6553 static void __exit ibmvfc_module_exit(void)
6554 {
6555         vio_unregister_driver(&ibmvfc_driver);
6556         fc_release_transport(ibmvfc_transport_template);
6557 }
6558
6559 module_init(ibmvfc_module_init);
6560 module_exit(ibmvfc_module_exit);