scsi: ibmvfc: Fix erroneous use of rtas_busy_delay with hcall return code
[linux-2.6-block.git] / drivers / scsi / ibmvscsi / ibmvfc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kthread.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/pm.h>
21 #include <linux/stringify.h>
22 #include <linux/bsg-lib.h>
23 #include <asm/firmware.h>
24 #include <asm/irq.h>
25 #include <asm/vio.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/scsi_bsg_fc.h>
33 #include "ibmvfc.h"
34
/* Driver tunables; default values are defined in ibmvfc.h */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

/* List of all ibmvfc hosts in the system, protected by ibmvfc_driver_lock */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

/* Multiqueue / sub-CRQ channel parameters (read-only once loaded) */
module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

/* Timeout / sizing parameters; the *_timeout ones are writable at runtime */
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
		 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
107
/*
 * Completion status translation table, searched by ibmvfc_get_err_index().
 * Each entry maps a (status, error) pair reported by the VIOS to:
 *	result: host byte (DID_*) to hand back to the SCSI midlayer
 *	retry:  non-zero if the command may be retried
 *	log:    non-zero if the failure is worth logging
 *	name:   human readable description for log messages
 */
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
157
/* Forward declarations for the host / target login state machine steps */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

/* Forward declarations for sub-CRQ (de)registration */
static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);

/* Fallback description when a status/error pair is not in cmd_status */
static const char *unknown_error = "unknown error";
170
171 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
172                           unsigned long length, unsigned long *cookie,
173                           unsigned long *irq)
174 {
175         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
176         long rc;
177
178         rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
179         *cookie = retbuf[0];
180         *irq = retbuf[1];
181
182         return rc;
183 }
184
185 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
186 {
187         u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
188
189         return (host_caps & cap_flags) ? 1 : 0;
190 }
191
192 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
193                                                    struct ibmvfc_cmd *vfc_cmd)
194 {
195         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
196                 return &vfc_cmd->v2.iu;
197         else
198                 return &vfc_cmd->v1.iu;
199 }
200
201 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
202                                                  struct ibmvfc_cmd *vfc_cmd)
203 {
204         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
205                 return &vfc_cmd->v2.rsp;
206         else
207                 return &vfc_cmd->v1.rsp;
208 }
209
210 #ifdef CONFIG_SCSI_IBMVFC_TRACE
211 /**
212  * ibmvfc_trc_start - Log a start trace entry
213  * @evt:                ibmvfc event struct
214  *
215  **/
216 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
217 {
218         struct ibmvfc_host *vhost = evt->vhost;
219         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
220         struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
221         struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
222         struct ibmvfc_trace_entry *entry;
223         int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
224
225         entry = &vhost->trace[index];
226         entry->evt = evt;
227         entry->time = jiffies;
228         entry->fmt = evt->crq.format;
229         entry->type = IBMVFC_TRC_START;
230
231         switch (entry->fmt) {
232         case IBMVFC_CMD_FORMAT:
233                 entry->op_code = iu->cdb[0];
234                 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
235                 entry->lun = scsilun_to_int(&iu->lun);
236                 entry->tmf_flags = iu->tmf_flags;
237                 entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
238                 break;
239         case IBMVFC_MAD_FORMAT:
240                 entry->op_code = be32_to_cpu(mad->opcode);
241                 break;
242         default:
243                 break;
244         }
245 }
246
247 /**
248  * ibmvfc_trc_end - Log an end trace entry
249  * @evt:                ibmvfc event struct
250  *
251  **/
252 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
253 {
254         struct ibmvfc_host *vhost = evt->vhost;
255         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
256         struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
257         struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
258         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
259         struct ibmvfc_trace_entry *entry;
260         int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
261
262         entry = &vhost->trace[index];
263         entry->evt = evt;
264         entry->time = jiffies;
265         entry->fmt = evt->crq.format;
266         entry->type = IBMVFC_TRC_END;
267
268         switch (entry->fmt) {
269         case IBMVFC_CMD_FORMAT:
270                 entry->op_code = iu->cdb[0];
271                 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
272                 entry->lun = scsilun_to_int(&iu->lun);
273                 entry->tmf_flags = iu->tmf_flags;
274                 entry->u.end.status = be16_to_cpu(vfc_cmd->status);
275                 entry->u.end.error = be16_to_cpu(vfc_cmd->error);
276                 entry->u.end.fcp_rsp_flags = rsp->flags;
277                 entry->u.end.rsp_code = rsp->data.info.rsp_code;
278                 entry->u.end.scsi_status = rsp->scsi_status;
279                 break;
280         case IBMVFC_MAD_FORMAT:
281                 entry->op_code = be32_to_cpu(mad->opcode);
282                 entry->u.end.status = be16_to_cpu(mad->status);
283                 break;
284         default:
285                 break;
286
287         }
288 }
289
290 #else
291 #define ibmvfc_trc_start(evt) do { } while (0)
292 #define ibmvfc_trc_end(evt) do { } while (0)
293 #endif
294
295 /**
296  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
297  * @status:             status / error class
298  * @error:              error
299  *
300  * Return value:
301  *      index into cmd_status / -EINVAL on failure
302  **/
303 static int ibmvfc_get_err_index(u16 status, u16 error)
304 {
305         int i;
306
307         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
308                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
309                     cmd_status[i].error == error)
310                         return i;
311
312         return -EINVAL;
313 }
314
315 /**
316  * ibmvfc_get_cmd_error - Find the error description for the fcp response
317  * @status:             status / error class
318  * @error:              error
319  *
320  * Return value:
321  *      error description string
322  **/
323 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
324 {
325         int rc = ibmvfc_get_err_index(status, error);
326         if (rc >= 0)
327                 return cmd_status[rc].name;
328         return unknown_error;
329 }
330
331 /**
332  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
333  * @vhost:      ibmvfc host struct
334  * @vfc_cmd:    ibmvfc command struct
335  *
336  * Return value:
337  *      SCSI result value to return for completed command
338  **/
339 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
340 {
341         int err;
342         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
343         int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
344
345         if ((rsp->flags & FCP_RSP_LEN_VALID) &&
346             ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
347              rsp->data.info.rsp_code))
348                 return DID_ERROR << 16;
349
350         err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
351         if (err >= 0)
352                 return rsp->scsi_status | (cmd_status[err].result << 16);
353         return rsp->scsi_status | (DID_ERROR << 16);
354 }
355
356 /**
357  * ibmvfc_retry_cmd - Determine if error status is retryable
358  * @status:             status / error class
359  * @error:              error
360  *
361  * Return value:
362  *      1 if error should be retried / 0 if it should not
363  **/
364 static int ibmvfc_retry_cmd(u16 status, u16 error)
365 {
366         int rc = ibmvfc_get_err_index(status, error);
367
368         if (rc >= 0)
369                 return cmd_status[rc].retry;
370         return 1;
371 }
372
/* Fallback description for FC explain codes not in the tables below */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC-LS reject explanation codes, searched by ibmvfc_get_ls_explain() */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};
397
/* FC-GS (name server) reject explanation codes, searched by ibmvfc_get_gs_explain() */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
420
421 /**
422  * ibmvfc_get_ls_explain - Return the FC Explain description text
423  * @status:     FC Explain status
424  *
425  * Returns:
426  *      error string
427  **/
428 static const char *ibmvfc_get_ls_explain(u16 status)
429 {
430         int i;
431
432         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
433                 if (ls_explain[i].fc_explain == status)
434                         return ls_explain[i].name;
435
436         return unknown_fc_explain;
437 }
438
439 /**
440  * ibmvfc_get_gs_explain - Return the FC Explain description text
441  * @status:     FC Explain status
442  *
443  * Returns:
444  *      error string
445  **/
446 static const char *ibmvfc_get_gs_explain(u16 status)
447 {
448         int i;
449
450         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
451                 if (gs_explain[i].fc_explain == status)
452                         return gs_explain[i].name;
453
454         return unknown_fc_explain;
455 }
456
/* FC type error codes, searched by ibmvfc_get_fc_type() */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback description for FC type codes not in the table above */
static const char *unknown_fc_type = "unknown fc type";
470
471 /**
472  * ibmvfc_get_fc_type - Return the FC Type description text
473  * @status:     FC Type error status
474  *
475  * Returns:
476  *      error string
477  **/
478 static const char *ibmvfc_get_fc_type(u16 status)
479 {
480         int i;
481
482         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
483                 if (fc_type[i].fc_type == status)
484                         return fc_type[i].name;
485
486         return unknown_fc_type;
487 }
488
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Enforces the target state machine: once a target has entered the
 * logout/delete path, only the transitions listed below are allowed,
 * so a concurrent rediscovery cannot resurrect a dying target.
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		/* Logout may proceed to its wait state or jump straight to delete */
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		/* After the logout completes, delete (optionally with logout) */
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		/* Terminal state: no further transitions allowed */
		break;
	default:
		/* States outside the teardown path accept any new action */
		tgt->action = action;
		rc = 0;
		break;
	}

	/* Any teardown-path action means this target must not gain an rport */
	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}
548
549 /**
550  * ibmvfc_set_host_state - Set the state for the host
551  * @vhost:              ibmvfc host struct
552  * @state:              state to set host to
553  *
554  * Returns:
555  *      0 if state changed / non-zero if not changed
556  **/
557 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
558                                   enum ibmvfc_host_state state)
559 {
560         int rc = 0;
561
562         switch (vhost->state) {
563         case IBMVFC_HOST_OFFLINE:
564                 rc = -EINVAL;
565                 break;
566         default:
567                 vhost->state = state;
568                 break;
569         }
570
571         return rc;
572 }
573
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Enforces the host action state machine: wait-type actions are only
 * entered from their matching active action, and RESET/REENABLE take
 * precedence over everything else until they complete.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* Only valid once the NPIV login has been sent */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		/* QUERY restarts discovery from a quiescent or failed state */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		/* Reset/reenable override whatever was in progress */
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		/* ...but a pending reset/reenable must not be displaced */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}
633
634 /**
635  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
636  * @vhost:              ibmvfc host struct
637  *
638  * Return value:
639  *      nothing
640  **/
641 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
642 {
643         if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
644             vhost->state == IBMVFC_ACTIVE) {
645                 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
646                         scsi_block_requests(vhost->host);
647                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
648                 }
649         } else
650                 vhost->reinit = 1;
651
652         wake_up(&vhost->work_wait_q);
653 }
654
655 /**
656  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
657  * @tgt:                ibmvfc target struct
658  **/
659 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
660 {
661         if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
662                 tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
663                 tgt->init_retries = 0;
664         }
665         wake_up(&tgt->vhost->work_wait_q);
666 }
667
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 * Blocks new requests, schedules every known target for removal,
 * moves the host to @state and kicks the work thread.
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	/* Stop the midlayer from queueing while we tear things down */
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
689
/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	/* Give up and take the adapter offline after too many retries */
	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		/* Reset the async event queue before starting over */
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		/*
		 * After a migration, targets only need a fresh login;
		 * otherwise they must be fully removed and rediscovered.
		 */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (vhost->client_migrated)
				tgt->need_login = 1;
			else
				ibmvfc_del_tgt(tgt);
		}

		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}
727
728 /**
729  * ibmvfc_send_crq - Send a CRQ
730  * @vhost:      ibmvfc host struct
731  * @word1:      the first 64 bits of the data
732  * @word2:      the second 64 bits of the data
733  *
734  * Return value:
735  *      0 on success / other on failure
736  **/
737 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
738 {
739         struct vio_dev *vdev = to_vio_dev(vhost->dev);
740         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
741 }
742
743 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
744                                u64 word2, u64 word3, u64 word4)
745 {
746         struct vio_dev *vdev = to_vio_dev(vhost->dev);
747
748         return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
749                                   word1, word2, word3, word4);
750 }
751
/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	/* 0xC001... is the CRQ transport "initialization request" message */
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}
764
/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	/* 0xC002... is the CRQ transport "initialization response" message */
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}
777
778 /**
779  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
780  * @vhost:      ibmvfc host who owns the event pool
781  * @queue:      ibmvfc queue struct
782  *
783  * Returns zero on success.
784  **/
785 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
786                                   struct ibmvfc_queue *queue)
787 {
788         int i;
789         struct ibmvfc_event_pool *pool = &queue->evt_pool;
790
791         ENTER;
792         if (!queue->total_depth)
793                 return 0;
794
795         pool->size = queue->total_depth;
796         pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
797         if (!pool->events)
798                 return -ENOMEM;
799
800         pool->iu_storage = dma_alloc_coherent(vhost->dev,
801                                               pool->size * sizeof(*pool->iu_storage),
802                                               &pool->iu_token, 0);
803
804         if (!pool->iu_storage) {
805                 kfree(pool->events);
806                 return -ENOMEM;
807         }
808
809         INIT_LIST_HEAD(&queue->sent);
810         INIT_LIST_HEAD(&queue->free);
811         queue->evt_free = queue->evt_depth;
812         queue->reserved_free = queue->reserved_depth;
813         spin_lock_init(&queue->l_lock);
814
815         for (i = 0; i < pool->size; ++i) {
816                 struct ibmvfc_event *evt = &pool->events[i];
817
818                 /*
819                  * evt->active states
820                  *  1 = in flight
821                  *  0 = being completed
822                  * -1 = free/freed
823                  */
824                 atomic_set(&evt->active, -1);
825                 atomic_set(&evt->free, 1);
826                 evt->crq.valid = 0x80;
827                 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
828                 evt->xfer_iu = pool->iu_storage + i;
829                 evt->vhost = vhost;
830                 evt->queue = queue;
831                 evt->ext_list = NULL;
832                 list_add_tail(&evt->queue_list, &queue->free);
833         }
834
835         LEAVE;
836         return 0;
837 }
838
839 /**
840  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
841  * @vhost:      ibmvfc host who owns the event pool
842  * @queue:      ibmvfc queue struct
843  *
844  **/
845 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
846                                    struct ibmvfc_queue *queue)
847 {
848         int i;
849         struct ibmvfc_event_pool *pool = &queue->evt_pool;
850
851         ENTER;
852         for (i = 0; i < pool->size; ++i) {
853                 list_del(&pool->events[i].queue_list);
854                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
855                 if (pool->events[i].ext_list)
856                         dma_pool_free(vhost->sg_pool,
857                                       pool->events[i].ext_list,
858                                       pool->events[i].ext_list_token);
859         }
860
861         kfree(pool->events);
862         dma_free_coherent(vhost->dev,
863                           pool->size * sizeof(*pool->iu_storage),
864                           pool->iu_storage, pool->iu_token);
865         LEAVE;
866 }
867
868 /**
869  * ibmvfc_free_queue - Deallocate queue
870  * @vhost:      ibmvfc host struct
871  * @queue:      ibmvfc queue struct
872  *
873  * Unmaps dma and deallocates page for messages
874  **/
875 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
876                               struct ibmvfc_queue *queue)
877 {
878         struct device *dev = vhost->dev;
879
880         dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
881         free_page((unsigned long)queue->msgs.handle);
882         queue->msgs.handle = NULL;
883
884         ibmvfc_free_event_pool(vhost, queue);
885 }
886
887 /**
888  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
889  * @vhost:      ibmvfc host struct
890  *
891  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
892  * the crq with the hypervisor.
893  **/
894 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
895 {
896         long rc = 0;
897         struct vio_dev *vdev = to_vio_dev(vhost->dev);
898         struct ibmvfc_queue *crq = &vhost->crq;
899
900         ibmvfc_dbg(vhost, "Releasing CRQ\n");
901         free_irq(vdev->irq, vhost);
902         tasklet_kill(&vhost->tasklet);
903         do {
904                 if (rc)
905                         msleep(100);
906                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
907         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
908
909         vhost->state = IBMVFC_NO_CRQ;
910         vhost->logged_in = 0;
911
912         ibmvfc_free_queue(vhost, crq);
913 }
914
915 /**
916  * ibmvfc_reenable_crq_queue - reenables the CRQ
917  * @vhost:      ibmvfc host struct
918  *
919  * Return value:
920  *      0 on success / other on failure
921  **/
922 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
923 {
924         int rc = 0;
925         struct vio_dev *vdev = to_vio_dev(vhost->dev);
926         unsigned long flags;
927
928         ibmvfc_dereg_sub_crqs(vhost);
929
930         /* Re-enable the CRQ */
931         do {
932                 if (rc)
933                         msleep(100);
934                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
935         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
936
937         if (rc)
938                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
939
940         spin_lock_irqsave(vhost->host->host_lock, flags);
941         spin_lock(vhost->crq.q_lock);
942         vhost->do_enquiry = 1;
943         vhost->using_channels = 0;
944         spin_unlock(vhost->crq.q_lock);
945         spin_unlock_irqrestore(vhost->host->host_lock, flags);
946
947         ibmvfc_reg_sub_crqs(vhost);
948
949         return rc;
950 }
951
952 /**
953  * ibmvfc_reset_crq - resets a crq after a failure
954  * @vhost:      ibmvfc host struct
955  *
956  * Return value:
957  *      0 on success / other on failure
958  **/
959 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
960 {
961         int rc = 0;
962         unsigned long flags;
963         struct vio_dev *vdev = to_vio_dev(vhost->dev);
964         struct ibmvfc_queue *crq = &vhost->crq;
965
966         ibmvfc_dereg_sub_crqs(vhost);
967
968         /* Close the CRQ */
969         do {
970                 if (rc)
971                         msleep(100);
972                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
973         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
974
975         spin_lock_irqsave(vhost->host->host_lock, flags);
976         spin_lock(vhost->crq.q_lock);
977         vhost->state = IBMVFC_NO_CRQ;
978         vhost->logged_in = 0;
979         vhost->do_enquiry = 1;
980         vhost->using_channels = 0;
981
982         /* Clean out the queue */
983         memset(crq->msgs.crq, 0, PAGE_SIZE);
984         crq->cur = 0;
985
986         /* And re-open it again */
987         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
988                                 crq->msg_token, PAGE_SIZE);
989
990         if (rc == H_CLOSED)
991                 /* Adapter is good, but other end is not ready */
992                 dev_warn(vhost->dev, "Partner adapter not ready\n");
993         else if (rc != 0)
994                 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
995
996         spin_unlock(vhost->crq.q_lock);
997         spin_unlock_irqrestore(vhost->host->host_lock, flags);
998
999         ibmvfc_reg_sub_crqs(vhost);
1000
1001         return rc;
1002 }
1003
1004 /**
1005  * ibmvfc_valid_event - Determines if event is valid.
1006  * @pool:       event_pool that contains the event
1007  * @evt:        ibmvfc event to be checked for validity
1008  *
1009  * Return value:
1010  *      1 if event is valid / 0 if event is not valid
1011  **/
1012 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
1013                               struct ibmvfc_event *evt)
1014 {
1015         int index = evt - pool->events;
1016         if (index < 0 || index >= pool->size)   /* outside of bounds */
1017                 return 0;
1018         if (evt != pool->events + index)        /* unaligned */
1019                 return 0;
1020         return 1;
1021 }
1022
1023 /**
1024  * ibmvfc_free_event - Free the specified event
1025  * @evt:        ibmvfc_event to be freed
1026  *
1027  **/
1028 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1029 {
1030         struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1031         unsigned long flags;
1032
1033         BUG_ON(!ibmvfc_valid_event(pool, evt));
1034         BUG_ON(atomic_inc_return(&evt->free) != 1);
1035         BUG_ON(atomic_dec_and_test(&evt->active));
1036
1037         spin_lock_irqsave(&evt->queue->l_lock, flags);
1038         list_add_tail(&evt->queue_list, &evt->queue->free);
1039         if (evt->reserved) {
1040                 evt->reserved = 0;
1041                 evt->queue->reserved_free++;
1042         } else {
1043                 evt->queue->evt_free++;
1044         }
1045         if (evt->eh_comp)
1046                 complete(evt->eh_comp);
1047         spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1048 }
1049
1050 /**
1051  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1052  * @evt:        ibmvfc event struct
1053  *
1054  * This function does not setup any error status, that must be done
1055  * before this function gets called.
1056  **/
1057 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1058 {
1059         struct scsi_cmnd *cmnd = evt->cmnd;
1060
1061         if (cmnd) {
1062                 scsi_dma_unmap(cmnd);
1063                 scsi_done(cmnd);
1064         }
1065
1066         ibmvfc_free_event(evt);
1067 }
1068
1069 /**
1070  * ibmvfc_complete_purge - Complete failed command list
1071  * @purge_list:         list head of failed commands
1072  *
1073  * This function runs completions on commands to fail as a result of a
1074  * host reset or platform migration.
1075  **/
1076 static void ibmvfc_complete_purge(struct list_head *purge_list)
1077 {
1078         struct ibmvfc_event *evt, *pos;
1079
1080         list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1081                 list_del(&evt->queue_list);
1082                 ibmvfc_trc_end(evt);
1083                 evt->done(evt);
1084         }
1085 }
1086
1087 /**
1088  * ibmvfc_fail_request - Fail request with specified error code
1089  * @evt:                ibmvfc event struct
1090  * @error_code: error code to fail request with
1091  *
1092  * Return value:
1093  *      none
1094  **/
1095 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1096 {
1097         /*
1098          * Anything we are failing should still be active. Otherwise, it
1099          * implies we already got a response for the command and are doing
1100          * something bad like double completing it.
1101          */
1102         BUG_ON(!atomic_dec_and_test(&evt->active));
1103         if (evt->cmnd) {
1104                 evt->cmnd->result = (error_code << 16);
1105                 evt->done = ibmvfc_scsi_eh_done;
1106         } else
1107                 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1108
1109         del_timer(&evt->timer);
1110 }
1111
1112 /**
1113  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1114  * @vhost:              ibmvfc host struct
1115  * @error_code: error code to fail requests with
1116  *
1117  * Return value:
1118  *      none
1119  **/
1120 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1121 {
1122         struct ibmvfc_event *evt, *pos;
1123         struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1124         unsigned long flags;
1125         int hwqs = 0;
1126         int i;
1127
1128         if (vhost->using_channels)
1129                 hwqs = vhost->scsi_scrqs.active_queues;
1130
1131         ibmvfc_dbg(vhost, "Purging all requests\n");
1132         spin_lock_irqsave(&vhost->crq.l_lock, flags);
1133         list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1134                 ibmvfc_fail_request(evt, error_code);
1135         list_splice_init(&vhost->crq.sent, &vhost->purge);
1136         spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1137
1138         for (i = 0; i < hwqs; i++) {
1139                 spin_lock_irqsave(queues[i].q_lock, flags);
1140                 spin_lock(&queues[i].l_lock);
1141                 list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1142                         ibmvfc_fail_request(evt, error_code);
1143                 list_splice_init(&queues[i].sent, &vhost->purge);
1144                 spin_unlock(&queues[i].l_lock);
1145                 spin_unlock_irqrestore(queues[i].q_lock, flags);
1146         }
1147 }
1148
1149 /**
1150  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1151  * @vhost:      struct ibmvfc host to reset
1152  **/
1153 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1154 {
1155         ibmvfc_purge_requests(vhost, DID_ERROR);
1156         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1157         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1158 }
1159
1160 /**
1161  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1162  * @vhost:      struct ibmvfc host to reset
1163  **/
1164 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1165 {
1166         if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1167             !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1168                 scsi_block_requests(vhost->host);
1169                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1170                 vhost->job_step = ibmvfc_npiv_logout;
1171                 wake_up(&vhost->work_wait_q);
1172         } else
1173                 ibmvfc_hard_reset_host(vhost);
1174 }
1175
1176 /**
1177  * ibmvfc_reset_host - Reset the connection to the server
1178  * @vhost:      ibmvfc host struct
1179  **/
1180 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1181 {
1182         unsigned long flags;
1183
1184         spin_lock_irqsave(vhost->host->host_lock, flags);
1185         __ibmvfc_reset_host(vhost);
1186         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1187 }
1188
1189 /**
1190  * ibmvfc_retry_host_init - Retry host initialization if allowed
1191  * @vhost:      ibmvfc host struct
1192  *
1193  * Returns: 1 if init will be retried / 0 if not
1194  *
1195  **/
1196 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1197 {
1198         int retry = 0;
1199
1200         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1201                 vhost->delay_init = 1;
1202                 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1203                         dev_err(vhost->dev,
1204                                 "Host initialization retries exceeded. Taking adapter offline\n");
1205                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1206                 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1207                         __ibmvfc_reset_host(vhost);
1208                 else {
1209                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1210                         retry = 1;
1211                 }
1212         }
1213
1214         wake_up(&vhost->work_wait_q);
1215         return retry;
1216 }
1217
1218 /**
1219  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1220  * @starget:    scsi target struct
1221  *
1222  * Return value:
1223  *      ibmvfc_target struct / NULL if not found
1224  **/
1225 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1226 {
1227         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1228         struct ibmvfc_host *vhost = shost_priv(shost);
1229         struct ibmvfc_target *tgt;
1230
1231         list_for_each_entry(tgt, &vhost->targets, queue)
1232                 if (tgt->target_id == starget->id) {
1233                         kref_get(&tgt->kref);
1234                         return tgt;
1235                 }
1236         return NULL;
1237 }
1238
1239 /**
1240  * ibmvfc_get_target - Find the specified scsi_target
1241  * @starget:    scsi target struct
1242  *
1243  * Return value:
1244  *      ibmvfc_target struct / NULL if not found
1245  **/
1246 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1247 {
1248         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1249         struct ibmvfc_target *tgt;
1250         unsigned long flags;
1251
1252         spin_lock_irqsave(shost->host_lock, flags);
1253         tgt = __ibmvfc_get_target(starget);
1254         spin_unlock_irqrestore(shost->host_lock, flags);
1255         return tgt;
1256 }
1257
1258 /**
1259  * ibmvfc_get_host_speed - Get host port speed
1260  * @shost:              scsi host struct
1261  *
1262  * Return value:
1263  *      none
1264  **/
1265 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1266 {
1267         struct ibmvfc_host *vhost = shost_priv(shost);
1268         unsigned long flags;
1269
1270         spin_lock_irqsave(shost->host_lock, flags);
1271         if (vhost->state == IBMVFC_ACTIVE) {
1272                 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1273                 case 1:
1274                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1275                         break;
1276                 case 2:
1277                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1278                         break;
1279                 case 4:
1280                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1281                         break;
1282                 case 8:
1283                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1284                         break;
1285                 case 10:
1286                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1287                         break;
1288                 case 16:
1289                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1290                         break;
1291                 default:
1292                         ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1293                                    be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1294                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1295                         break;
1296                 }
1297         } else
1298                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1299         spin_unlock_irqrestore(shost->host_lock, flags);
1300 }
1301
1302 /**
1303  * ibmvfc_get_host_port_state - Get host port state
1304  * @shost:              scsi host struct
1305  *
1306  * Return value:
1307  *      none
1308  **/
1309 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1310 {
1311         struct ibmvfc_host *vhost = shost_priv(shost);
1312         unsigned long flags;
1313
1314         spin_lock_irqsave(shost->host_lock, flags);
1315         switch (vhost->state) {
1316         case IBMVFC_INITIALIZING:
1317         case IBMVFC_ACTIVE:
1318                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1319                 break;
1320         case IBMVFC_LINK_DOWN:
1321                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1322                 break;
1323         case IBMVFC_LINK_DEAD:
1324         case IBMVFC_HOST_OFFLINE:
1325                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1326                 break;
1327         case IBMVFC_HALTED:
1328                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1329                 break;
1330         case IBMVFC_NO_CRQ:
1331                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1332                 break;
1333         default:
1334                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1335                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1336                 break;
1337         }
1338         spin_unlock_irqrestore(shost->host_lock, flags);
1339 }
1340
1341 /**
1342  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1343  * @rport:              rport struct
1344  * @timeout:    timeout value
1345  *
1346  * Return value:
1347  *      none
1348  **/
1349 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1350 {
1351         if (timeout)
1352                 rport->dev_loss_tmo = timeout;
1353         else
1354                 rport->dev_loss_tmo = 1;
1355 }
1356
1357 /**
1358  * ibmvfc_release_tgt - Free memory allocated for a target
1359  * @kref:               kref struct
1360  *
1361  **/
1362 static void ibmvfc_release_tgt(struct kref *kref)
1363 {
1364         struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1365         kfree(tgt);
1366 }
1367
1368 /**
1369  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1370  * @starget:    scsi target struct
1371  *
1372  * Return value:
1373  *      none
1374  **/
1375 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1376 {
1377         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1378         fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1379         if (tgt)
1380                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1381 }
1382
1383 /**
1384  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1385  * @starget:    scsi target struct
1386  *
1387  * Return value:
1388  *      none
1389  **/
1390 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1391 {
1392         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1393         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1394         if (tgt)
1395                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1396 }
1397
1398 /**
1399  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1400  * @starget:    scsi target struct
1401  *
1402  * Return value:
1403  *      none
1404  **/
1405 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1406 {
1407         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1408         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1409         if (tgt)
1410                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1411 }
1412
1413 /**
1414  * ibmvfc_wait_while_resetting - Wait while the host resets
1415  * @vhost:              ibmvfc host struct
1416  *
1417  * Return value:
1418  *      0 on success / other on failure
1419  **/
1420 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1421 {
1422         long timeout = wait_event_timeout(vhost->init_wait_q,
1423                                           ((vhost->state == IBMVFC_ACTIVE ||
1424                                             vhost->state == IBMVFC_HOST_OFFLINE ||
1425                                             vhost->state == IBMVFC_LINK_DEAD) &&
1426                                            vhost->action == IBMVFC_HOST_ACTION_NONE),
1427                                           (init_timeout * HZ));
1428
1429         return timeout ? 0 : -EIO;
1430 }
1431
1432 /**
1433  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1434  * @shost:              scsi host struct
1435  *
1436  * Return value:
1437  *      0 on success / other on failure
1438  **/
1439 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1440 {
1441         struct ibmvfc_host *vhost = shost_priv(shost);
1442
1443         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1444         ibmvfc_reset_host(vhost);
1445         return ibmvfc_wait_while_resetting(vhost);
1446 }
1447
1448 /**
1449  * ibmvfc_gather_partition_info - Gather info about the LPAR
1450  * @vhost:      ibmvfc host struct
1451  *
1452  * Return value:
1453  *      none
1454  **/
1455 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1456 {
1457         struct device_node *rootdn;
1458         const char *name;
1459         const unsigned int *num;
1460
1461         rootdn = of_find_node_by_path("/");
1462         if (!rootdn)
1463                 return;
1464
1465         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1466         if (name)
1467                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1468         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1469         if (num)
1470                 vhost->partition_number = *num;
1471         of_node_put(rootdn);
1472 }
1473
1474 /**
1475  * ibmvfc_set_login_info - Setup info for NPIV login
1476  * @vhost:      ibmvfc host struct
1477  *
1478  * Return value:
1479  *      none
1480  **/
1481 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1482 {
1483         struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1484         struct ibmvfc_queue *async_crq = &vhost->async_crq;
1485         struct device_node *of_node = vhost->dev->of_node;
1486         const char *location;
1487         u16 max_cmds;
1488
1489         max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
1490         if (mq_enabled)
1491                 max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
1492                         vhost->client_scsi_channels;
1493
1494         memset(login_info, 0, sizeof(*login_info));
1495
1496         login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1497         login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1498         login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1499         login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1500         login_info->partition_num = cpu_to_be32(vhost->partition_number);
1501         login_info->vfc_frame_version = cpu_to_be32(1);
1502         login_info->fcp_version = cpu_to_be16(3);
1503         login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1504         if (vhost->client_migrated)
1505                 login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1506
1507         login_info->max_cmds = cpu_to_be32(max_cmds);
1508         login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1509
1510         if (vhost->mq_enabled || vhost->using_channels)
1511                 login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1512
1513         login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1514         login_info->async.len = cpu_to_be32(async_crq->size *
1515                                             sizeof(*async_crq->msgs.async));
1516         strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1517         strncpy(login_info->device_name,
1518                 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1519
1520         location = of_get_property(of_node, "ibm,loc-code", NULL);
1521         location = location ? location : dev_name(vhost->dev);
1522         strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1523 }
1524
1525 /**
1526  * __ibmvfc_get_event - Gets the next free event in pool
1527  * @queue:      ibmvfc queue struct
1528  * @reserved:   event is for a reserved management command
1529  *
1530  * Returns a free event from the pool.
1531  **/
1532 static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
1533 {
1534         struct ibmvfc_event *evt = NULL;
1535         unsigned long flags;
1536
1537         spin_lock_irqsave(&queue->l_lock, flags);
1538         if (reserved && queue->reserved_free) {
1539                 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1540                 evt->reserved = 1;
1541                 queue->reserved_free--;
1542         } else if (queue->evt_free) {
1543                 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1544                 queue->evt_free--;
1545         } else {
1546                 goto out;
1547         }
1548
1549         atomic_set(&evt->free, 0);
1550         list_del(&evt->queue_list);
1551 out:
1552         spin_unlock_irqrestore(&queue->l_lock, flags);
1553         return evt;
1554 }
1555
/* Convenience wrappers: fetch a normal event vs. a reserved (management
 * command) event from a queue's pool. */
#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
1558
1559 /**
1560  * ibmvfc_locked_done - Calls evt completion with host_lock held
1561  * @evt:        ibmvfc evt to complete
1562  *
1563  * All non-scsi command completion callbacks have the expectation that the
1564  * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1565  * MAD evt with the host_lock.
1566  **/
1567 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1568 {
1569         unsigned long flags;
1570
1571         spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1572         evt->_done(evt);
1573         spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1574 }
1575
1576 /**
1577  * ibmvfc_init_event - Initialize fields in an event struct that are always
1578  *                              required.
1579  * @evt:        The event
1580  * @done:       Routine to call when the event is responded to
1581  * @format:     SRP or MAD format
1582  **/
1583 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1584                               void (*done) (struct ibmvfc_event *), u8 format)
1585 {
1586         evt->cmnd = NULL;
1587         evt->sync_iu = NULL;
1588         evt->eh_comp = NULL;
1589         evt->crq.format = format;
1590         if (format == IBMVFC_CMD_FORMAT)
1591                 evt->done = done;
1592         else {
1593                 evt->_done = done;
1594                 evt->done = ibmvfc_locked_done;
1595         }
1596         evt->hwq = 0;
1597 }
1598
1599 /**
1600  * ibmvfc_map_sg_list - Initialize scatterlist
1601  * @scmd:       scsi command struct
1602  * @nseg:       number of scatterlist segments
1603  * @md: memory descriptor list to initialize
1604  **/
1605 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1606                                struct srp_direct_buf *md)
1607 {
1608         int i;
1609         struct scatterlist *sg;
1610
1611         scsi_for_each_sg(scmd, sg, nseg, i) {
1612                 md[i].va = cpu_to_be64(sg_dma_address(sg));
1613                 md[i].len = cpu_to_be32(sg_dma_len(sg));
1614                 md[i].key = 0;
1615         }
1616 }
1617
1618 /**
1619  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1620  * @scmd:               struct scsi_cmnd with the scatterlist
1621  * @evt:                ibmvfc event struct
1622  * @vfc_cmd:    vfc_cmd that contains the memory descriptor
1623  * @dev:                device for which to map dma memory
1624  *
1625  * Returns:
1626  *      0 on success / non-zero on failure
1627  **/
1628 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1629                               struct ibmvfc_event *evt,
1630                               struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1631 {
1632
1633         int sg_mapped;
1634         struct srp_direct_buf *data = &vfc_cmd->ioba;
1635         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1636         struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1637
1638         if (cls3_error)
1639                 vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1640
1641         sg_mapped = scsi_dma_map(scmd);
1642         if (!sg_mapped) {
1643                 vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1644                 return 0;
1645         } else if (unlikely(sg_mapped < 0)) {
1646                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1647                         scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1648                 return sg_mapped;
1649         }
1650
1651         if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1652                 vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1653                 iu->add_cdb_len |= IBMVFC_WRDATA;
1654         } else {
1655                 vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1656                 iu->add_cdb_len |= IBMVFC_RDDATA;
1657         }
1658
1659         if (sg_mapped == 1) {
1660                 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1661                 return 0;
1662         }
1663
1664         vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1665
1666         if (!evt->ext_list) {
1667                 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1668                                                &evt->ext_list_token);
1669
1670                 if (!evt->ext_list) {
1671                         scsi_dma_unmap(scmd);
1672                         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1673                                 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1674                         return -ENOMEM;
1675                 }
1676         }
1677
1678         ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1679
1680         data->va = cpu_to_be64(evt->ext_list_token);
1681         data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1682         data->key = 0;
1683         return 0;
1684 }
1685
1686 /**
1687  * ibmvfc_timeout - Internal command timeout handler
1688  * @t:  struct ibmvfc_event that timed out
1689  *
1690  * Called when an internally generated command times out
1691  **/
1692 static void ibmvfc_timeout(struct timer_list *t)
1693 {
1694         struct ibmvfc_event *evt = from_timer(evt, t, timer);
1695         struct ibmvfc_host *vhost = evt->vhost;
1696         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1697         ibmvfc_reset_host(vhost);
1698 }
1699
/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt:		event to be sent
 * @vhost:		ibmvfc host struct
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Returns:
 *	0 on success (the event was handed to firmware, or completed with an
 *	error through its done handler) / SCSI_MLQUEUE_HOST_BUSY if the send
 *	failed with H_CLOSED and the caller should retry.
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	unsigned long flags;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	/* timeout == 0 means the command is not timed */
	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	/* Mark the event in flight before handing it to firmware so the
	 * response path can find it on the queue's sent list.
	 */
	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->sent);
	atomic_set(&evt->active, 1);

	/* Make the bookkeeping above visible before the hcall */
	mb();

	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
		rc = ibmvfc_send_sub_crq(vhost,
					 evt->queue->vios_cookie,
					 be64_to_cpu(crq_as_u64[0]),
					 be64_to_cpu(crq_as_u64[1]),
					 0, 0);
	else
		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				     be64_to_cpu(crq_as_u64[1]));

	if (rc) {
		/* Send failed: undo the in-flight bookkeeping and stop the timer */
		atomic_set(&evt->active, 0);
		list_del(&evt->queue_list);
		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
		del_timer(&evt->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport. This
		 * will be handled in ibmvfc_handle_crq()
		 */
		if (rc == H_CLOSED) {
			if (printk_ratelimit())
				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
			if (evt->cmnd)
				scsi_dma_unmap(evt->cmnd);
			ibmvfc_free_event(evt);
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		/* Hard failure: complete the event with an error so the normal
		 * done path performs cleanup (DMA unmap, event free).
		 */
		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
		if (evt->cmnd) {
			evt->cmnd->result = DID_ERROR << 16;
			evt->done = ibmvfc_scsi_eh_done;
		} else
			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);

		evt->done(evt);
	} else {
		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
		ibmvfc_trc_start(evt);
	}

	return 0;
}
1782
1783 /**
1784  * ibmvfc_log_error - Log an error for the failed command if appropriate
1785  * @evt:        ibmvfc event to log
1786  *
1787  **/
1788 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1789 {
1790         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1791         struct ibmvfc_host *vhost = evt->vhost;
1792         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1793         struct scsi_cmnd *cmnd = evt->cmnd;
1794         const char *err = unknown_error;
1795         int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1796         int logerr = 0;
1797         int rsp_code = 0;
1798
1799         if (index >= 0) {
1800                 logerr = cmd_status[index].log;
1801                 err = cmd_status[index].name;
1802         }
1803
1804         if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1805                 return;
1806
1807         if (rsp->flags & FCP_RSP_LEN_VALID)
1808                 rsp_code = rsp->data.info.rsp_code;
1809
1810         scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1811                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1812                     cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1813                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1814 }
1815
1816 /**
1817  * ibmvfc_relogin - Log back into the specified device
1818  * @sdev:       scsi device struct
1819  *
1820  **/
1821 static void ibmvfc_relogin(struct scsi_device *sdev)
1822 {
1823         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1824         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1825         struct ibmvfc_target *tgt;
1826         unsigned long flags;
1827
1828         spin_lock_irqsave(vhost->host->host_lock, flags);
1829         list_for_each_entry(tgt, &vhost->targets, queue) {
1830                 if (rport == tgt->rport) {
1831                         ibmvfc_del_tgt(tgt);
1832                         break;
1833                 }
1834         }
1835
1836         ibmvfc_reinit_host(vhost);
1837         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1838 }
1839
/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt:	ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds. Translates the firmware
 * response into a midlayer result, copies back sense data, and completes
 * the scsi command before freeing the event.
 **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	u32 rsp_len = 0;
	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

	if (cmnd) {
		/* Prefer the adapter's residual count when valid, then the
		 * FCP response underrun count, else assume a full transfer.
		 */
		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);

			/* Sense data follows the FCP response data; clamp the
			 * copy so it fits the midlayer's sense buffer.
			 */
			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
			/* VIOS is asking us to log back into the target */
			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
				ibmvfc_relogin(cmnd->device);

			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
				cmnd->result = (DID_ERROR << 16);

			ibmvfc_log_error(evt);
		}

		/* A short transfer below the underflow limit is an error */
		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}
1891
1892 /**
1893  * ibmvfc_host_chkready - Check if the host can accept commands
1894  * @vhost:       struct ibmvfc host
1895  *
1896  * Returns:
1897  *      1 if host can accept command / 0 if not
1898  **/
1899 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1900 {
1901         int result = 0;
1902
1903         switch (vhost->state) {
1904         case IBMVFC_LINK_DEAD:
1905         case IBMVFC_HOST_OFFLINE:
1906                 result = DID_NO_CONNECT << 16;
1907                 break;
1908         case IBMVFC_NO_CRQ:
1909         case IBMVFC_INITIALIZING:
1910         case IBMVFC_HALTED:
1911         case IBMVFC_LINK_DOWN:
1912                 result = DID_REQUEUE << 16;
1913                 break;
1914         case IBMVFC_ACTIVE:
1915                 result = 0;
1916                 break;
1917         }
1918
1919         return result;
1920 }
1921
1922 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1923 {
1924         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1925         struct ibmvfc_host *vhost = evt->vhost;
1926         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1927         struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1928         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1929         size_t offset;
1930
1931         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1932         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1933                 offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1934                 vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1935         } else
1936                 offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1937         vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1938         vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1939         vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1940         vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1941         vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1942         vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1943         vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1944         int_to_scsilun(sdev->lun, &iu->lun);
1945
1946         return vfc_cmd;
1947 }
1948
1949 /**
1950  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1951  * @shost:      scsi host struct
1952  * @cmnd:       struct scsi_cmnd to be executed
1953  *
1954  * Returns:
1955  *      0 on success / other on failure
1956  **/
1957 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1958 {
1959         struct ibmvfc_host *vhost = shost_priv(shost);
1960         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1961         struct ibmvfc_cmd *vfc_cmd;
1962         struct ibmvfc_fcp_cmd_iu *iu;
1963         struct ibmvfc_event *evt;
1964         u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
1965         u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1966         u16 scsi_channel;
1967         int rc;
1968
1969         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1970             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1971                 cmnd->result = rc;
1972                 scsi_done(cmnd);
1973                 return 0;
1974         }
1975
1976         cmnd->result = (DID_OK << 16);
1977         if (vhost->using_channels) {
1978                 scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1979                 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1980                 if (!evt)
1981                         return SCSI_MLQUEUE_HOST_BUSY;
1982
1983                 evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1984         } else {
1985                 evt = ibmvfc_get_event(&vhost->crq);
1986                 if (!evt)
1987                         return SCSI_MLQUEUE_HOST_BUSY;
1988         }
1989
1990         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1991         evt->cmnd = cmnd;
1992
1993         vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1994         iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1995
1996         iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1997         memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1998
1999         if (cmnd->flags & SCMD_TAGGED) {
2000                 vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
2001                 iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
2002         }
2003
2004         vfc_cmd->correlation = cpu_to_be64((u64)evt);
2005
2006         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
2007                 return ibmvfc_send_event(evt, vhost, 0);
2008
2009         ibmvfc_free_event(evt);
2010         if (rc == -ENOMEM)
2011                 return SCSI_MLQUEUE_HOST_BUSY;
2012
2013         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2014                 scmd_printk(KERN_ERR, cmnd,
2015                             "Failed to map DMA buffer for command. rc=%d\n", rc);
2016
2017         cmnd->result = DID_ERROR << 16;
2018         scsi_done(cmnd);
2019         return 0;
2020 }
2021
/**
 * ibmvfc_sync_completion - Signal that a synchronous command has completed
 * @evt:	ibmvfc event struct
 *
 * Used as the done callback for events a thread waits on via evt->comp.
 * Copies the firmware response into the caller-supplied sync_iu (if any)
 * before waking the waiter, so the response is valid once the wait returns.
 **/
static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
{
	/* copy the response back */
	if (evt->sync_iu)
		*evt->sync_iu = *evt->xfer_iu;

	complete(&evt->comp);
}
2035
/**
 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
 * @evt:	struct ibmvfc_event
 *
 * Frees the cancel event and clears the aborting_passthru flag so another
 * passthru cancel may be issued.
 **/
static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
{
	/* Cache vhost before the event is freed below */
	struct ibmvfc_host *vhost = evt->vhost;

	ibmvfc_free_event(evt);
	vhost->aborting_passthru = 0;
	dev_info(vhost->dev, "Passthru command cancelled\n");
}
2049
/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job:	struct bsg_job that timed out
 *
 * Sends a TMF MAD to cancel the timed-out passthru command. If a cancel is
 * already outstanding or the host is not active, resets the host instead.
 * Called with and returns holding no locks; takes the host lock internally.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		/* Can't issue a cancel now; recover via a full host reset */
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return -ENOMEM;
	}

	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	/* Build the TMF MAD that cancels the outstanding passthru command */
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(port_id);
	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}
2106
/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost:		struct ibmvfc_host to send command
 * @port_id:	port ID to send command
 *
 * Issues a synchronous port login to @port_id, unless the port is already
 * a discovered target or the host is not ready. Sleeps waiting for the
 * response; must not be called with the host lock held.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Skip the login if the port is already a known target */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		rc = -ENOMEM;
		goto unlock_out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(port_id);
	/* Response lands in the on-stack rsp_iu via ibmvfc_sync_completion */
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	/* Drop the lock while sleeping on the completion */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}
2171
/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job:	struct bsg_job to be executed
 *
 * Translates an FC BSG ELS/CT request into a passthru MAD: maps the
 * request and reply payloads for DMA (one segment each), optionally PLOGIs
 * into the destination port first, then sends the MAD and waits
 * synchronously for the response. Serialized via vhost->passthru_mutex.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags, port_id = -1;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	unsigned int code = bsg_request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	/* Determine the FC frame type; host-initiated variants carry the
	 * destination port ID in the request itself.
	 */
	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
			bsg_request->rqst_data.h_els.port_id[2];
		fallthrough;
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		issue_login = 1;
		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
			bsg_request->rqst_data.h_ct.port_id[2];
		fallthrough;
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	}

	if (port_id == -1)
		return -EINVAL;
	/* Only one passthru at a time; don't block if one is in flight */
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	/* Stash the port ID for ibmvfc_bsg_timeout() */
	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	/* The passthru MAD holds exactly one descriptor per direction */
	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		rc = -ENOMEM;
		goto out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

	/* Point the MAD at its own embedded IU within the transfer area */
	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
	mad->iu.flags = cpu_to_be32(fc_flags);
	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);

	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
	mad->iu.scsi_id = cpu_to_be64(port_id);
	mad->iu.tag = cpu_to_be64((u64)evt);
	rsp_len = be32_to_cpu(mad->iu.rsp.len);

	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	/* Drop the lock while sleeping on the completion */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		bsg_reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}
2324
/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Sends a synchronous TMF-format command carrying the requested reset type
 * and waits for the response. Sleeps; must be called without locks held.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		/* TMFs always go to the first channel when channelized */
		if (vhost->using_channels)
			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
		else
			evt = ibmvfc_get_event(&vhost->crq);

		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}

		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		/* No data transfer for a TMF; the reset type rides in tmf_flags */
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = type;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* rsp_rc stays -EBUSY if the host was not active above */
	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2404
2405 /**
2406  * ibmvfc_match_rport - Match function for specified remote port
2407  * @evt:        ibmvfc event struct
2408  * @rport:      device to match
2409  *
2410  * Returns:
2411  *      1 if event matches rport / 0 if event does not match rport
2412  **/
2413 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2414 {
2415         struct fc_rport *cmd_rport;
2416
2417         if (evt->cmnd) {
2418                 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2419                 if (cmd_rport == rport)
2420                         return 1;
2421         }
2422         return 0;
2423 }
2424
2425 /**
2426  * ibmvfc_match_target - Match function for specified target
2427  * @evt:        ibmvfc event struct
2428  * @device:     device to match (starget)
2429  *
2430  * Returns:
2431  *      1 if event matches starget / 0 if event does not match starget
2432  **/
2433 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2434 {
2435         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2436                 return 1;
2437         return 0;
2438 }
2439
2440 /**
2441  * ibmvfc_match_lun - Match function for specified LUN
2442  * @evt:        ibmvfc event struct
2443  * @device:     device to match (sdev)
2444  *
2445  * Returns:
2446  *      1 if event matches sdev / 0 if event does not match sdev
2447  **/
2448 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2449 {
2450         if (evt->cmnd && evt->cmnd->device == device)
2451                 return 1;
2452         return 0;
2453 }
2454
2455 /**
2456  * ibmvfc_event_is_free - Check if event is free or not
2457  * @evt:        ibmvfc event struct
2458  *
2459  * Returns:
2460  *      true / false
2461  **/
2462 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2463 {
2464         struct ibmvfc_event *loop_evt;
2465
2466         list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2467                 if (loop_evt == evt)
2468                         return true;
2469
2470         return false;
2471 }
2472
2473 /**
2474  * ibmvfc_wait_for_ops - Wait for ops to complete
2475  * @vhost:      ibmvfc host struct
2476  * @device:     device to match (starget or sdev)
2477  * @match:      match function
2478  *
2479  * Returns:
2480  *      SUCCESS / FAILED
2481  **/
2482 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2483                                int (*match) (struct ibmvfc_event *, void *))
2484 {
2485         struct ibmvfc_event *evt;
2486         DECLARE_COMPLETION_ONSTACK(comp);
2487         int wait, i, q_index, q_size;
2488         unsigned long flags;
2489         signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2490         struct ibmvfc_queue *queues;
2491
2492         ENTER;
2493         if (vhost->mq_enabled && vhost->using_channels) {
2494                 queues = vhost->scsi_scrqs.scrqs;
2495                 q_size = vhost->scsi_scrqs.active_queues;
2496         } else {
2497                 queues = &vhost->crq;
2498                 q_size = 1;
2499         }
2500
2501         do {
2502                 wait = 0;
2503                 spin_lock_irqsave(vhost->host->host_lock, flags);
2504                 for (q_index = 0; q_index < q_size; q_index++) {
2505                         spin_lock(&queues[q_index].l_lock);
2506                         for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2507                                 evt = &queues[q_index].evt_pool.events[i];
2508                                 if (!ibmvfc_event_is_free(evt)) {
2509                                         if (match(evt, device)) {
2510                                                 evt->eh_comp = &comp;
2511                                                 wait++;
2512                                         }
2513                                 }
2514                         }
2515                         spin_unlock(&queues[q_index].l_lock);
2516                 }
2517                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2518
2519                 if (wait) {
2520                         timeout = wait_for_completion_timeout(&comp, timeout);
2521
2522                         if (!timeout) {
2523                                 wait = 0;
2524                                 spin_lock_irqsave(vhost->host->host_lock, flags);
2525                                 for (q_index = 0; q_index < q_size; q_index++) {
2526                                         spin_lock(&queues[q_index].l_lock);
2527                                         for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2528                                                 evt = &queues[q_index].evt_pool.events[i];
2529                                                 if (!ibmvfc_event_is_free(evt)) {
2530                                                         if (match(evt, device)) {
2531                                                                 evt->eh_comp = NULL;
2532                                                                 wait++;
2533                                                         }
2534                                                 }
2535                                         }
2536                                         spin_unlock(&queues[q_index].l_lock);
2537                                 }
2538                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2539                                 if (wait)
2540                                         dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2541                                 LEAVE;
2542                                 return wait ? FAILED : SUCCESS;
2543                         }
2544                 }
2545         } while (wait);
2546
2547         LEAVE;
2548         return SUCCESS;
2549 }
2550
/**
 * ibmvfc_init_tmf - Allocate and initialize a task management MAD event
 * @queue:	queue from which to allocate a reserved event
 * @sdev:	scsi device the task management function targets
 * @type:	TMF flag bits (e.g. IBMVFC_TMF_ABORT_TASK_SET)
 *
 * Fills in an ibmvfc_tmf MAD addressed at the rport/LUN backing @sdev.
 * The caller is responsible for sending the event and waiting on
 * evt->comp.
 *
 * Returns:
 *	initialized event / NULL if no reserved event was available
 **/
static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
					    struct scsi_device *sdev,
					    int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;

	evt = ibmvfc_get_reserved_event(queue);
	if (!evt)
		return NULL;
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		/* Version 2 of the MAD additionally carries the target WWPN */
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(rport->port_name);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &tmf->lun);
	/* Drop the suppress-ABTS request if the firmware can't honor it */
	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
	if (vhost->state == IBMVFC_ACTIVE)
		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
	else
		/* Host not active: forward only the suppress-ABTS bit */
		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
	/* Cancel keys identify the commands this TMF applies to */
	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);

	init_completion(&evt->comp);

	return evt;
}
2591
/**
 * ibmvfc_cancel_all_mq - Cancel outstanding commands on all active sub-CRQs
 * @sdev:	scsi device whose commands should be cancelled
 * @type:	TMF flag bits to send with each cancel
 *
 * For each active sub-CRQ that has a command outstanding for @sdev, sends
 * a cancel TMF on that queue, then waits for all the cancels to complete.
 *
 * Returns:
 *	0 on success / -ENOMEM if an event could not be allocated /
 *	-EIO if any cancel failed with an unexpected status
 **/
static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt, *temp;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int num_hwq, i;
	int fail = 0;
	LIST_HEAD(cancelq);
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	num_hwq = vhost->scsi_scrqs.active_queues;
	for (i = 0; i < num_hwq; i++) {
		/* Lock order: host_lock, then q_lock, then l_lock */
		spin_lock(queues[i].q_lock);
		spin_lock(&queues[i].l_lock);
		found_evt = NULL;
		/* Only send a cancel on queues with a command for this sdev */
		list_for_each_entry(evt, &queues[i].sent, queue_list) {
			if (evt->cmnd && evt->cmnd->device == sdev) {
				found_evt = evt;
				break;
			}
		}
		spin_unlock(&queues[i].l_lock);

		if (found_evt && vhost->logged_in) {
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			if (!evt) {
				spin_unlock(queues[i].q_lock);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				return -ENOMEM;
			}
			/* Response lands in this queue's cancel_rsp buffer */
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (list_empty(&cancelq)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	/* Wait for every cancel we issued and collect their statuses */
	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
			/* Host adapter most likely going through reset, return success to
			 * the caller will wait for the command being cancelled to get returned
			 */
				break;
			default:
				fail = 1;
				break;
			}
		}
	}

	if (fail)
		return -EIO;

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	LEAVE;
	return 0;
}
2671
2672 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2673 {
2674         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2675         struct ibmvfc_event *evt, *found_evt;
2676         union ibmvfc_iu rsp;
2677         int rsp_rc = -EBUSY;
2678         unsigned long flags;
2679         u16 status;
2680
2681         ENTER;
2682         found_evt = NULL;
2683         spin_lock_irqsave(vhost->host->host_lock, flags);
2684         spin_lock(&vhost->crq.l_lock);
2685         list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2686                 if (evt->cmnd && evt->cmnd->device == sdev) {
2687                         found_evt = evt;
2688                         break;
2689                 }
2690         }
2691         spin_unlock(&vhost->crq.l_lock);
2692
2693         if (!found_evt) {
2694                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2695                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2696                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2697                 return 0;
2698         }
2699
2700         if (vhost->logged_in) {
2701                 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2702                 evt->sync_iu = &rsp;
2703                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2704         }
2705
2706         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2707
2708         if (rsp_rc != 0) {
2709                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2710                 /* If failure is received, the host adapter is most likely going
2711                  through reset, return success so the caller will wait for the command
2712                  being cancelled to get returned */
2713                 return 0;
2714         }
2715
2716         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2717
2718         wait_for_completion(&evt->comp);
2719         status = be16_to_cpu(rsp.mad_common.status);
2720         spin_lock_irqsave(vhost->host->host_lock, flags);
2721         ibmvfc_free_event(evt);
2722         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2723
2724         if (status != IBMVFC_MAD_SUCCESS) {
2725                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2726                 switch (status) {
2727                 case IBMVFC_MAD_DRIVER_FAILED:
2728                 case IBMVFC_MAD_CRQ_ERROR:
2729                         /* Host adapter most likely going through reset, return success to
2730                          the caller will wait for the command being cancelled to get returned */
2731                         return 0;
2732                 default:
2733                         return -EIO;
2734                 };
2735         }
2736
2737         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2738         return 0;
2739 }
2740
2741 /**
2742  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2743  * @sdev:       scsi device to cancel commands
2744  * @type:       type of error recovery being performed
2745  *
2746  * This sends a cancel to the VIOS for the specified device. This does
2747  * NOT send any abort to the actual device. That must be done separately.
2748  *
2749  * Returns:
2750  *      0 on success / other on failure
2751  **/
2752 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2753 {
2754         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2755
2756         if (vhost->mq_enabled && vhost->using_channels)
2757                 return ibmvfc_cancel_all_mq(sdev, type);
2758         else
2759                 return ibmvfc_cancel_all_sq(sdev, type);
2760 }
2761
2762 /**
2763  * ibmvfc_match_key - Match function for specified cancel key
2764  * @evt:        ibmvfc event struct
2765  * @key:        cancel key to match
2766  *
2767  * Returns:
2768  *      1 if event matches key / 0 if event does not match key
2769  **/
2770 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2771 {
2772         unsigned long cancel_key = (unsigned long)key;
2773
2774         if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2775             be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2776                 return 1;
2777         return 0;
2778 }
2779
2780 /**
2781  * ibmvfc_match_evt - Match function for specified event
2782  * @evt:        ibmvfc event struct
2783  * @match:      event to match
2784  *
2785  * Returns:
2786  *      1 if event matches key / 0 if event does not match key
2787  **/
2788 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2789 {
2790         if (evt == match)
2791                 return 1;
2792         return 0;
2793 }
2794
2795 /**
2796  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2797  * @sdev:       scsi device to abort commands
2798  *
2799  * This sends an Abort Task Set to the VIOS for the specified device. This does
2800  * NOT send any cancel to the VIOS. That must be done separately.
2801  *
2802  * Returns:
2803  *      0 on success / other on failure
2804  **/
2805 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2806 {
2807         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2808         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2809         struct ibmvfc_cmd *tmf;
2810         struct ibmvfc_event *evt, *found_evt;
2811         union ibmvfc_iu rsp_iu;
2812         struct ibmvfc_fcp_cmd_iu *iu;
2813         struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2814         int rc, rsp_rc = -EBUSY;
2815         unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2816         int rsp_code = 0;
2817
2818         found_evt = NULL;
2819         spin_lock_irqsave(vhost->host->host_lock, flags);
2820         spin_lock(&vhost->crq.l_lock);
2821         list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2822                 if (evt->cmnd && evt->cmnd->device == sdev) {
2823                         found_evt = evt;
2824                         break;
2825                 }
2826         }
2827         spin_unlock(&vhost->crq.l_lock);
2828
2829         if (!found_evt) {
2830                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2831                         sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2832                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2833                 return 0;
2834         }
2835
2836         if (vhost->state == IBMVFC_ACTIVE) {
2837                 evt = ibmvfc_get_event(&vhost->crq);
2838                 if (!evt) {
2839                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2840                         return -ENOMEM;
2841                 }
2842                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2843                 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2844                 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2845
2846                 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2847                         tmf->target_wwpn = cpu_to_be64(rport->port_name);
2848                 iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2849                 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2850                 evt->sync_iu = &rsp_iu;
2851
2852                 tmf->correlation = cpu_to_be64((u64)evt);
2853
2854                 init_completion(&evt->comp);
2855                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2856         }
2857
2858         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2859
2860         if (rsp_rc != 0) {
2861                 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2862                 return -EIO;
2863         }
2864
2865         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2866         timeout = wait_for_completion_timeout(&evt->comp, timeout);
2867
2868         if (!timeout) {
2869                 rc = ibmvfc_cancel_all(sdev, 0);
2870                 if (!rc) {
2871                         rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2872                         if (rc == SUCCESS)
2873                                 rc = 0;
2874                 }
2875
2876                 if (rc) {
2877                         sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2878                         ibmvfc_reset_host(vhost);
2879                         rsp_rc = -EIO;
2880                         rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2881
2882                         if (rc == SUCCESS)
2883                                 rsp_rc = 0;
2884
2885                         rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2886                         if (rc != SUCCESS) {
2887                                 spin_lock_irqsave(vhost->host->host_lock, flags);
2888                                 ibmvfc_hard_reset_host(vhost);
2889                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2890                                 rsp_rc = 0;
2891                         }
2892
2893                         goto out;
2894                 }
2895         }
2896
2897         if (rsp_iu.cmd.status)
2898                 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2899
2900         if (rsp_code) {
2901                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2902                         rsp_code = fc_rsp->data.info.rsp_code;
2903
2904                 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2905                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2906                             ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2907                             be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2908                             fc_rsp->scsi_status);
2909                 rsp_rc = -EIO;
2910         } else
2911                 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2912
2913 out:
2914         spin_lock_irqsave(vhost->host->host_lock, flags);
2915         ibmvfc_free_event(evt);
2916         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2917         return rsp_rc;
2918 }
2919
2920 /**
2921  * ibmvfc_eh_abort_handler - Abort a command
2922  * @cmd:        scsi command to abort
2923  *
2924  * Returns:
2925  *      SUCCESS / FAST_IO_FAIL / FAILED
2926  **/
2927 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2928 {
2929         struct scsi_device *sdev = cmd->device;
2930         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2931         int cancel_rc, block_rc;
2932         int rc = FAILED;
2933
2934         ENTER;
2935         block_rc = fc_block_scsi_eh(cmd);
2936         ibmvfc_wait_while_resetting(vhost);
2937         if (block_rc != FAST_IO_FAIL) {
2938                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2939                 ibmvfc_abort_task_set(sdev);
2940         } else
2941                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2942
2943         if (!cancel_rc)
2944                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2945
2946         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2947                 rc = FAST_IO_FAIL;
2948
2949         LEAVE;
2950         return rc;
2951 }
2952
2953 /**
2954  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2955  * @cmd:        scsi command struct
2956  *
2957  * Returns:
2958  *      SUCCESS / FAST_IO_FAIL / FAILED
2959  **/
2960 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2961 {
2962         struct scsi_device *sdev = cmd->device;
2963         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2964         int cancel_rc, block_rc, reset_rc = 0;
2965         int rc = FAILED;
2966
2967         ENTER;
2968         block_rc = fc_block_scsi_eh(cmd);
2969         ibmvfc_wait_while_resetting(vhost);
2970         if (block_rc != FAST_IO_FAIL) {
2971                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2972                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2973         } else
2974                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2975
2976         if (!cancel_rc && !reset_rc)
2977                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2978
2979         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2980                 rc = FAST_IO_FAIL;
2981
2982         LEAVE;
2983         return rc;
2984 }
2985
2986 /**
2987  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2988  * @sdev:       scsi device struct
2989  * @data:       return code
2990  *
2991  **/
2992 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2993 {
2994         unsigned long *rc = data;
2995         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2996 }
2997
2998 /**
2999  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
3000  * @sdev:       scsi device struct
3001  * @data:       return code
3002  *
3003  **/
3004 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
3005 {
3006         unsigned long *rc = data;
3007         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
3008 }
3009
3010 /**
3011  * ibmvfc_eh_target_reset_handler - Reset the target
3012  * @cmd:        scsi command struct
3013  *
3014  * Returns:
3015  *      SUCCESS / FAST_IO_FAIL / FAILED
3016  **/
3017 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
3018 {
3019         struct scsi_device *sdev = cmd->device;
3020         struct ibmvfc_host *vhost = shost_priv(sdev->host);
3021         struct scsi_target *starget = scsi_target(sdev);
3022         int block_rc;
3023         int reset_rc = 0;
3024         int rc = FAILED;
3025         unsigned long cancel_rc = 0;
3026
3027         ENTER;
3028         block_rc = fc_block_scsi_eh(cmd);
3029         ibmvfc_wait_while_resetting(vhost);
3030         if (block_rc != FAST_IO_FAIL) {
3031                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
3032                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
3033         } else
3034                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
3035
3036         if (!cancel_rc && !reset_rc)
3037                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
3038
3039         if (block_rc == FAST_IO_FAIL && rc != FAILED)
3040                 rc = FAST_IO_FAIL;
3041
3042         LEAVE;
3043         return rc;
3044 }
3045
3046 /**
3047  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
3048  * @cmd:        struct scsi_cmnd having problems
3049  *
3050  **/
3051 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
3052 {
3053         int rc;
3054         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
3055
3056         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
3057         rc = ibmvfc_issue_fc_host_lip(vhost->host);
3058
3059         return rc ? FAILED : SUCCESS;
3060 }
3061
3062 /**
3063  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
3064  * @rport:              rport struct
3065  *
3066  * Return value:
3067  *      none
3068  **/
3069 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
3070 {
3071         struct Scsi_Host *shost = rport_to_shost(rport);
3072         struct ibmvfc_host *vhost = shost_priv(shost);
3073         struct fc_rport *dev_rport;
3074         struct scsi_device *sdev;
3075         struct ibmvfc_target *tgt;
3076         unsigned long rc, flags;
3077         unsigned int found;
3078
3079         ENTER;
3080         shost_for_each_device(sdev, shost) {
3081                 dev_rport = starget_to_rport(scsi_target(sdev));
3082                 if (dev_rport != rport)
3083                         continue;
3084                 ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
3085         }
3086
3087         rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
3088
3089         if (rc == FAILED)
3090                 ibmvfc_issue_fc_host_lip(shost);
3091
3092         spin_lock_irqsave(shost->host_lock, flags);
3093         found = 0;
3094         list_for_each_entry(tgt, &vhost->targets, queue) {
3095                 if (tgt->scsi_id == rport->port_id) {
3096                         found++;
3097                         break;
3098                 }
3099         }
3100
3101         if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3102                 /*
3103                  * If we get here, that means we previously attempted to send
3104                  * an implicit logout to the target but it failed, most likely
3105                  * due to I/O being pending, so we need to send it again
3106                  */
3107                 ibmvfc_del_tgt(tgt);
3108                 ibmvfc_reinit_host(vhost);
3109         }
3110
3111         spin_unlock_irqrestore(shost->host_lock, flags);
3112         LEAVE;
3113 }
3114
/* Async event descriptor table: name, event code, and the log level at
 * which the event is reported by ibmvfc_handle_async() */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
3130
/* Fallback descriptor for async events not present in ae_desc[] */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
3134
3135 /**
3136  * ibmvfc_get_ae_desc - Get text description for async event
3137  * @ae: async event
3138  *
3139  **/
3140 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3141 {
3142         int i;
3143
3144         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3145                 if (ae_desc[i].ae == ae)
3146                         return &ae_desc[i];
3147
3148         return &unknown_ae;
3149 }
3150
/* Link state descriptor table mapping ibmvfc_ae_link_state to a
 * human-readable suffix for async event log messages */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
3160
3161 /**
3162  * ibmvfc_get_link_state - Get text description for link state
3163  * @state:      link state
3164  *
3165  **/
3166 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3167 {
3168         int i;
3169
3170         for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3171                 if (link_desc[i].state == state)
3172                         return link_desc[i].desc;
3173
3174         return "";
3175 }
3176
3177 /**
3178  * ibmvfc_handle_async - Handle an async event from the adapter
3179  * @crq:        crq to process
3180  * @vhost:      ibmvfc host struct
3181  *
3182  **/
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
				struct ibmvfc_host *vhost)
{
	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
	struct ibmvfc_target *tgt;

	/* Log at the severity the event's descriptor prescribes */
	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
		   ibmvfc_get_link_state(crq->link_state));

	switch (be64_to_cpu(crq->event)) {
	case IBMVFC_AE_RESUME:
		/* Adapter resumed: action depends on the reported link state */
		switch (crq->link_state) {
		case IBMVFC_AE_LS_LINK_DOWN:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			break;
		case IBMVFC_AE_LS_LINK_DEAD:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			break;
		case IBMVFC_AE_LS_LINK_UP:
		case IBMVFC_AE_LS_LINK_BOUNCED:
		default:
			/* Link is (back) up: schedule a delayed host reset */
			vhost->events_to_log |= IBMVFC_AE_LINKUP;
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
			break;
		}

		break;
	case IBMVFC_AE_LINK_UP:
		vhost->events_to_log |= IBMVFC_AE_LINKUP;
		vhost->delay_init = 1;
		__ibmvfc_reset_host(vhost);
		break;
	case IBMVFC_AE_SCN_FABRIC:
	case IBMVFC_AE_SCN_DOMAIN:
		/* Fabric/domain level change: reset unless already halted or worse */
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		if (vhost->state < IBMVFC_HALTED) {
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
		}
		break;
	case IBMVFC_AE_SCN_NPORT:
	case IBMVFC_AE_SCN_GROUP:
		/* Port/group level change: rediscovery suffices, no full reset */
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		ibmvfc_reinit_host(vhost);
		break;
	case IBMVFC_AE_ELS_LOGO:
	case IBMVFC_AE_ELS_PRLO:
	case IBMVFC_AE_ELS_PLOGI:
		/*
		 * Find the target(s) this ELS applies to. A zero
		 * scsi_id/wwpn/node_name field is treated as a wildcard
		 * (its check is skipped); if all three are zero we stop
		 * scanning entirely.
		 */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
				break;
			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
				continue;
			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
				continue;
			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
				continue;
			/* Remember a LOGO seen while a login is pending */
			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
				tgt->logo_rcvd = 1;
			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
				ibmvfc_del_tgt(tgt);
				ibmvfc_reinit_host(vhost);
			}
		}
		break;
	case IBMVFC_AE_LINK_DOWN:
	case IBMVFC_AE_ADAPTER_FAILED:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		break;
	case IBMVFC_AE_LINK_DEAD:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	case IBMVFC_AE_HALT:
		ibmvfc_link_down(vhost, IBMVFC_HALTED);
		break;
	default:
		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
		break;
	}
}
3266
3267 /**
3268  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3269  * @crq:        Command/Response queue
3270  * @vhost:      ibmvfc host struct
3271  * @evt_doneq:  Event done queue
3272  *
3273 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
			      struct list_head *evt_doneq)
{
	long rc;
	/* ioba carries our correlation token: the originating event pointer */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		/* Transport event: connection to the partner was disrupted */
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
			vhost->client_migrated = 1;

			/* Requeue everything outstanding and ask the work
			 * thread to re-enable the CRQ
			 */
			scsi_block_requests(vhost->host);
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
			wake_up(&vhost->work_wait_q);
		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		} else {
			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		/* Command response: fall through to the completion path below */
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	/* Guard against completing the same event twice */
	if (unlikely(atomic_dec_if_positive(&evt->active))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	/* Defer completion: hand the event to the caller's done queue */
	spin_lock(&evt->queue->l_lock);
	list_move_tail(&evt->queue_list, evt_doneq);
	spin_unlock(&evt->queue->l_lock);
}
3353
3354 /**
3355  * ibmvfc_scan_finished - Check if the device scan is done.
3356  * @shost:      scsi host struct
3357  * @time:       current elapsed time
3358  *
3359  * Returns:
3360  *      0 if scan is not done / 1 if scan is done
3361  **/
3362 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3363 {
3364         unsigned long flags;
3365         struct ibmvfc_host *vhost = shost_priv(shost);
3366         int done = 0;
3367
3368         spin_lock_irqsave(shost->host_lock, flags);
3369         if (!vhost->scan_timeout)
3370                 done = 1;
3371         else if (time >= (vhost->scan_timeout * HZ)) {
3372                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3373                          "continuing initialization\n", vhost->scan_timeout);
3374                 done = 1;
3375         }
3376
3377         if (vhost->scan_complete) {
3378                 vhost->scan_timeout = init_timeout;
3379                 done = 1;
3380         }
3381         spin_unlock_irqrestore(shost->host_lock, flags);
3382         return done;
3383 }
3384
3385 /**
3386  * ibmvfc_slave_alloc - Setup the device's task set value
3387  * @sdev:       struct scsi_device device to configure
3388  *
3389  * Set the device's task set value so that error handling works as
3390  * expected.
3391  *
3392  * Returns:
3393  *      0 on success / -ENXIO if device does not exist
3394  **/
3395 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3396 {
3397         struct Scsi_Host *shost = sdev->host;
3398         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3399         struct ibmvfc_host *vhost = shost_priv(shost);
3400         unsigned long flags = 0;
3401
3402         if (!rport || fc_remote_port_chkready(rport))
3403                 return -ENXIO;
3404
3405         spin_lock_irqsave(shost->host_lock, flags);
3406         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3407         spin_unlock_irqrestore(shost->host_lock, flags);
3408         return 0;
3409 }
3410
3411 /**
3412  * ibmvfc_target_alloc - Setup the target's task set value
3413  * @starget:    struct scsi_target
3414  *
3415  * Set the target's task set value so that error handling works as
3416  * expected.
3417  *
3418  * Returns:
3419  *      0 on success / -ENXIO if device does not exist
3420  **/
3421 static int ibmvfc_target_alloc(struct scsi_target *starget)
3422 {
3423         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3424         struct ibmvfc_host *vhost = shost_priv(shost);
3425         unsigned long flags = 0;
3426
3427         spin_lock_irqsave(shost->host_lock, flags);
3428         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3429         spin_unlock_irqrestore(shost->host_lock, flags);
3430         return 0;
3431 }
3432
3433 /**
3434  * ibmvfc_slave_configure - Configure the device
3435  * @sdev:       struct scsi_device device to configure
3436  *
3437  * Enable allow_restart for a device if it is a disk. Adjust the
3438  * queue_depth here also.
3439  *
3440  * Returns:
3441  *      0
3442  **/
3443 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3444 {
3445         struct Scsi_Host *shost = sdev->host;
3446         unsigned long flags = 0;
3447
3448         spin_lock_irqsave(shost->host_lock, flags);
3449         if (sdev->type == TYPE_DISK) {
3450                 sdev->allow_restart = 1;
3451                 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3452         }
3453         spin_unlock_irqrestore(shost->host_lock, flags);
3454         return 0;
3455 }
3456
3457 /**
3458  * ibmvfc_change_queue_depth - Change the device's queue depth
3459  * @sdev:       scsi device struct
3460  * @qdepth:     depth to set
3461  *
3462  * Return value:
3463  *      actual depth set
3464  **/
3465 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3466 {
3467         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3468                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3469
3470         return scsi_change_queue_depth(sdev, qdepth);
3471 }
3472
3473 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3474                                                  struct device_attribute *attr, char *buf)
3475 {
3476         struct Scsi_Host *shost = class_to_shost(dev);
3477         struct ibmvfc_host *vhost = shost_priv(shost);
3478
3479         return snprintf(buf, PAGE_SIZE, "%s\n",
3480                         vhost->login_buf->resp.partition_name);
3481 }
3482
3483 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3484                                             struct device_attribute *attr, char *buf)
3485 {
3486         struct Scsi_Host *shost = class_to_shost(dev);
3487         struct ibmvfc_host *vhost = shost_priv(shost);
3488
3489         return snprintf(buf, PAGE_SIZE, "%s\n",
3490                         vhost->login_buf->resp.device_name);
3491 }
3492
3493 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3494                                          struct device_attribute *attr, char *buf)
3495 {
3496         struct Scsi_Host *shost = class_to_shost(dev);
3497         struct ibmvfc_host *vhost = shost_priv(shost);
3498
3499         return snprintf(buf, PAGE_SIZE, "%s\n",
3500                         vhost->login_buf->resp.port_loc_code);
3501 }
3502
3503 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3504                                          struct device_attribute *attr, char *buf)
3505 {
3506         struct Scsi_Host *shost = class_to_shost(dev);
3507         struct ibmvfc_host *vhost = shost_priv(shost);
3508
3509         return snprintf(buf, PAGE_SIZE, "%s\n",
3510                         vhost->login_buf->resp.drc_name);
3511 }
3512
3513 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3514                                              struct device_attribute *attr, char *buf)
3515 {
3516         struct Scsi_Host *shost = class_to_shost(dev);
3517         struct ibmvfc_host *vhost = shost_priv(shost);
3518         return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3519 }
3520
3521 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3522                                              struct device_attribute *attr, char *buf)
3523 {
3524         struct Scsi_Host *shost = class_to_shost(dev);
3525         struct ibmvfc_host *vhost = shost_priv(shost);
3526         return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3527 }
3528
3529 /**
3530  * ibmvfc_show_log_level - Show the adapter's error logging level
3531  * @dev:        class device struct
3532  * @attr:       unused
3533  * @buf:        buffer
3534  *
3535  * Return value:
3536  *      number of bytes printed to buffer
3537  **/
3538 static ssize_t ibmvfc_show_log_level(struct device *dev,
3539                                      struct device_attribute *attr, char *buf)
3540 {
3541         struct Scsi_Host *shost = class_to_shost(dev);
3542         struct ibmvfc_host *vhost = shost_priv(shost);
3543         unsigned long flags = 0;
3544         int len;
3545
3546         spin_lock_irqsave(shost->host_lock, flags);
3547         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3548         spin_unlock_irqrestore(shost->host_lock, flags);
3549         return len;
3550 }
3551
3552 /**
3553  * ibmvfc_store_log_level - Change the adapter's error logging level
3554  * @dev:        class device struct
3555  * @attr:       unused
3556  * @buf:        buffer
3557  * @count:      buffer size
3558  *
3559  * Return value:
3560  *      number of bytes printed to buffer
3561  **/
3562 static ssize_t ibmvfc_store_log_level(struct device *dev,
3563                                       struct device_attribute *attr,
3564                                       const char *buf, size_t count)
3565 {
3566         struct Scsi_Host *shost = class_to_shost(dev);
3567         struct ibmvfc_host *vhost = shost_priv(shost);
3568         unsigned long flags = 0;
3569
3570         spin_lock_irqsave(shost->host_lock, flags);
3571         vhost->log_level = simple_strtoul(buf, NULL, 10);
3572         spin_unlock_irqrestore(shost->host_lock, flags);
3573         return strlen(buf);
3574 }
3575
3576 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3577                                          struct device_attribute *attr, char *buf)
3578 {
3579         struct Scsi_Host *shost = class_to_shost(dev);
3580         struct ibmvfc_host *vhost = shost_priv(shost);
3581         unsigned long flags = 0;
3582         int len;
3583
3584         spin_lock_irqsave(shost->host_lock, flags);
3585         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3586         spin_unlock_irqrestore(shost->host_lock, flags);
3587         return len;
3588 }
3589
3590 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3591                                          struct device_attribute *attr,
3592                                          const char *buf, size_t count)
3593 {
3594         struct Scsi_Host *shost = class_to_shost(dev);
3595         struct ibmvfc_host *vhost = shost_priv(shost);
3596         unsigned long flags = 0;
3597         unsigned int channels;
3598
3599         spin_lock_irqsave(shost->host_lock, flags);
3600         channels = simple_strtoul(buf, NULL, 10);
3601         vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3602         ibmvfc_hard_reset_host(vhost);
3603         spin_unlock_irqrestore(shost->host_lock, flags);
3604         return strlen(buf);
3605 }
3606
/* Host sysfs attributes: identity strings are read-only, log_level and
 * nr_scsi_channels are also writable by root.
 */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3617
3618 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3619 /**
3620  * ibmvfc_read_trace - Dump the adapter trace
3621  * @filp:               open sysfs file
3622  * @kobj:               kobject struct
3623  * @bin_attr:   bin_attribute struct
3624  * @buf:                buffer
3625  * @off:                offset
3626  * @count:              buffer size
3627  *
3628  * Return value:
3629  *      number of bytes printed to buffer
3630  **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* EOF once the requested offset is past the trace buffer */
	if (off > size)
		return 0;
	/* Clamp the read so it never runs off the end of the buffer */
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* Copy under host_lock so the trace isn't updated mid-read */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
3654
/* Binary sysfs attribute exposing the adapter trace buffer (read-only);
 * .size of 0 lets reads be bounded by ibmvfc_read_trace() itself.
 */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
3663 #endif
3664
/* NULL-terminated list of host sysfs attributes registered with the
 * SCSI midlayer via the shost_groups template field below.
 */
static struct attribute *ibmvfc_host_attrs[] = {
	&dev_attr_partition_name.attr,
	&dev_attr_device_name.attr,
	&dev_attr_port_loc_code.attr,
	&dev_attr_drc_name.attr,
	&dev_attr_npiv_version.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_log_level.attr,
	&dev_attr_nr_scsi_channels.attr,
	NULL
};

ATTRIBUTE_GROUPS(ibmvfc_host);
3678
/* SCSI midlayer host template: wires up command submission, error
 * handling, device setup and scan hooks for the virtual FC adapter.
 */
static const struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_groups = ibmvfc_host_groups,
	.track_queue_depth = 1,
};
3702
3703 /**
3704  * ibmvfc_next_async_crq - Returns the next entry in async queue
3705  * @vhost:      ibmvfc host struct
3706  *
3707  * Returns:
3708  *      Pointer to next entry in queue / NULL if empty
3709  **/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct ibmvfc_async_crq *crq;

	crq = &async_crq->msgs.async[async_crq->cur];
	/* Top bit of 'valid' is set by the partner when the entry is ready */
	if (crq->valid & 0x80) {
		/* Advance the cursor, wrapping at the end of the ring */
		if (++async_crq->cur == async_crq->size)
			async_crq->cur = 0;
		/* Read barrier before the caller consumes the entry payload */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3725
3726 /**
3727  * ibmvfc_next_crq - Returns the next entry in message queue
3728  * @vhost:      ibmvfc host struct
3729  *
3730  * Returns:
3731  *      Pointer to next entry in queue / NULL if empty
3732  **/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq;

	crq = &queue->msgs.crq[queue->cur];
	/* Top bit of 'valid' is set by the partner when the entry is ready */
	if (crq->valid & 0x80) {
		/* Advance the cursor, wrapping at the end of the ring */
		if (++queue->cur == queue->size)
			queue->cur = 0;
		/* Read barrier before the caller consumes the entry payload */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3748
3749 /**
3750  * ibmvfc_interrupt - Interrupt handler
3751  * @irq:                number of irq to handle, not used
3752  * @dev_instance: ibmvfc_host that received interrupt
3753  *
3754  * Returns:
3755  *      IRQ_HANDLED
3756  **/
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
{
	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Mask further interrupts and defer all CRQ draining to the tasklet;
	 * the tasklet re-enables interrupts when the queues are empty.
	 */
	vio_disable_interrupts(to_vio_dev(vhost->dev));
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return IRQ_HANDLED;
}
3768
3769 /**
3770  * ibmvfc_tasklet - Interrupt handler tasklet
3771  * @data:               ibmvfc host struct
3772  *
3773  * Returns:
3774  *      Nothing
3775  **/
static void ibmvfc_tasklet(void *data)
{
	struct ibmvfc_host *vhost = data;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq *crq;
	struct ibmvfc_async_crq *async;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	while (!done) {
		/* Pull all the valid messages off the async CRQ */
		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			/* Publish the cleared valid bit before moving on */
			wmb();
		}

		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		}

		/* Re-enable interrupts, then re-check both queues once more
		 * to close the race with an entry arriving just before the
		 * enable. Only stop when both queues are seen empty.
		 */
		vio_enable_interrupts(vdev);
		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* Complete the collected events outside the locks */
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}
3829
3830 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3831 {
3832         struct device *dev = scrq->vhost->dev;
3833         struct vio_dev *vdev = to_vio_dev(dev);
3834         unsigned long rc;
3835         int irq_action = H_ENABLE_VIO_INTERRUPT;
3836
3837         if (!enable)
3838                 irq_action = H_DISABLE_VIO_INTERRUPT;
3839
3840         rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3841                                 scrq->hw_irq, 0, 0);
3842
3843         if (rc)
3844                 dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3845                         enable ? "enable" : "disable", scrq->hwq_id, rc);
3846
3847         return rc;
3848 }
3849
3850 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3851                                struct list_head *evt_doneq)
3852 {
3853         struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3854
3855         switch (crq->valid) {
3856         case IBMVFC_CRQ_CMD_RSP:
3857                 break;
3858         case IBMVFC_CRQ_XPORT_EVENT:
3859                 return;
3860         default:
3861                 dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3862                 return;
3863         }
3864
3865         /* The only kind of payload CRQs we should get are responses to
3866          * things we send. Make sure this response is to something we
3867          * actually sent
3868          */
3869         if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3870                 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3871                         crq->ioba);
3872                 return;
3873         }
3874
3875         if (unlikely(atomic_dec_if_positive(&evt->active))) {
3876                 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3877                         crq->ioba);
3878                 return;
3879         }
3880
3881         spin_lock(&evt->queue->l_lock);
3882         list_move_tail(&evt->queue_list, evt_doneq);
3883         spin_unlock(&evt->queue->l_lock);
3884 }
3885
/* Return the next valid entry on a sub-CRQ, or NULL if it is empty;
 * mirrors ibmvfc_next_crq() for the per-hw-queue rings.
 */
static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;

	crq = &scrq->msgs.scrq[scrq->cur].crq;
	/* Top bit of 'valid' is set by the partner when the entry is ready */
	if (crq->valid & 0x80) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
		/* Read barrier before the caller consumes the entry payload */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3900
/* Drain every pending entry from a sub-CRQ and complete the resulting
 * events; uses the same enable-then-recheck pattern as ibmvfc_tasklet()
 * to close the race with entries arriving as interrupts are re-enabled.
 */
static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(scrq->q_lock, flags);
	while (!done) {
		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			/* Publish the cleared valid bit before moving on */
			wmb();
		}

		/* Re-enable the irq, then look once more before stopping */
		ibmvfc_toggle_scrq_irq(scrq, 1);
		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_toggle_scrq_irq(scrq, 0);
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}
	spin_unlock_irqrestore(scrq->q_lock, flags);

	/* Complete the collected events outside the queue lock */
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}
3935
3936 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3937 {
3938         struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3939
3940         ibmvfc_toggle_scrq_irq(scrq, 0);
3941         ibmvfc_drain_sub_crq(scrq);
3942
3943         return IRQ_HANDLED;
3944 }
3945
3946 /**
3947  * ibmvfc_init_tgt - Set the next init job step for the target
3948  * @tgt:                ibmvfc target struct
3949  * @job_step:   job step to perform
3950  *
3951  **/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
			    void (*job_step) (struct ibmvfc_target *))
{
	/* Only install the job step if the transition to INIT was allowed */
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
		tgt->job_step = job_step;
	/* Kick the work thread either way */
	wake_up(&tgt->vhost->work_wait_q);
}
3959
3960 /**
3961  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3962  * @tgt:                ibmvfc target struct
3963  * @job_step:   initialization job step
3964  *
3965  * Returns: 1 if step will be retried / 0 if not
3966  *
3967  **/
3968 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3969                                   void (*job_step) (struct ibmvfc_target *))
3970 {
3971         if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3972                 ibmvfc_del_tgt(tgt);
3973                 wake_up(&tgt->vhost->work_wait_q);
3974                 return 0;
3975         } else
3976                 ibmvfc_init_tgt(tgt, job_step);
3977         return 1;
3978 }
3979
/* Defined in FC-LS */
static const struct {
	int code;	/* PRLI response code (bits 11:8 of the parm flags) */
	int retry;	/* non-zero: the PRLI should be retried */
	int logged_in;	/* non-zero: the target counts as logged in */
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3996
3997 /**
3998  * ibmvfc_get_prli_rsp - Find PRLI response index
3999  * @flags:      PRLI response flags
4000  *
4001  **/
4002 static int ibmvfc_get_prli_rsp(u16 flags)
4003 {
4004         int i;
4005         int code = (flags & 0x0f00) >> 8;
4006
4007         for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
4008                 if (prli_rsp[i].code == code)
4009                         return i;
4010
4011         return 0;
4012 }
4013
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 * Inspects the PRLI MAD response and either records the target as
 * logged in (setting its FCP roles), retries the PRLI or PLOGI step,
 * or deletes the target. Always releases the event and the target
 * reference taken by the sender, then wakes the work thread.
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
	u32 status = be16_to_cpu(rsp->common.status);
	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, parms->flags, parms->service_parms);

		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
			/* Map the PRLI response code onto the retry/logged_in table */
			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
			if (prli_rsp[index].logged_in) {
				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
					tgt->need_login = 0;
					tgt->ids.roles = 0;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
					/* Fully logged in: rport can be added to the SCSI midlayer */
					tgt->add_rport = 1;
				} else
					/* Logged in but no image pair established */
					ibmvfc_del_tgt(tgt);
			} else if (prli_rsp[index].retry)
				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
			else
				ibmvfc_del_tgt(tgt);
		} else
			/* Target does not speak SCSI FCP */
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/*
		 * A PLOGI_REQUIRED error or a previously received LOGO means
		 * we must restart from PLOGI; other retryable errors repeat
		 * the PRLI. The retry return value (1 if retried) bumps the
		 * log level, quieting messages for expected retries.
		 */
		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (tgt->logo_rcvd)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4082
4083 /**
4084  * ibmvfc_tgt_send_prli - Send a process login
4085  * @tgt:        ibmvfc target struct
4086  *
4087  **/
4088 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4089 {
4090         struct ibmvfc_process_login *prli;
4091         struct ibmvfc_host *vhost = tgt->vhost;
4092         struct ibmvfc_event *evt;
4093
4094         if (vhost->discovery_threads >= disc_threads)
4095                 return;
4096
4097         kref_get(&tgt->kref);
4098         evt = ibmvfc_get_reserved_event(&vhost->crq);
4099         if (!evt) {
4100                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4101                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4102                 __ibmvfc_reset_host(vhost);
4103                 return;
4104         }
4105         vhost->discovery_threads++;
4106         ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4107         evt->tgt = tgt;
4108         prli = &evt->iu.prli;
4109         memset(prli, 0, sizeof(*prli));
4110         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4111                 prli->common.version = cpu_to_be32(2);
4112                 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4113         } else {
4114                 prli->common.version = cpu_to_be32(1);
4115         }
4116         prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4117         prli->common.length = cpu_to_be16(sizeof(*prli));
4118         prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4119
4120         prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4121         prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4122         prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4123         prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4124
4125         if (cls3_error)
4126                 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4127
4128         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4129         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4130                 vhost->discovery_threads--;
4131                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4132                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4133         } else
4134                 tgt_dbg(tgt, "Sent process login\n");
4135 }
4136
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 * On success, caches the target's names and service parameters and
 * queues the PRLI step; on failure, either retries the PLOGI or
 * deletes the target. Always releases the event and the target
 * reference taken by the sender, then wakes the work thread.
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		/* WWPN changed underneath us: request a full re-discovery */
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* PLOGI complete; next discovery step is Process Login */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Retry bumps the log level, quieting expected retry noise */
		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4194
4195 /**
4196  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4197  * @tgt:        ibmvfc target struct
4198  *
4199  **/
4200 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4201 {
4202         struct ibmvfc_port_login *plogi;
4203         struct ibmvfc_host *vhost = tgt->vhost;
4204         struct ibmvfc_event *evt;
4205
4206         if (vhost->discovery_threads >= disc_threads)
4207                 return;
4208
4209         kref_get(&tgt->kref);
4210         tgt->logo_rcvd = 0;
4211         evt = ibmvfc_get_reserved_event(&vhost->crq);
4212         if (!evt) {
4213                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4214                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4215                 __ibmvfc_reset_host(vhost);
4216                 return;
4217         }
4218         vhost->discovery_threads++;
4219         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4220         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4221         evt->tgt = tgt;
4222         plogi = &evt->iu.plogi;
4223         memset(plogi, 0, sizeof(*plogi));
4224         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4225                 plogi->common.version = cpu_to_be32(2);
4226                 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4227         } else {
4228                 plogi->common.version = cpu_to_be32(1);
4229         }
4230         plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4231         plogi->common.length = cpu_to_be16(sizeof(*plogi));
4232         plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4233
4234         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4235                 vhost->discovery_threads--;
4236                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4237                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4238         } else
4239                 tgt_dbg(tgt, "Sent port login\n");
4240 }
4241
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Unless the driver itself failed the MAD, the target is re-initialized
 * to restart discovery with a PLOGI, regardless of whether the logout
 * succeeded. Releases the event and the sender's target reference.
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = be16_to_cpu(rsp->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver failure: drop the reference and bail without re-init */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	}

	/* Start the target's discovery over from PLOGI */
	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
4276
4277 /**
4278  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4279  * @tgt:                ibmvfc target struct
4280  * @done:               Routine to call when the event is responded to
4281  *
4282  * Returns:
4283  *      Allocated and initialized ibmvfc_event struct
4284  **/
4285 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4286                                                                  void (*done) (struct ibmvfc_event *))
4287 {
4288         struct ibmvfc_implicit_logout *mad;
4289         struct ibmvfc_host *vhost = tgt->vhost;
4290         struct ibmvfc_event *evt;
4291
4292         kref_get(&tgt->kref);
4293         evt = ibmvfc_get_reserved_event(&vhost->crq);
4294         if (!evt)
4295                 return NULL;
4296         ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4297         evt->tgt = tgt;
4298         mad = &evt->iu.implicit_logout;
4299         memset(mad, 0, sizeof(*mad));
4300         mad->common.version = cpu_to_be32(1);
4301         mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4302         mad->common.length = cpu_to_be16(sizeof(*mad));
4303         mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4304         return evt;
4305 }
4306
4307 /**
4308  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4309  * @tgt:                ibmvfc target struct
4310  *
4311  **/
4312 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4313 {
4314         struct ibmvfc_host *vhost = tgt->vhost;
4315         struct ibmvfc_event *evt;
4316
4317         if (vhost->discovery_threads >= disc_threads)
4318                 return;
4319
4320         vhost->discovery_threads++;
4321         evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4322                                                    ibmvfc_tgt_implicit_logout_done);
4323         if (!evt) {
4324                 vhost->discovery_threads--;
4325                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4326                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4327                 __ibmvfc_reset_host(vhost);
4328                 return;
4329         }
4330
4331         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4332         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4333                 vhost->discovery_threads--;
4334                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4335                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4336         } else
4337                 tgt_dbg(tgt, "Sent Implicit Logout\n");
4338 }
4339
/**
 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Marks the target for rport deletion, with or without a follow-up
 * logout depending on whether this implicit logout succeeded, then
 * wakes the work thread to perform the deletion.
 **/
static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);

	/*
	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
	 * driver in which case we need to free up all the targets. If we are
	 * not unloading, we will still go through a hard reset to get out of
	 * offline state, so there is no need to track the old targets in that
	 * case.
	 */
	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	else
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);

	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
4371
4372 /**
4373  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4374  * @tgt:                ibmvfc target struct
4375  *
4376  **/
4377 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4378 {
4379         struct ibmvfc_host *vhost = tgt->vhost;
4380         struct ibmvfc_event *evt;
4381
4382         if (!vhost->logged_in) {
4383                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4384                 return;
4385         }
4386
4387         if (vhost->discovery_threads >= disc_threads)
4388                 return;
4389
4390         vhost->discovery_threads++;
4391         evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4392                                                    ibmvfc_tgt_implicit_logout_and_del_done);
4393
4394         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4395         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4396                 vhost->discovery_threads--;
4397                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4398                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4399         } else
4400                 tgt_dbg(tgt, "Sent Implicit Logout\n");
4401 }
4402
/**
 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
 * @evt:	ibmvfc event struct
 *
 * On success, adopts the target's new SCSI ID and cached service
 * parameters and queues the PRLI step; on failure, retries the move
 * login. Releases the event and the sender's target reference.
 **/
static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		/* The target now lives at its new SCSI ID */
		tgt->scsi_id = tgt->new_scsi_id;
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Login moved; next discovery step is Process Login */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Retry bumps the log level, quieting expected retry noise */
		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);

		tgt_log(tgt, level,
			"Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
			tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
			status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4451
4452
4453 /**
4454  * ibmvfc_tgt_move_login - Initiate a move login for specified target
4455  * @tgt:                ibmvfc target struct
4456  *
4457  **/
4458 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4459 {
4460         struct ibmvfc_host *vhost = tgt->vhost;
4461         struct ibmvfc_move_login *move;
4462         struct ibmvfc_event *evt;
4463
4464         if (vhost->discovery_threads >= disc_threads)
4465                 return;
4466
4467         kref_get(&tgt->kref);
4468         evt = ibmvfc_get_reserved_event(&vhost->crq);
4469         if (!evt) {
4470                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4471                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4472                 __ibmvfc_reset_host(vhost);
4473                 return;
4474         }
4475         vhost->discovery_threads++;
4476         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4477         ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4478         evt->tgt = tgt;
4479         move = &evt->iu.move_login;
4480         memset(move, 0, sizeof(*move));
4481         move->common.version = cpu_to_be32(1);
4482         move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4483         move->common.length = cpu_to_be16(sizeof(*move));
4484
4485         move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4486         move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4487         move->wwpn = cpu_to_be64(tgt->wwpn);
4488         move->node_name = cpu_to_be64(tgt->ids.node_name);
4489
4490         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4491                 vhost->discovery_threads--;
4492                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4493                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4494         } else
4495                 tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4496 }
4497
4498 /**
4499  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4500  * @mad:        ibmvfc passthru mad struct
4501  * @tgt:        ibmvfc target struct
4502  *
4503  * Returns:
4504  *      1 if PLOGI needed / 0 if PLOGI not needed
4505  **/
4506 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4507                                     struct ibmvfc_target *tgt)
4508 {
4509         if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4510                 return 1;
4511         if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4512                 return 1;
4513         if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4514                 return 1;
4515         return 0;
4516 }
4517
/**
 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
 * @evt:	ibmvfc event struct
 *
 * Stops the ADISC cancel watchdog timer and, on success, deletes the
 * target if its identity no longer matches our cached values. Any
 * failure also deletes the target. Releases the event and the sender's
 * target reference.
 **/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);
	u8 fc_reason, fc_explain;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	/* The ADISC returned before the cancel watchdog fired */
	del_timer(&tgt->timer);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "ADISC succeeded\n");
		if (ibmvfc_adisc_needs_plogi(mad, tgt))
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_FAILED:
	default:
		ibmvfc_del_tgt(tgt);
		/* Reason/explanation bytes from word 1 of the ELS response */
		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
			 ibmvfc_get_fc_type(fc_reason), fc_reason,
			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4560
/**
 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
 * @evt:		ibmvfc event struct
 *
 * Sets up the passthru MAD's self-referential DMA descriptors: the
 * command IU and FC IU payload/response buffers live inside the MAD
 * itself, so their I/O addresses are computed as offsets from the
 * event's own ioba.
 **/
static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
{
	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	/* MAD length excludes the embedded fc_iu and iu buffers */
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
		offsetof(struct ibmvfc_passthru_fc_iu, payload));
	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
		offsetof(struct ibmvfc_passthru_fc_iu, response));
	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
}
4588
4589 /**
4590  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4591  * @evt:                ibmvfc event struct
4592  *
4593  * Just cleanup this event struct. Everything else is handled by
4594  * the ADISC completion handler. If the ADISC never actually comes
4595  * back, we still have the timer running on the ADISC event struct
4596  * which will fire and cause the CRQ to get reset.
4597  *
4598  **/
4599 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4600 {
4601         struct ibmvfc_host *vhost = evt->vhost;
4602         struct ibmvfc_target *tgt = evt->tgt;
4603
4604         tgt_dbg(tgt, "ADISC cancel complete\n");
4605         vhost->abort_threads--;
4606         ibmvfc_free_event(evt);
4607         kref_put(&tgt->kref, ibmvfc_release_tgt);
4608         wake_up(&vhost->work_wait_q);
4609 }
4610
/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @t:		ibmvfc target struct
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target.
 *
 * Runs in timer context; the host lock is held for the whole body.
 **/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/*
	 * Only cancel if we are still waiting on this ADISC during target
	 * discovery and have not already used up the abort threads.
	 */
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	kref_get(&tgt->kref);
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		/* No reserved events left: fall back to a full host reset */
		tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		/* Version 2 of the MAD carries the target WWPN */
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
	/* Match the cancel key the ADISC was sent with */
	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		/* Cancel could not even be sent: reset the host to recover */
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4676
/**
 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
 * @tgt:		ibmvfc target struct
 *
 * When sending an ADISC we end up with two timers running. The
 * first timer is the timer in the ibmvfc target struct. If this
 * fires, we send a cancel to the target. The second timer is the
 * timer on the ibmvfc event for the ADISC, which is longer. If that
 * fires, it means the ADISC timed out and our attempt to cancel it
 * also failed, so we need to reset the CRQ.
 **/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrent discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		/* No reserved events left: back out and reset the host */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
		return;
	}
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;

	/* The ADISC ELS is tunneled to the target via a passthru MAD */
	ibmvfc_init_passthru(evt);
	mad = &evt->iu.passthru;
	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);

	/* ADISC payload carries our own port/node names and SCSI ID */
	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
	       sizeof(vhost->login_buf->resp.port_name));
	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
	       sizeof(vhost->login_buf->resp.node_name));
	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);

	/* Arm the cancel watchdog before handing the event to the VIOS */
	if (timer_pending(&tgt->timer))
		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
	else {
		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
		add_timer(&tgt->timer);
	}

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
		/* Send failed: stop the watchdog and undo bookkeeping */
		vhost->discovery_threads--;
		del_timer(&tgt->timer);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent ADISC\n");
}
4738
4739 /**
4740  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4741  * @evt:        ibmvfc event struct
4742  *
4743  **/
4744 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4745 {
4746         struct ibmvfc_target *tgt = evt->tgt;
4747         struct ibmvfc_host *vhost = evt->vhost;
4748         struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4749         u32 status = be16_to_cpu(rsp->common.status);
4750         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4751
4752         vhost->discovery_threads--;
4753         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4754         switch (status) {
4755         case IBMVFC_MAD_SUCCESS:
4756                 tgt_dbg(tgt, "Query Target succeeded\n");
4757                 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4758                         ibmvfc_del_tgt(tgt);
4759                 else
4760                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4761                 break;
4762         case IBMVFC_MAD_DRIVER_FAILED:
4763                 break;
4764         case IBMVFC_MAD_CRQ_ERROR:
4765                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4766                 break;
4767         case IBMVFC_MAD_FAILED:
4768         default:
4769                 if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4770                     be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4771                     be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4772                         ibmvfc_del_tgt(tgt);
4773                 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4774                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4775                 else
4776                         ibmvfc_del_tgt(tgt);
4777
4778                 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4779                         ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4780                         be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4781                         ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4782                         ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4783                         status);
4784                 break;
4785         }
4786
4787         kref_put(&tgt->kref, ibmvfc_release_tgt);
4788         ibmvfc_free_event(evt);
4789         wake_up(&vhost->work_wait_q);
4790 }
4791
4792 /**
4793  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4794  * @tgt:        ibmvfc target struct
4795  *
4796  **/
4797 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4798 {
4799         struct ibmvfc_query_tgt *query_tgt;
4800         struct ibmvfc_host *vhost = tgt->vhost;
4801         struct ibmvfc_event *evt;
4802
4803         if (vhost->discovery_threads >= disc_threads)
4804                 return;
4805
4806         kref_get(&tgt->kref);
4807         evt = ibmvfc_get_reserved_event(&vhost->crq);
4808         if (!evt) {
4809                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4810                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4811                 __ibmvfc_reset_host(vhost);
4812                 return;
4813         }
4814         vhost->discovery_threads++;
4815         evt->tgt = tgt;
4816         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4817         query_tgt = &evt->iu.query_tgt;
4818         memset(query_tgt, 0, sizeof(*query_tgt));
4819         query_tgt->common.version = cpu_to_be32(1);
4820         query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4821         query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4822         query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4823
4824         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4825         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4826                 vhost->discovery_threads--;
4827                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4828                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4829         } else
4830                 tgt_dbg(tgt, "Sent Query Target\n");
4831 }
4832
4833 /**
4834  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4835  * @vhost:              ibmvfc host struct
4836  * @target:             Holds SCSI ID to allocate target forand the WWPN
4837  *
4838  * Returns:
4839  *      0 on success / other on failure
4840  **/
4841 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4842                                struct ibmvfc_discover_targets_entry *target)
4843 {
4844         struct ibmvfc_target *stgt = NULL;
4845         struct ibmvfc_target *wtgt = NULL;
4846         struct ibmvfc_target *tgt;
4847         unsigned long flags;
4848         u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4849         u64 wwpn = be64_to_cpu(target->wwpn);
4850
4851         /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4852         spin_lock_irqsave(vhost->host->host_lock, flags);
4853         list_for_each_entry(tgt, &vhost->targets, queue) {
4854                 if (tgt->wwpn == wwpn) {
4855                         wtgt = tgt;
4856                         break;
4857                 }
4858         }
4859
4860         list_for_each_entry(tgt, &vhost->targets, queue) {
4861                 if (tgt->scsi_id == scsi_id) {
4862                         stgt = tgt;
4863                         break;
4864                 }
4865         }
4866
4867         if (wtgt && !stgt) {
4868                 /*
4869                  * A WWPN target has moved and we still are tracking the old
4870                  * SCSI ID.  The only way we should be able to get here is if
4871                  * we attempted to send an implicit logout for the old SCSI ID
4872                  * and it failed for some reason, such as there being I/O
4873                  * pending to the target. In this case, we will have already
4874                  * deleted the rport from the FC transport so we do a move
4875                  * login, which works even with I/O pending, however, if
4876                  * there is still I/O pending, it will stay outstanding, so
4877                  * we only do this if fast fail is disabled for the rport,
4878                  * otherwise we let terminate_rport_io clean up the port
4879                  * before we login at the new location.
4880                  */
4881                 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4882                         if (wtgt->move_login) {
4883                                 /*
4884                                  * Do a move login here. The old target is no longer
4885                                  * known to the transport layer We don't use the
4886                                  * normal ibmvfc_set_tgt_action to set this, as we
4887                                  * don't normally want to allow this state change.
4888                                  */
4889                                 wtgt->new_scsi_id = scsi_id;
4890                                 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4891                                 wtgt->init_retries = 0;
4892                                 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4893                         }
4894                         goto unlock_out;
4895                 } else {
4896                         tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4897                                 wtgt->action, wtgt->rport);
4898                 }
4899         } else if (stgt) {
4900                 if (tgt->need_login)
4901                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4902                 goto unlock_out;
4903         }
4904         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4905
4906         tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4907         memset(tgt, 0, sizeof(*tgt));
4908         tgt->scsi_id = scsi_id;
4909         tgt->wwpn = wwpn;
4910         tgt->vhost = vhost;
4911         tgt->need_login = 1;
4912         timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4913         kref_init(&tgt->kref);
4914         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4915         spin_lock_irqsave(vhost->host->host_lock, flags);
4916         tgt->cancel_key = vhost->task_set++;
4917         list_add_tail(&tgt->queue, &vhost->targets);
4918
4919 unlock_out:
4920         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4921         return 0;
4922 }
4923
4924 /**
4925  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4926  * @vhost:              ibmvfc host struct
4927  *
4928  * Returns:
4929  *      0 on success / other on failure
4930  **/
4931 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4932 {
4933         int i, rc;
4934
4935         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4936                 rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4937
4938         return rc;
4939 }
4940
4941 /**
4942  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4943  * @evt:        ibmvfc event struct
4944  *
4945  **/
4946 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4947 {
4948         struct ibmvfc_host *vhost = evt->vhost;
4949         struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4950         u32 mad_status = be16_to_cpu(rsp->common.status);
4951         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4952
4953         switch (mad_status) {
4954         case IBMVFC_MAD_SUCCESS:
4955                 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4956                 vhost->num_targets = be32_to_cpu(rsp->num_written);
4957                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4958                 break;
4959         case IBMVFC_MAD_FAILED:
4960                 level += ibmvfc_retry_host_init(vhost);
4961                 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4962                            ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4963                            be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4964                 break;
4965         case IBMVFC_MAD_DRIVER_FAILED:
4966                 break;
4967         default:
4968                 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4969                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4970                 break;
4971         }
4972
4973         ibmvfc_free_event(evt);
4974         wake_up(&vhost->work_wait_q);
4975 }
4976
4977 /**
4978  * ibmvfc_discover_targets - Send Discover Targets MAD
4979  * @vhost:      ibmvfc host struct
4980  *
4981  **/
4982 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4983 {
4984         struct ibmvfc_discover_targets *mad;
4985         struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
4986         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4987
4988         if (!evt) {
4989                 ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
4990                 ibmvfc_hard_reset_host(vhost);
4991                 return;
4992         }
4993
4994         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4995         mad = &evt->iu.discover_targets;
4996         memset(mad, 0, sizeof(*mad));
4997         mad->common.version = cpu_to_be32(1);
4998         mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4999         mad->common.length = cpu_to_be16(sizeof(*mad));
5000         mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
5001         mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
5002         mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
5003         mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
5004         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5005
5006         if (!ibmvfc_send_event(evt, vhost, default_timeout))
5007                 ibmvfc_dbg(vhost, "Sent discover targets\n");
5008         else
5009                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5010 }
5011
/**
 * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
 * @evt:	ibmvfc event struct
 *
 * On success, records how many sub-CRQ channels the VIOS granted and caches
 * the per-queue VIOS cookies, then advances the host state machine to the
 * query step. NOTE(review): on IBMVFC_MAD_FAILED the function returns
 * without waking work_wait_q; progress appears to rely on the retry
 * scheduled by ibmvfc_retry_host_init() — confirm against the work thread.
 **/
static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;
	int flags, active_queues, i;

	/* The response lives in the DMA'd setup buffer, so the event can be
	 * returned to the pool before parsing. */
	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
		flags = be32_to_cpu(setup->flags);
		vhost->do_enquiry = 0;
		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
		scrqs->active_queues = active_queues;

		if (flags & IBMVFC_CHANNELS_CANCELED) {
			ibmvfc_dbg(vhost, "Channels Canceled\n");
			vhost->using_channels = 0;
		} else {
			if (active_queues)
				vhost->using_channels = 1;
			/* Cache the VIOS-assigned handle for each granted queue */
			for (i = 0; i < active_queues; i++)
				scrqs->scrqs[i].vios_cookie =
					be64_to_cpu(setup->channel_handles[i]);

			ibmvfc_dbg(vhost, "Using %u channels\n",
				   vhost->scsi_scrqs.active_queues);
		}
		break;
	case IBMVFC_MAD_FAILED:
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Channel Setup failed\n");
		fallthrough;
	case IBMVFC_MAD_DRIVER_FAILED:
		return;
	default:
		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
			mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		return;
	}

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	wake_up(&vhost->work_wait_q);
}
5061
5062 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
5063 {
5064         struct ibmvfc_channel_setup_mad *mad;
5065         struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
5066         struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5067         struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
5068         unsigned int num_channels =
5069                 min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
5070         int level = IBMVFC_DEFAULT_LOG_LEVEL;
5071         int i;
5072
5073         if (!evt) {
5074                 ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
5075                 ibmvfc_hard_reset_host(vhost);
5076                 return;
5077         }
5078
5079         memset(setup_buf, 0, sizeof(*setup_buf));
5080         if (num_channels == 0)
5081                 setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
5082         else {
5083                 setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
5084                 for (i = 0; i < num_channels; i++)
5085                         setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
5086         }
5087
5088         ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
5089         mad = &evt->iu.channel_setup;
5090         memset(mad, 0, sizeof(*mad));
5091         mad->common.version = cpu_to_be32(1);
5092         mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
5093         mad->common.length = cpu_to_be16(sizeof(*mad));
5094         mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
5095         mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
5096
5097         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5098
5099         if (!ibmvfc_send_event(evt, vhost, default_timeout))
5100                 ibmvfc_dbg(vhost, "Sent channel setup\n");
5101         else
5102                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
5103 }
5104
5105 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
5106 {
5107         struct ibmvfc_host *vhost = evt->vhost;
5108         struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
5109         u32 mad_status = be16_to_cpu(rsp->common.status);
5110         int level = IBMVFC_DEFAULT_LOG_LEVEL;
5111
5112         switch (mad_status) {
5113         case IBMVFC_MAD_SUCCESS:
5114                 ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
5115                 vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
5116                 ibmvfc_free_event(evt);
5117                 break;
5118         case IBMVFC_MAD_FAILED:
5119                 level += ibmvfc_retry_host_init(vhost);
5120                 ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
5121                 fallthrough;
5122         case IBMVFC_MAD_DRIVER_FAILED:
5123                 ibmvfc_free_event(evt);
5124                 return;
5125         default:
5126                 dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
5127                         mad_status);
5128                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5129                 ibmvfc_free_event(evt);
5130                 return;
5131         }
5132
5133         ibmvfc_channel_setup(vhost);
5134 }
5135
5136 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
5137 {
5138         struct ibmvfc_channel_enquiry *mad;
5139         struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5140         int level = IBMVFC_DEFAULT_LOG_LEVEL;
5141
5142         if (!evt) {
5143                 ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
5144                 ibmvfc_hard_reset_host(vhost);
5145                 return;
5146         }
5147
5148         ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5149         mad = &evt->iu.channel_enquiry;
5150         memset(mad, 0, sizeof(*mad));
5151         mad->common.version = cpu_to_be32(1);
5152         mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
5153         mad->common.length = cpu_to_be16(sizeof(*mad));
5154
5155         if (mig_channels_only)
5156                 mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
5157         if (mig_no_less_channels)
5158                 mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
5159
5160         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5161
5162         if (!ibmvfc_send_event(evt, vhost, default_timeout))
5163                 ibmvfc_dbg(vhost, "Send channel enquiry\n");
5164         else
5165                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5166 }
5167
5168 /**
5169  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5170  * @evt:        ibmvfc event struct
5171  *
5172  **/
5173 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5174 {
5175         struct ibmvfc_host *vhost = evt->vhost;
5176         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5177         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5178         unsigned int npiv_max_sectors;
5179         int level = IBMVFC_DEFAULT_LOG_LEVEL;
5180
5181         switch (mad_status) {
5182         case IBMVFC_MAD_SUCCESS:
5183                 ibmvfc_free_event(evt);
5184                 break;
5185         case IBMVFC_MAD_FAILED:
5186                 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5187                         level += ibmvfc_retry_host_init(vhost);
5188                 else
5189                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5190                 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5191                            ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5192                                                 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5193                 ibmvfc_free_event(evt);
5194                 return;
5195         case IBMVFC_MAD_CRQ_ERROR:
5196                 ibmvfc_retry_host_init(vhost);
5197                 fallthrough;
5198         case IBMVFC_MAD_DRIVER_FAILED:
5199                 ibmvfc_free_event(evt);
5200                 return;
5201         default:
5202                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5203                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5204                 ibmvfc_free_event(evt);
5205                 return;
5206         }
5207
5208         vhost->client_migrated = 0;
5209
5210         if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5211                 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5212                         rsp->flags);
5213                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5214                 wake_up(&vhost->work_wait_q);
5215                 return;
5216         }
5217
5218         if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5219                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5220                         rsp->max_cmds);
5221                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5222                 wake_up(&vhost->work_wait_q);
5223                 return;
5224         }
5225
5226         vhost->logged_in = 1;
5227         npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5228         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5229                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5230                  rsp->drc_name, npiv_max_sectors);
5231
5232         fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5233         fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5234         fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5235         fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5236         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5237         fc_host_supported_classes(vhost->host) = 0;
5238         if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5239                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5240         if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5241                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5242         if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5243                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5244         fc_host_maxframe_size(vhost->host) =
5245                 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5246
5247         vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5248         vhost->host->max_sectors = npiv_max_sectors;
5249
5250         if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5251                 ibmvfc_channel_enquiry(vhost);
5252         } else {
5253                 vhost->do_enquiry = 0;
5254                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5255                 wake_up(&vhost->work_wait_q);
5256         }
5257 }
5258
5259 /**
5260  * ibmvfc_npiv_login - Sends NPIV login
5261  * @vhost:      ibmvfc host struct
5262  *
5263  **/
5264 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5265 {
5266         struct ibmvfc_npiv_login_mad *mad;
5267         struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5268
5269         if (!evt) {
5270                 ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
5271                 ibmvfc_hard_reset_host(vhost);
5272                 return;
5273         }
5274
5275         ibmvfc_gather_partition_info(vhost);
5276         ibmvfc_set_login_info(vhost);
5277         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5278
5279         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5280         mad = &evt->iu.npiv_login;
5281         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5282         mad->common.version = cpu_to_be32(1);
5283         mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5284         mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5285         mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5286         mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5287
5288         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5289
5290         if (!ibmvfc_send_event(evt, vhost, default_timeout))
5291                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
5292         else
5293                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5294 }
5295
5296 /**
5297  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5298  * @evt:                ibmvfc event struct
5299  *
5300  **/
5301 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5302 {
5303         struct ibmvfc_host *vhost = evt->vhost;
5304         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5305
5306         ibmvfc_free_event(evt);
5307
5308         switch (mad_status) {
5309         case IBMVFC_MAD_SUCCESS:
5310                 if (list_empty(&vhost->crq.sent) &&
5311                     vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5312                         ibmvfc_init_host(vhost);
5313                         return;
5314                 }
5315                 break;
5316         case IBMVFC_MAD_FAILED:
5317         case IBMVFC_MAD_NOT_SUPPORTED:
5318         case IBMVFC_MAD_CRQ_ERROR:
5319         case IBMVFC_MAD_DRIVER_FAILED:
5320         default:
5321                 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5322                 break;
5323         }
5324
5325         ibmvfc_hard_reset_host(vhost);
5326 }
5327
5328 /**
5329  * ibmvfc_npiv_logout - Issue an NPIV Logout
5330  * @vhost:              ibmvfc host struct
5331  *
5332  **/
5333 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5334 {
5335         struct ibmvfc_npiv_logout_mad *mad;
5336         struct ibmvfc_event *evt;
5337
5338         evt = ibmvfc_get_reserved_event(&vhost->crq);
5339         if (!evt) {
5340                 ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
5341                 ibmvfc_hard_reset_host(vhost);
5342                 return;
5343         }
5344
5345         ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5346
5347         mad = &evt->iu.npiv_logout;
5348         memset(mad, 0, sizeof(*mad));
5349         mad->common.version = cpu_to_be32(1);
5350         mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5351         mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5352
5353         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5354
5355         if (!ibmvfc_send_event(evt, vhost, default_timeout))
5356                 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5357         else
5358                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5359 }
5360
5361 /**
5362  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5363  * @vhost:              ibmvfc host struct
5364  *
5365  * Returns:
5366  *      1 if work to do / 0 if not
5367  **/
5368 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5369 {
5370         struct ibmvfc_target *tgt;
5371
5372         list_for_each_entry(tgt, &vhost->targets, queue) {
5373                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5374                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5375                         return 1;
5376         }
5377
5378         return 0;
5379 }
5380
5381 /**
5382  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5383  * @vhost:              ibmvfc host struct
5384  *
5385  * Returns:
5386  *      1 if work to do / 0 if not
5387  **/
5388 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5389 {
5390         struct ibmvfc_target *tgt;
5391
5392         list_for_each_entry(tgt, &vhost->targets, queue) {
5393                 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5394                     tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5395                         return 1;
5396         }
5397         return 0;
5398 }
5399
5400 /**
5401  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5402  * @vhost:              ibmvfc host struct
5403  *
5404  * Returns:
5405  *      1 if work to do / 0 if not
5406  **/
5407 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5408 {
5409         struct ibmvfc_target *tgt;
5410
5411         if (kthread_should_stop())
5412                 return 1;
5413         switch (vhost->action) {
5414         case IBMVFC_HOST_ACTION_NONE:
5415         case IBMVFC_HOST_ACTION_INIT_WAIT:
5416         case IBMVFC_HOST_ACTION_LOGO_WAIT:
5417                 return 0;
5418         case IBMVFC_HOST_ACTION_TGT_INIT:
5419         case IBMVFC_HOST_ACTION_QUERY_TGTS:
5420                 if (vhost->discovery_threads == disc_threads)
5421                         return 0;
5422                 list_for_each_entry(tgt, &vhost->targets, queue)
5423                         if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5424                                 return 1;
5425                 list_for_each_entry(tgt, &vhost->targets, queue)
5426                         if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5427                                 return 0;
5428                 return 1;
5429         case IBMVFC_HOST_ACTION_TGT_DEL:
5430         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5431                 if (vhost->discovery_threads == disc_threads)
5432                         return 0;
5433                 list_for_each_entry(tgt, &vhost->targets, queue)
5434                         if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5435                                 return 1;
5436                 list_for_each_entry(tgt, &vhost->targets, queue)
5437                         if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5438                                 return 0;
5439                 return 1;
5440         case IBMVFC_HOST_ACTION_LOGO:
5441         case IBMVFC_HOST_ACTION_INIT:
5442         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5443         case IBMVFC_HOST_ACTION_QUERY:
5444         case IBMVFC_HOST_ACTION_RESET:
5445         case IBMVFC_HOST_ACTION_REENABLE:
5446         default:
5447                 break;
5448         }
5449
5450         return 1;
5451 }
5452
5453 /**
5454  * ibmvfc_work_to_do - Is there task level work to do?
5455  * @vhost:              ibmvfc host struct
5456  *
5457  * Returns:
5458  *      1 if work to do / 0 if not
5459  **/
5460 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5461 {
5462         unsigned long flags;
5463         int rc;
5464
5465         spin_lock_irqsave(vhost->host->host_lock, flags);
5466         rc = __ibmvfc_work_to_do(vhost);
5467         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5468         return rc;
5469 }
5470
5471 /**
5472  * ibmvfc_log_ae - Log async events if necessary
5473  * @vhost:              ibmvfc host struct
5474  * @events:             events to log
5475  *
5476  **/
5477 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5478 {
5479         if (events & IBMVFC_AE_RSCN)
5480                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5481         if ((events & IBMVFC_AE_LINKDOWN) &&
5482             vhost->state >= IBMVFC_HALTED)
5483                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5484         if ((events & IBMVFC_AE_LINKUP) &&
5485             vhost->state == IBMVFC_INITIALIZING)
5486                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5487 }
5488
5489 /**
5490  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5491  * @tgt:                ibmvfc target struct
5492  *
5493  **/
5494 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5495 {
5496         struct ibmvfc_host *vhost = tgt->vhost;
5497         struct fc_rport *rport;
5498         unsigned long flags;
5499
5500         tgt_dbg(tgt, "Adding rport\n");
5501         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5502         spin_lock_irqsave(vhost->host->host_lock, flags);
5503
5504         if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5505                 tgt_dbg(tgt, "Deleting rport\n");
5506                 list_del(&tgt->queue);
5507                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5508                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5509                 fc_remote_port_delete(rport);
5510                 del_timer_sync(&tgt->timer);
5511                 kref_put(&tgt->kref, ibmvfc_release_tgt);
5512                 return;
5513         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5514                 tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5515                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5516                 tgt->rport = NULL;
5517                 tgt->init_retries = 0;
5518                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5519                 fc_remote_port_delete(rport);
5520                 return;
5521         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5522                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5523                 return;
5524         }
5525
5526         if (rport) {
5527                 tgt_dbg(tgt, "rport add succeeded\n");
5528                 tgt->rport = rport;
5529                 rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5530                 rport->supported_classes = 0;
5531                 tgt->target_id = rport->scsi_target_id;
5532                 if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5533                         rport->supported_classes |= FC_COS_CLASS1;
5534                 if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5535                         rport->supported_classes |= FC_COS_CLASS2;
5536                 if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5537                         rport->supported_classes |= FC_COS_CLASS3;
5538                 if (rport->rqst_q)
5539                         blk_queue_max_segments(rport->rqst_q, 1);
5540         } else
5541                 tgt_dbg(tgt, "rport add failed\n");
5542         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5543 }
5544
5545 /**
5546  * ibmvfc_do_work - Do task level work
5547  * @vhost:              ibmvfc host struct
5548  *
5549  **/
5550 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5551 {
5552         struct ibmvfc_target *tgt;
5553         unsigned long flags;
5554         struct fc_rport *rport;
5555         LIST_HEAD(purge);
5556         int rc;
5557
5558         ibmvfc_log_ae(vhost, vhost->events_to_log);
5559         spin_lock_irqsave(vhost->host->host_lock, flags);
5560         vhost->events_to_log = 0;
5561         switch (vhost->action) {
5562         case IBMVFC_HOST_ACTION_NONE:
5563         case IBMVFC_HOST_ACTION_LOGO_WAIT:
5564         case IBMVFC_HOST_ACTION_INIT_WAIT:
5565                 break;
5566         case IBMVFC_HOST_ACTION_RESET:
5567                 list_splice_init(&vhost->purge, &purge);
5568                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5569                 ibmvfc_complete_purge(&purge);
5570                 rc = ibmvfc_reset_crq(vhost);
5571
5572                 spin_lock_irqsave(vhost->host->host_lock, flags);
5573                 if (!rc || rc == H_CLOSED)
5574                         vio_enable_interrupts(to_vio_dev(vhost->dev));
5575                 if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5576                         /*
5577                          * The only action we could have changed to would have
5578                          * been reenable, in which case, we skip the rest of
5579                          * this path and wait until we've done the re-enable
5580                          * before sending the crq init.
5581                          */
5582                         vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5583
5584                         if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5585                             (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5586                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5587                                 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5588                         }
5589                 }
5590                 break;
5591         case IBMVFC_HOST_ACTION_REENABLE:
5592                 list_splice_init(&vhost->purge, &purge);
5593                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5594                 ibmvfc_complete_purge(&purge);
5595                 rc = ibmvfc_reenable_crq_queue(vhost);
5596
5597                 spin_lock_irqsave(vhost->host->host_lock, flags);
5598                 if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5599                         /*
5600                          * The only action we could have changed to would have
5601                          * been reset, in which case, we skip the rest of this
5602                          * path and wait until we've done the reset before
5603                          * sending the crq init.
5604                          */
5605                         vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5606                         if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5607                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5608                                 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5609                         }
5610                 }
5611                 break;
5612         case IBMVFC_HOST_ACTION_LOGO:
5613                 vhost->job_step(vhost);
5614                 break;
5615         case IBMVFC_HOST_ACTION_INIT:
5616                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5617                 if (vhost->delay_init) {
5618                         vhost->delay_init = 0;
5619                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5620                         ssleep(15);
5621                         return;
5622                 } else
5623                         vhost->job_step(vhost);
5624                 break;
5625         case IBMVFC_HOST_ACTION_QUERY:
5626                 list_for_each_entry(tgt, &vhost->targets, queue)
5627                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5628                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5629                 break;
5630         case IBMVFC_HOST_ACTION_QUERY_TGTS:
5631                 list_for_each_entry(tgt, &vhost->targets, queue) {
5632                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5633                                 tgt->job_step(tgt);
5634                                 break;
5635                         }
5636                 }
5637
5638                 if (!ibmvfc_dev_init_to_do(vhost))
5639                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5640                 break;
5641         case IBMVFC_HOST_ACTION_TGT_DEL:
5642         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5643                 list_for_each_entry(tgt, &vhost->targets, queue) {
5644                         if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5645                                 tgt->job_step(tgt);
5646                                 break;
5647                         }
5648                 }
5649
5650                 if (ibmvfc_dev_logo_to_do(vhost)) {
5651                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5652                         return;
5653                 }
5654
5655                 list_for_each_entry(tgt, &vhost->targets, queue) {
5656                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5657                                 tgt_dbg(tgt, "Deleting rport\n");
5658                                 rport = tgt->rport;
5659                                 tgt->rport = NULL;
5660                                 list_del(&tgt->queue);
5661                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5662                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5663                                 if (rport)
5664                                         fc_remote_port_delete(rport);
5665                                 del_timer_sync(&tgt->timer);
5666                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
5667                                 return;
5668                         } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5669                                 tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5670                                 rport = tgt->rport;
5671                                 tgt->rport = NULL;
5672                                 tgt->init_retries = 0;
5673                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5674
5675                                 /*
5676                                  * If fast fail is enabled, we wait for it to fire and then clean up
5677                                  * the old port, since we expect the fast fail timer to clean up the
5678                                  * outstanding I/O faster than waiting for normal command timeouts.
5679                                  * However, if fast fail is disabled, any I/O outstanding to the
5680                                  * rport LUNs will stay outstanding indefinitely, since the EH handlers
5681                                  * won't get invoked for I/O's timing out. If this is a NPIV failover
5682                                  * scenario, the better alternative is to use the move login.
5683                                  */
5684                                 if (rport && rport->fast_io_fail_tmo == -1)
5685                                         tgt->move_login = 1;
5686                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5687                                 if (rport)
5688                                         fc_remote_port_delete(rport);
5689                                 return;
5690                         }
5691                 }
5692
5693                 if (vhost->state == IBMVFC_INITIALIZING) {
5694                         if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5695                                 if (vhost->reinit) {
5696                                         vhost->reinit = 0;
5697                                         scsi_block_requests(vhost->host);
5698                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5699                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5700                                 } else {
5701                                         ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5702                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5703                                         wake_up(&vhost->init_wait_q);
5704                                         schedule_work(&vhost->rport_add_work_q);
5705                                         vhost->init_retries = 0;
5706                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5707                                         scsi_unblock_requests(vhost->host);
5708                                 }
5709
5710                                 return;
5711                         } else {
5712                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5713                                 vhost->job_step = ibmvfc_discover_targets;
5714                         }
5715                 } else {
5716                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5717                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5718                         scsi_unblock_requests(vhost->host);
5719                         wake_up(&vhost->init_wait_q);
5720                         return;
5721                 }
5722                 break;
5723         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5724                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5725                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5726                 ibmvfc_alloc_targets(vhost);
5727                 spin_lock_irqsave(vhost->host->host_lock, flags);
5728                 break;
5729         case IBMVFC_HOST_ACTION_TGT_INIT:
5730                 list_for_each_entry(tgt, &vhost->targets, queue) {
5731                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5732                                 tgt->job_step(tgt);
5733                                 break;
5734                         }
5735                 }
5736
5737                 if (!ibmvfc_dev_init_to_do(vhost))
5738                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5739                 break;
5740         default:
5741                 break;
5742         }
5743
5744         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5745 }
5746
5747 /**
5748  * ibmvfc_work - Do task level work
5749  * @data:               ibmvfc host struct
5750  *
5751  * Returns:
5752  *      zero
5753  **/
5754 static int ibmvfc_work(void *data)
5755 {
5756         struct ibmvfc_host *vhost = data;
5757         int rc;
5758
5759         set_user_nice(current, MIN_NICE);
5760
5761         while (1) {
5762                 rc = wait_event_interruptible(vhost->work_wait_q,
5763                                               ibmvfc_work_to_do(vhost));
5764
5765                 BUG_ON(rc);
5766
5767                 if (kthread_should_stop())
5768                         break;
5769
5770                 ibmvfc_do_work(vhost);
5771         }
5772
5773         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5774         return 0;
5775 }
5776
5777 /**
5778  * ibmvfc_alloc_queue - Allocate queue
5779  * @vhost:      ibmvfc host struct
5780  * @queue:      ibmvfc queue to allocate
5781  * @fmt:        queue format to allocate
5782  *
5783  * Returns:
5784  *      0 on success / non-zero on failure
5785  **/
5786 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5787                               struct ibmvfc_queue *queue,
5788                               enum ibmvfc_msg_fmt fmt)
5789 {
5790         struct device *dev = vhost->dev;
5791         size_t fmt_size;
5792
5793         ENTER;
5794         spin_lock_init(&queue->_lock);
5795         queue->q_lock = &queue->_lock;
5796
5797         switch (fmt) {
5798         case IBMVFC_CRQ_FMT:
5799                 fmt_size = sizeof(*queue->msgs.crq);
5800                 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
5801                 queue->evt_depth = scsi_qdepth;
5802                 queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
5803                 break;
5804         case IBMVFC_ASYNC_FMT:
5805                 fmt_size = sizeof(*queue->msgs.async);
5806                 break;
5807         case IBMVFC_SUB_CRQ_FMT:
5808                 fmt_size = sizeof(*queue->msgs.scrq);
5809                 /* We need one extra event for Cancel Commands */
5810                 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5811                 queue->evt_depth = scsi_qdepth;
5812                 queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5813                 break;
5814         default:
5815                 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5816                 return -EINVAL;
5817         }
5818
5819         queue->fmt = fmt;
5820         if (ibmvfc_init_event_pool(vhost, queue)) {
5821                 dev_err(dev, "Couldn't initialize event pool.\n");
5822                 return -ENOMEM;
5823         }
5824
5825         queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5826         if (!queue->msgs.handle)
5827                 return -ENOMEM;
5828
5829         queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5830                                           DMA_BIDIRECTIONAL);
5831
5832         if (dma_mapping_error(dev, queue->msg_token)) {
5833                 free_page((unsigned long)queue->msgs.handle);
5834                 queue->msgs.handle = NULL;
5835                 return -ENOMEM;
5836         }
5837
5838         queue->cur = 0;
5839         queue->size = PAGE_SIZE / fmt_size;
5840
5841         queue->vhost = vhost;
5842         return 0;
5843 }
5844
5845 /**
5846  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5847  * @vhost:      ibmvfc host struct
5848  *
5849  * Allocates a page for messages, maps it for dma, and registers
5850  * the crq with the hypervisor.
5851  *
5852  * Return value:
5853  *      zero on success / other on failure
5854  **/
5855 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5856 {
5857         int rc, retrc = -ENOMEM;
5858         struct device *dev = vhost->dev;
5859         struct vio_dev *vdev = to_vio_dev(dev);
5860         struct ibmvfc_queue *crq = &vhost->crq;
5861
5862         ENTER;
5863         if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5864                 return -ENOMEM;
5865
5866         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5867                                         crq->msg_token, PAGE_SIZE);
5868
5869         if (rc == H_RESOURCE)
5870                 /* maybe kexecing and resource is busy. try a reset */
5871                 retrc = rc = ibmvfc_reset_crq(vhost);
5872
5873         if (rc == H_CLOSED)
5874                 dev_warn(dev, "Partner adapter not ready\n");
5875         else if (rc) {
5876                 dev_warn(dev, "Error %d opening adapter\n", rc);
5877                 goto reg_crq_failed;
5878         }
5879
5880         retrc = 0;
5881
5882         tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5883
5884         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5885                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5886                 goto req_irq_failed;
5887         }
5888
5889         if ((rc = vio_enable_interrupts(vdev))) {
5890                 dev_err(dev, "Error %d enabling interrupts\n", rc);
5891                 goto req_irq_failed;
5892         }
5893
5894         LEAVE;
5895         return retrc;
5896
5897 req_irq_failed:
5898         tasklet_kill(&vhost->tasklet);
5899         do {
5900                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5901         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5902 reg_crq_failed:
5903         ibmvfc_free_queue(vhost, crq);
5904         return retrc;
5905 }
5906
5907 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5908                                   int index)
5909 {
5910         struct device *dev = vhost->dev;
5911         struct vio_dev *vdev = to_vio_dev(dev);
5912         struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5913         int rc = -ENOMEM;
5914
5915         ENTER;
5916
5917         rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5918                            &scrq->cookie, &scrq->hw_irq);
5919
5920         /* H_CLOSED indicates successful register, but no CRQ partner */
5921         if (rc && rc != H_CLOSED) {
5922                 dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5923                 if (rc == H_PARAMETER)
5924                         dev_warn_once(dev, "Firmware may not support MQ\n");
5925                 goto reg_failed;
5926         }
5927
5928         scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5929
5930         if (!scrq->irq) {
5931                 rc = -EINVAL;
5932                 dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5933                 goto irq_failed;
5934         }
5935
5936         snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5937                  vdev->unit_address, index);
5938         rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5939
5940         if (rc) {
5941                 dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5942                 irq_dispose_mapping(scrq->irq);
5943                 goto irq_failed;
5944         }
5945
5946         scrq->hwq_id = index;
5947
5948         LEAVE;
5949         return 0;
5950
5951 irq_failed:
5952         do {
5953                 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5954         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5955 reg_failed:
5956         LEAVE;
5957         return rc;
5958 }
5959
/**
 * ibmvfc_deregister_scsi_channel - Tear down a sub-CRQ SCSI channel
 * @vhost:	ibmvfc host struct
 * @index:	index of the sub-CRQ channel to deregister
 *
 * Releases the channel's irq, frees the sub-CRQ with the hypervisor
 * (retrying while busy) and clears out the message queue so the
 * channel can be re-registered later.
 **/
static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
{
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
	long rc;

	ENTER;

	free_irq(scrq->irq, scrq);
	irq_dispose_mapping(scrq->irq);
	scrq->irq = 0;

	/* Retry the free while the hypervisor reports the sub-CRQ as busy */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
					scrq->cookie);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);

	/* Clean out the queue */
	memset(scrq->msgs.crq, 0, PAGE_SIZE);
	scrq->cur = 0;

	LEAVE;
}
5987
5988 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
5989 {
5990         int i, j;
5991
5992         ENTER;
5993         if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
5994                 return;
5995
5996         for (i = 0; i < nr_scsi_hw_queues; i++) {
5997                 if (ibmvfc_register_scsi_channel(vhost, i)) {
5998                         for (j = i; j > 0; j--)
5999                                 ibmvfc_deregister_scsi_channel(vhost, j - 1);
6000                         vhost->do_enquiry = 0;
6001                         return;
6002                 }
6003         }
6004
6005         LEAVE;
6006 }
6007
6008 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
6009 {
6010         int i;
6011
6012         ENTER;
6013         if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
6014                 return;
6015
6016         for (i = 0; i < nr_scsi_hw_queues; i++)
6017                 ibmvfc_deregister_scsi_channel(vhost, i);
6018
6019         LEAVE;
6020 }
6021
6022 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
6023 {
6024         struct ibmvfc_queue *scrq;
6025         int i, j;
6026
6027         ENTER;
6028         if (!vhost->mq_enabled)
6029                 return;
6030
6031         vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
6032                                           sizeof(*vhost->scsi_scrqs.scrqs),
6033                                           GFP_KERNEL);
6034         if (!vhost->scsi_scrqs.scrqs) {
6035                 vhost->do_enquiry = 0;
6036                 return;
6037         }
6038
6039         for (i = 0; i < nr_scsi_hw_queues; i++) {
6040                 scrq = &vhost->scsi_scrqs.scrqs[i];
6041                 if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
6042                         for (j = i; j > 0; j--) {
6043                                 scrq = &vhost->scsi_scrqs.scrqs[j - 1];
6044                                 ibmvfc_free_queue(vhost, scrq);
6045                         }
6046                         kfree(vhost->scsi_scrqs.scrqs);
6047                         vhost->scsi_scrqs.scrqs = NULL;
6048                         vhost->scsi_scrqs.active_queues = 0;
6049                         vhost->do_enquiry = 0;
6050                         vhost->mq_enabled = 0;
6051                         return;
6052                 }
6053         }
6054
6055         ibmvfc_reg_sub_crqs(vhost);
6056
6057         LEAVE;
6058 }
6059
6060 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
6061 {
6062         struct ibmvfc_queue *scrq;
6063         int i;
6064
6065         ENTER;
6066         if (!vhost->scsi_scrqs.scrqs)
6067                 return;
6068
6069         ibmvfc_dereg_sub_crqs(vhost);
6070
6071         for (i = 0; i < nr_scsi_hw_queues; i++) {
6072                 scrq = &vhost->scsi_scrqs.scrqs[i];
6073                 ibmvfc_free_queue(vhost, scrq);
6074         }
6075
6076         kfree(vhost->scsi_scrqs.scrqs);
6077         vhost->scsi_scrqs.scrqs = NULL;
6078         vhost->scsi_scrqs.active_queues = 0;
6079         LEAVE;
6080 }
6081
6082 /**
6083  * ibmvfc_free_mem - Free memory for vhost
6084  * @vhost:      ibmvfc host struct
6085  *
6086  * Return value:
6087  *      none
6088  **/
6089 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
6090 {
6091         struct ibmvfc_queue *async_q = &vhost->async_crq;
6092
6093         ENTER;
6094         mempool_destroy(vhost->tgt_pool);
6095         kfree(vhost->trace);
6096         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
6097                           vhost->disc_buf_dma);
6098         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
6099                           vhost->login_buf, vhost->login_buf_dma);
6100         dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
6101                           vhost->channel_setup_buf, vhost->channel_setup_dma);
6102         dma_pool_destroy(vhost->sg_pool);
6103         ibmvfc_free_queue(vhost, async_q);
6104         LEAVE;
6105 }
6106
6107 /**
6108  * ibmvfc_alloc_mem - Allocate memory for vhost
6109  * @vhost:      ibmvfc host struct
6110  *
6111  * Return value:
6112  *      0 on success / non-zero on failure
6113  **/
6114 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
6115 {
6116         struct ibmvfc_queue *async_q = &vhost->async_crq;
6117         struct device *dev = vhost->dev;
6118
6119         ENTER;
6120         if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
6121                 dev_err(dev, "Couldn't allocate/map async queue.\n");
6122                 goto nomem;
6123         }
6124
6125         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
6126                                          SG_ALL * sizeof(struct srp_direct_buf),
6127                                          sizeof(struct srp_direct_buf), 0);
6128
6129         if (!vhost->sg_pool) {
6130                 dev_err(dev, "Failed to allocate sg pool\n");
6131                 goto unmap_async_crq;
6132         }
6133
6134         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
6135                                               &vhost->login_buf_dma, GFP_KERNEL);
6136
6137         if (!vhost->login_buf) {
6138                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
6139                 goto free_sg_pool;
6140         }
6141
6142         vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
6143         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
6144                                              &vhost->disc_buf_dma, GFP_KERNEL);
6145
6146         if (!vhost->disc_buf) {
6147                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
6148                 goto free_login_buffer;
6149         }
6150
6151         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
6152                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
6153         atomic_set(&vhost->trace_index, -1);
6154
6155         if (!vhost->trace)
6156                 goto free_disc_buffer;
6157
6158         vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
6159                                                       sizeof(struct ibmvfc_target));
6160
6161         if (!vhost->tgt_pool) {
6162                 dev_err(dev, "Couldn't allocate target memory pool\n");
6163                 goto free_trace;
6164         }
6165
6166         vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
6167                                                       &vhost->channel_setup_dma,
6168                                                       GFP_KERNEL);
6169
6170         if (!vhost->channel_setup_buf) {
6171                 dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
6172                 goto free_tgt_pool;
6173         }
6174
6175         LEAVE;
6176         return 0;
6177
6178 free_tgt_pool:
6179         mempool_destroy(vhost->tgt_pool);
6180 free_trace:
6181         kfree(vhost->trace);
6182 free_disc_buffer:
6183         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
6184                           vhost->disc_buf_dma);
6185 free_login_buffer:
6186         dma_free_coherent(dev, sizeof(*vhost->login_buf),
6187                           vhost->login_buf, vhost->login_buf_dma);
6188 free_sg_pool:
6189         dma_pool_destroy(vhost->sg_pool);
6190 unmap_async_crq:
6191         ibmvfc_free_queue(vhost, async_q);
6192 nomem:
6193         LEAVE;
6194         return -ENOMEM;
6195 }
6196
6197 /**
6198  * ibmvfc_rport_add_thread - Worker thread for rport adds
6199  * @work:       work struct
6200  *
6201  **/
6202 static void ibmvfc_rport_add_thread(struct work_struct *work)
6203 {
6204         struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
6205                                                  rport_add_work_q);
6206         struct ibmvfc_target *tgt;
6207         struct fc_rport *rport;
6208         unsigned long flags;
6209         int did_work;
6210
6211         ENTER;
6212         spin_lock_irqsave(vhost->host->host_lock, flags);
6213         do {
6214                 did_work = 0;
6215                 if (vhost->state != IBMVFC_ACTIVE)
6216                         break;
6217
6218                 list_for_each_entry(tgt, &vhost->targets, queue) {
6219                         if (tgt->add_rport) {
6220                                 did_work = 1;
6221                                 tgt->add_rport = 0;
6222                                 kref_get(&tgt->kref);
6223                                 rport = tgt->rport;
6224                                 if (!rport) {
6225                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6226                                         ibmvfc_tgt_add_rport(tgt);
6227                                 } else if (get_device(&rport->dev)) {
6228                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6229                                         tgt_dbg(tgt, "Setting rport roles\n");
6230                                         fc_remote_port_rolechg(rport, tgt->ids.roles);
6231                                         put_device(&rport->dev);
6232                                 } else {
6233                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6234                                 }
6235
6236                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
6237                                 spin_lock_irqsave(vhost->host->host_lock, flags);
6238                                 break;
6239                         }
6240                 }
6241         } while(did_work);
6242
6243         if (vhost->state == IBMVFC_ACTIVE)
6244                 vhost->scan_complete = 1;
6245         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6246         LEAVE;
6247 }
6248
/**
 * ibmvfc_probe - Adapter hot plug add entry point
 * @vdev:	vio device struct
 * @id:	vio device id struct
 *
 * Allocates the Scsi_Host and per-host state, starts the worker
 * thread, initializes the CRQ, registers with the SCSI midlayer and
 * FC transport, then kicks off the CRQ handshake and host scan.
 * Error paths unwind in strict reverse order of setup.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;
	unsigned int online_cpus = num_online_cpus();
	/* Never configure more hardware queues than online CPUs */
	unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);

	ENTER;
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	/* SCSI midlayer limits, mostly from module parameters */
	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = scsi_qdepth;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;
	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;

	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->targets);
	INIT_LIST_HEAD(&vhost->purge);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;	/* unknown until login with the VIOS */
	vhost->log_level = log_level;
	vhost->task_set = 1;

	vhost->mq_enabled = mq_enabled;
	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
	vhost->using_channels = 0;
	vhost->do_enquiry = 1;
	vhost->scan_timeout = 0;

	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);
	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
	mutex_init(&vhost->passthru_mutex);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		rc = PTR_ERR(vhost->work_thread);
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_crq;

	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	/* Return value intentionally ignored; presumably sub-CRQ failure is
	 * handled internally (single-queue fallback) — see ibmvfc_init_sub_crqs */
	ibmvfc_init_sub_crqs(vhost);

	if (shost_to_fc_host(shost)->rqst_q)
		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
	dev_set_drvdata(dev, vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	/* Kick off the CRQ handshake with the VIOS, then scan the host */
	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

remove_shost:
	scsi_remove_host(shost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}
6360
/**
 * ibmvfc_remove - Adapter hot plug remove entry point
 * @vdev:	vio device struct
 *
 * Tears the host down in roughly the reverse order of ibmvfc_probe:
 * take the link offline, stop the worker thread, unregister from the
 * FC transport and SCSI midlayer, fail any outstanding requests, then
 * release the CRQs and host memory.
 **/
static void ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	LIST_HEAD(purge);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_wait_while_resetting(vhost);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);

	/* Fail anything still in flight, completing it outside the lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	list_splice_init(&vhost->purge, &purge);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_complete_purge(&purge);
	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_release_crq_queue(vhost);

	ibmvfc_free_mem(vhost);
	/* Drop the host from the module-global list before the final put */
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
}
6401
6402 /**
6403  * ibmvfc_resume - Resume from suspend
6404  * @dev:        device struct
6405  *
6406  * We may have lost an interrupt across suspend/resume, so kick the
6407  * interrupt handler
6408  *
6409  */
6410 static int ibmvfc_resume(struct device *dev)
6411 {
6412         unsigned long flags;
6413         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6414         struct vio_dev *vdev = to_vio_dev(dev);
6415
6416         spin_lock_irqsave(vhost->host->host_lock, flags);
6417         vio_disable_interrupts(vdev);
6418         tasklet_schedule(&vhost->tasklet);
6419         spin_unlock_irqrestore(vhost->host->host_lock, flags);
6420         return 0;
6421 }
6422
6423 /**
6424  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6425  * @vdev:       vio device struct
6426  *
6427  * Return value:
6428  *      Number of bytes the driver will need to DMA map at the same time in
6429  *      order to perform well.
6430  */
6431 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6432 {
6433         unsigned long pool_dma;
6434
6435         pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
6436         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6437 }
6438
/* VIO device types this driver binds to */
static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }	/* terminator */
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);

/* Only a resume hook is provided; see ibmvfc_resume for why */
static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};

/* VIO bus driver registration, hooked up in ibmvfc_module_init */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};
6457
/* FC transport attributes and callbacks, registered via
 * fc_attach_transport() in ibmvfc_module_init */
static struct fc_function_template ibmvfc_transport_functions = {
	/* Static host attributes exposed through sysfs */
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	/* Dynamic host attributes with getter callbacks */
	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	/* Remote port attributes */
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	/* SCSI target attributes */
	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	/* FC passthru (bsg) support */
	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
6494
6495 /**
6496  * ibmvfc_module_init - Initialize the ibmvfc module
6497  *
6498  * Return value:
6499  *      0 on success / other on failure
6500  **/
6501 static int __init ibmvfc_module_init(void)
6502 {
6503         int rc;
6504
6505         if (!firmware_has_feature(FW_FEATURE_VIO))
6506                 return -ENODEV;
6507
6508         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6509                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6510
6511         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6512         if (!ibmvfc_transport_template)
6513                 return -ENOMEM;
6514
6515         rc = vio_register_driver(&ibmvfc_driver);
6516         if (rc)
6517                 fc_release_transport(ibmvfc_transport_template);
6518         return rc;
6519 }
6520
/**
 * ibmvfc_module_exit - Teardown the ibmvfc module
 *
 * Unregisters the VIO driver and releases the FC transport template,
 * mirroring ibmvfc_module_init in reverse order.
 *
 * Return value:
 *	nothing
 **/
static void __exit ibmvfc_module_exit(void)
{
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}

module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);