cxlflash: Refine host/device attributes
drivers/scsi/cxlflash/main.c [linux-2.6-block.git]
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/delay.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19
20#include <asm/unaligned.h>
21
22#include <misc/cxl.h>
23
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_host.h>
26#include <uapi/scsi/cxlflash_ioctl.h>
27
28#include "main.h"
29#include "sislite.h"
30#include "common.h"
31
32MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35MODULE_LICENSE("GPL");
36
37
38/**
39 * cmd_checkout() - checks out an AFU command
40 * @afu: AFU to checkout from.
41 *
42 * Commands are checked out in a round-robin fashion. Note that since
43 * the command pool is larger than the hardware queue, the majority of
44 * times we will only loop once or twice before getting a command. The
45 * buffer and CDB within the command are initialized (zeroed) prior to
46 * returning.
47 *
48 * Return: The checked out command or NULL when command pool is empty.
49 */
50static struct afu_cmd *cmd_checkout(struct afu *afu)
51{
52 int k, dec = CXLFLASH_NUM_CMDS;
53 struct afu_cmd *cmd;
54
55 while (dec--) {
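		/*
		 * cmd_couts is a free-running checkout counter; masking with
		 * (CXLFLASH_NUM_CMDS - 1) turns it into a round-robin slot
		 * index, assuming CXLFLASH_NUM_CMDS is a power of two.
		 */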
56 k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
57
58 cmd = &afu->cmd[k];
59
60 if (!atomic_dec_if_positive(&cmd->free)) {
61 pr_debug("%s: returning found index=%d\n",
62 __func__, cmd->slot);
63 memset(cmd->buf, 0, CMD_BUFSIZE);
64 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
65 return cmd;
66 }
67 }
68
69 return NULL;
70}
71
72/**
73 * cmd_checkin() - checks in an AFU command
74 * @cmd: AFU command to checkin.
75 *
76 * Safe to pass commands that have already been checked in. Several
77 * internal tracking fields are reset as part of the checkin. Note
78 * that these are intentionally reset prior to toggling the free bit
79 * to avoid clobbering values in the event that the command is checked
80 * out right away.
81 */
82static void cmd_checkin(struct afu_cmd *cmd)
83{
84 cmd->rcb.scp = NULL;
85 cmd->rcb.timeout = 0;
86 cmd->sa.ioasc = 0;
87 cmd->cmd_tmf = false;
88 cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
89
90 if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
91 pr_err("%s: Freeing cmd (%d) that is not in use!\n",
92 __func__, cmd->slot);
93 return;
94 }
95
96 pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
97}
98
99/**
100 * process_cmd_err() - command error handler
101 * @cmd: AFU command that experienced the error.
102 * @scp: SCSI command associated with the AFU command in error.
103 *
104 * Translates error bits from AFU command to SCSI command results.
105 */
106static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
107{
108 struct sisl_ioarcb *ioarcb;
109 struct sisl_ioasa *ioasa;
110
111 if (unlikely(!cmd))
112 return;
113
114 ioarcb = &(cmd->rcb);
115 ioasa = &(cmd->sa);
116
117 if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
118 pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
119 __func__, cmd, scp);
120 scp->result = (DID_ERROR << 16);
121 }
122
123 if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
124 pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
125 __func__, cmd, scp);
126 scp->result = (DID_ERROR << 16);
127 }
128
129 pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
130 "afu_extra=0x%X, scsi_entra=0x%X, fc_extra=0x%X\n",
131 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
132 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
133 ioasa->fc_extra);
134
135 if (ioasa->rc.scsi_rc) {
136 /* We have a SCSI status */
137 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
138 memcpy(scp->sense_buffer, ioasa->sense_data,
139 SISL_SENSE_DATA_LEN);
140 scp->result = ioasa->rc.scsi_rc;
141 } else
142 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
143 }
144
145 /*
146 * We encountered an error. Set scp->result based on nature
147 * of error.
148 */
149 if (ioasa->rc.fc_rc) {
150 /* We have an FC status */
151 switch (ioasa->rc.fc_rc) {
152 case SISL_FC_RC_LINKDOWN:
153 scp->result = (DID_REQUEUE << 16);
154 break;
155 case SISL_FC_RC_RESID:
156 /* This indicates an FCP resid underrun */
157 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
158 /* If the SISL_RC_FLAGS_OVERRUN flag was set,
159 * then we will handle this error elsewhere.
160 * If not then we must handle it here.
161 * This is probably an AFU bug. We will
162 * attempt a retry to see if that resolves it.
163 */
164 scp->result = (DID_ERROR << 16);
165 }
166 break;
167 case SISL_FC_RC_RESIDERR:
168 /* Resid mismatch between adapter and device */
169 case SISL_FC_RC_TGTABORT:
170 case SISL_FC_RC_ABORTOK:
171 case SISL_FC_RC_ABORTFAIL:
172 case SISL_FC_RC_NOLOGI:
173 case SISL_FC_RC_ABORTPEND:
174 case SISL_FC_RC_WRABORTPEND:
175 case SISL_FC_RC_NOEXP:
176 case SISL_FC_RC_INUSE:
177 scp->result = (DID_ERROR << 16);
178 break;
179 }
180 }
181
182 if (ioasa->rc.afu_rc) {
183 /* We have an AFU error */
184 switch (ioasa->rc.afu_rc) {
185 case SISL_AFU_RC_NO_CHANNELS:
186 scp->result = (DID_MEDIUM_ERROR << 16);
187 break;
188 case SISL_AFU_RC_DATA_DMA_ERR:
189 switch (ioasa->afu_extra) {
190 case SISL_AFU_DMA_ERR_PAGE_IN:
191 /* Retry */
192 scp->result = (DID_IMM_RETRY << 16);
193 break;
194 case SISL_AFU_DMA_ERR_INVALID_EA:
195 default:
196 scp->result = (DID_ERROR << 16);
197 }
198 break;
199 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
200 /* Retry */
201 scp->result = (DID_ALLOC_FAILURE << 16);
202 break;
203 default:
204 scp->result = (DID_ERROR << 16);
205 }
206 }
207}
208
209/**
210 * cmd_complete() - command completion handler
211 * @cmd: AFU command that has completed.
212 *
213 * Prepares and submits a command that has either completed or timed out to
214 * the SCSI stack. Checks the AFU command back into the command pool for
215 * non-internal (rcb.scp populated) commands.
216 */
217static void cmd_complete(struct afu_cmd *cmd)
218{
219 struct scsi_cmnd *scp;
220 u32 resid;
221 ulong lock_flags;
222 struct afu *afu = cmd->parent;
223 struct cxlflash_cfg *cfg = afu->parent;
224 bool cmd_is_tmf;
225
226 spin_lock_irqsave(&cmd->slock, lock_flags);
227 cmd->sa.host_use_b[0] |= B_DONE;
228 spin_unlock_irqrestore(&cmd->slock, lock_flags);
229
230 if (cmd->rcb.scp) {
231 scp = cmd->rcb.scp;
232 if (unlikely(cmd->sa.rc.afu_rc ||
233 cmd->sa.rc.scsi_rc ||
234 cmd->sa.rc.fc_rc))
235 process_cmd_err(cmd, scp);
236 else
237 scp->result = (DID_OK << 16);
238
239 resid = cmd->sa.resid;
240 cmd_is_tmf = cmd->cmd_tmf;
241 cmd_checkin(cmd); /* Don't use cmd after here */
242
243 pr_debug("%s: calling scsi_set_resid, scp=%p "
244 "result=%X resid=%d\n", __func__,
245 scp, scp->result, resid);
246
247 scsi_set_resid(scp, resid);
248 scsi_dma_unmap(scp);
249 scp->scsi_done(scp);
250
251 if (cmd_is_tmf) {
252 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
253 cfg->tmf_active = false;
254 wake_up_all_locked(&cfg->tmf_waitq);
255 spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
256 lock_flags);
257 }
258 } else
259 complete(&cmd->cevent);
260}
261
262/**
263 * context_reset() - timeout handler for AFU commands
264 * @cmd: AFU command that timed out.
265 *
266 * Sends a reset to the AFU.
267 */
268static void context_reset(struct afu_cmd *cmd)
269{
270 int nretry = 0;
271 u64 rrin = 0x1;
272 u64 room = 0;
273 struct afu *afu = cmd->parent;
274 ulong lock_flags;
275
276 pr_debug("%s: cmd=%p\n", __func__, cmd);
277
278 spin_lock_irqsave(&cmd->slock, lock_flags);
279
280 /* Already completed? */
281 if (cmd->sa.host_use_b[0] & B_DONE) {
282 spin_unlock_irqrestore(&cmd->slock, lock_flags);
283 return;
284 }
285
286 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
287 spin_unlock_irqrestore(&cmd->slock, lock_flags);
288
289 /*
290 * We really want to send this reset at all costs, so spread
291 * out wait time on successive retries for available room.
292 */
293 do {
294 room = readq_be(&afu->host_map->cmd_room);
295 atomic64_set(&afu->room, room);
296 if (room)
297 goto write_rrin;
298 udelay(nretry);
299 } while (nretry++ < MC_ROOM_RETRY_CNT);
300
301 pr_err("%s: no cmd_room to send reset\n", __func__);
302 return;
303
304write_rrin:
305 nretry = 0;
306 writeq_be(rrin, &afu->host_map->ioarrin);
307 do {
308 rrin = readq_be(&afu->host_map->ioarrin);
309 if (rrin != 0x1)
310 break;
311 /* Double delay each time */
312 udelay(1 << nretry);
313 } while (nretry++ < MC_ROOM_RETRY_CNT);
314}
315
316/**
317 * send_cmd() - sends an AFU command
318 * @afu: AFU associated with the host.
319 * @cmd: AFU command to send.
320 *
321 * Return:
322 * 0 on success or SCSI_MLQUEUE_HOST_BUSY
323 */
324static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
325{
326 struct cxlflash_cfg *cfg = afu->parent;
327 struct device *dev = &cfg->dev->dev;
328 int nretry = 0;
329 int rc = 0;
330 u64 room;
331 long newval;
332
333 /*
334 * This routine is used by critical users such as AFU sync and to
335 * send a task management function (TMF). Thus we want to retry a
336 * bit before returning an error. To avoid the performance penalty
337 * of MMIO, we spread the update of 'room' over multiple commands.
338 */
339retry:
340 newval = atomic64_dec_if_positive(&afu->room);
341 if (!newval) {
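		/*
		 * The cached room count just reached zero; refresh it from the
		 * AFU's cmd_room MMIO register before giving up on this command.
		 */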
342 do {
343 room = readq_be(&afu->host_map->cmd_room);
344 atomic64_set(&afu->room, room);
345 if (room)
346 goto write_ioarrin;
347 udelay(nretry);
348 } while (nretry++ < MC_ROOM_RETRY_CNT);
349
350 dev_err(dev, "%s: no cmd_room to send 0x%X\n",
351 __func__, cmd->rcb.cdb[0]);
352
353 goto no_room;
354 } else if (unlikely(newval < 0)) {
355 /* This should be rare. i.e. Only if two threads race and
356 * decrement before the MMIO read is done. In this case
357 * just benefit from the other thread having updated
358 * afu->room.
359 */
360 if (nretry++ < MC_ROOM_RETRY_CNT) {
361 udelay(nretry);
362 goto retry;
363 }
364
365 goto no_room;
366 }
367
368write_ioarrin:
369 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
370out:
371 pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
372 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
373 return rc;
374
375no_room:
376 afu->read_room = true;
377 schedule_work(&cfg->work_q);
378 rc = SCSI_MLQUEUE_HOST_BUSY;
379 goto out;
380}
381
382/**
383 * wait_resp() - polls for a response or timeout to a sent AFU command
384 * @afu: AFU associated with the host.
385 * @cmd: AFU command that was sent.
386 */
387static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
388{
389 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
390
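	/*
	 * rcb.timeout is treated as seconds here; allow up to twice that
	 * interval before forcing a context reset.
	 */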
391 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
392 if (!timeout)
393 context_reset(cmd);
394
395 if (unlikely(cmd->sa.ioasc != 0))
396 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
397 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
398 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
399 cmd->sa.rc.fc_rc);
400}
401
402/**
403 * send_tmf() - sends a Task Management Function (TMF)
404 * @afu: AFU to checkout from.
405 * @scp: SCSI command from stack.
406 * @tmfcmd: TMF command to send.
407 *
408 * Return:
409 * 0 on success
410 * SCSI_MLQUEUE_HOST_BUSY when host is busy
411 */
412static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
413{
414 struct afu_cmd *cmd;
415
416 u32 port_sel = scp->device->channel + 1;
417 short lflag = 0;
418 struct Scsi_Host *host = scp->device->host;
419 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
420 ulong lock_flags;
421 int rc = 0;
422
423 cmd = cmd_checkout(afu);
424 if (unlikely(!cmd)) {
425 pr_err("%s: could not get a free command\n", __func__);
426 rc = SCSI_MLQUEUE_HOST_BUSY;
427 goto out;
428 }
429
430 /* If a Task Management Function is active, do not send one more.
431 */
432 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
433 if (cfg->tmf_active)
434 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
435 !cfg->tmf_active);
436 cfg->tmf_active = true;
437 cmd->cmd_tmf = true;
438 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
439
440 cmd->rcb.ctx_id = afu->ctx_hndl;
441 cmd->rcb.port_sel = port_sel;
442 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
443
444 lflag = SISL_REQ_FLAGS_TMF_CMD;
445
446 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
447 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
448
449 /* Stash the scp in the reserved field, for reuse during interrupt */
450 cmd->rcb.scp = scp;
451
452 /* Copy the CDB from the cmd passed in */
453 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
454
455 /* Send the command */
456 rc = send_cmd(afu, cmd);
457 if (unlikely(rc)) {
458 cmd_checkin(cmd);
459 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
460 cfg->tmf_active = false;
461 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
462 goto out;
463 }
464
465 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
466 wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
467 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
468out:
469 return rc;
470}
471
472/**
473 * cxlflash_driver_info() - information handler for this host driver
474 * @host: SCSI host associated with device.
475 *
476 * Return: A string describing the device.
477 */
478static const char *cxlflash_driver_info(struct Scsi_Host *host)
479{
480 return CXLFLASH_ADAPTER_NAME;
481}
482
483/**
484 * cxlflash_queuecommand() - sends a mid-layer request
485 * @host: SCSI host associated with device.
486 * @scp: SCSI command to send.
487 *
488 * Return:
489 * 0 on success
490 * SCSI_MLQUEUE_HOST_BUSY when host is busy
491 */
492static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
493{
494 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
495 struct afu *afu = cfg->afu;
496 struct pci_dev *pdev = cfg->dev;
497 struct afu_cmd *cmd;
498 u32 port_sel = scp->device->channel + 1;
499 int nseg, i, ncount;
500 struct scatterlist *sg;
501 ulong lock_flags;
502 short lflag = 0;
503 int rc = 0;
504
505 pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
506 __func__, scp, host->host_no, scp->device->channel,
507 scp->device->id, scp->device->lun,
508 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
509 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
510 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
511 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
512
513 /* If a Task Management Function is active, wait for it to complete
514 * before continuing with regular commands.
515 */
516 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
517 if (cfg->tmf_active) {
518 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
519 rc = SCSI_MLQUEUE_HOST_BUSY;
520 goto out;
521 }
522 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
523
524 switch (cfg->state) {
525 case STATE_RESET:
526 dev_dbg_ratelimited(&cfg->dev->dev, "%s: device is in reset!\n",
527 __func__);
528 rc = SCSI_MLQUEUE_HOST_BUSY;
529 goto out;
530 case STATE_FAILTERM:
531 dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
532 __func__);
533 scp->result = (DID_NO_CONNECT << 16);
534 scp->scsi_done(scp);
535 rc = 0;
536 goto out;
537 default:
538 break;
539 }
540
541 cmd = cmd_checkout(afu);
542 if (unlikely(!cmd)) {
543 pr_err("%s: could not get a free command\n", __func__);
544 rc = SCSI_MLQUEUE_HOST_BUSY;
545 goto out;
546 }
547
548 cmd->rcb.ctx_id = afu->ctx_hndl;
549 cmd->rcb.port_sel = port_sel;
550 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
551
552 if (scp->sc_data_direction == DMA_TO_DEVICE)
553 lflag = SISL_REQ_FLAGS_HOST_WRITE;
554 else
555 lflag = SISL_REQ_FLAGS_HOST_READ;
556
557 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
558 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
559
560 /* Stash the scp in the reserved field, for reuse during interrupt */
561 cmd->rcb.scp = scp;
562
563 nseg = scsi_dma_map(scp);
564 if (unlikely(nseg < 0)) {
565 dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
566 __func__, nseg);
567 rc = SCSI_MLQUEUE_HOST_BUSY;
568 goto out;
569 }
570
571 ncount = scsi_sg_count(scp);
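	/*
	 * The RCB carries a single data address/length pair, so each pass of
	 * this loop overwrites the previous element; requests are expected to
	 * map to a single SG entry.
	 */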
572 scsi_for_each_sg(scp, sg, ncount, i) {
573 cmd->rcb.data_len = sg_dma_len(sg);
574 cmd->rcb.data_ea = sg_dma_address(sg);
575 }
576
577 /* Copy the CDB from the scsi_cmnd passed in */
578 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
579
580 /* Send the command */
581 rc = send_cmd(afu, cmd);
582 if (unlikely(rc)) {
583 cmd_checkin(cmd);
584 scsi_dma_unmap(scp);
585 }
586
587out:
588 return rc;
589}
590
591/**
592 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
593 * @cxlflash: Internal structure associated with the host.
594 */
595static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
596{
597 struct pci_dev *pdev = cfg->dev;
598
599 if (pci_channel_offline(pdev))
600 wait_event_timeout(cfg->reset_waitq,
601 !pci_channel_offline(pdev),
602 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
603}
604
605/**
606 * free_mem() - free memory associated with the AFU
607 * @cxlflash: Internal structure associated with the host.
608 */
609static void free_mem(struct cxlflash_cfg *cfg)
610{
611 int i;
612 char *buf = NULL;
613 struct afu *afu = cfg->afu;
614
615 if (cfg->afu) {
616 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
617 buf = afu->cmd[i].buf;
618 if (!((u64)buf & (PAGE_SIZE - 1)))
619 free_page((ulong)buf);
620 }
621
622 free_pages((ulong)afu, get_order(sizeof(struct afu)));
623 cfg->afu = NULL;
624 }
625}
626
627/**
628 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
629 * @cxlflash: Internal structure associated with the host.
630 *
631 * Safe to call with AFU in a partially allocated/initialized state.
632 */
633static void stop_afu(struct cxlflash_cfg *cfg)
634{
635 int i;
636 struct afu *afu = cfg->afu;
637
638 if (likely(afu)) {
639 for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
640 complete(&afu->cmd[i].cevent);
641
642 if (likely(afu->afu_map)) {
643 cxl_psa_unmap((void *)afu->afu_map);
644 afu->afu_map = NULL;
645 }
646 }
647}
648
649/**
650 * term_mc() - terminates the master context
651 * @cxlflash: Internal structure associated with the host.
652 * @level: Depth of allocation, where to begin waterfall tear down.
653 *
654 * Safe to call with AFU/MC in partially allocated/initialized state.
655 */
656static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
657{
658 int rc = 0;
659 struct afu *afu = cfg->afu;
660
661 if (!afu || !cfg->mcctx) {
662 pr_err("%s: returning from term_mc with NULL afu or MC\n",
663 __func__);
664 return;
665 }
666
667 switch (level) {
668 case UNDO_START:
669 rc = cxl_stop_context(cfg->mcctx);
670 BUG_ON(rc);
671 case UNMAP_THREE:
672 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
673 case UNMAP_TWO:
674 cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
675 case UNMAP_ONE:
676 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
677 case FREE_IRQ:
678 cxl_free_afu_irqs(cfg->mcctx);
679 case RELEASE_CONTEXT:
680 cfg->mcctx = NULL;
681 }
682}
683
684/**
685 * term_afu() - terminates the AFU
686 * @cxlflash: Internal structure associated with the host.
687 *
688 * Safe to call with AFU/MC in partially allocated/initialized state.
689 */
690static void term_afu(struct cxlflash_cfg *cfg)
691{
692 term_mc(cfg, UNDO_START);
693
694 if (cfg->afu)
695 stop_afu(cfg);
696
697 pr_debug("%s: returning\n", __func__);
698}
699
700/**
701 * cxlflash_remove() - PCI entry point to tear down host
702 * @pdev: PCI device associated with the host.
703 *
704 * Safe to use as a cleanup in partially allocated/initialized state.
705 */
706static void cxlflash_remove(struct pci_dev *pdev)
707{
708 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
709 ulong lock_flags;
710
711 /* If a Task Management Function is active, wait for it to complete
712 * before continuing with remove.
713 */
714 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
715 if (cfg->tmf_active)
716 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
717 !cfg->tmf_active);
718 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
719
720 cfg->state = STATE_FAILTERM;
721 cxlflash_stop_term_user_contexts(cfg);
722
723 switch (cfg->init_state) {
724 case INIT_STATE_SCSI:
725 cxlflash_term_local_luns(cfg);
726 scsi_remove_host(cfg->host);
727 scsi_host_put(cfg->host);
728 /* Fall through */
729 case INIT_STATE_AFU:
730 term_afu(cfg);
731 case INIT_STATE_PCI:
732 pci_release_regions(cfg->dev);
733 pci_disable_device(pdev);
734 case INIT_STATE_NONE:
735 flush_work(&cfg->work_q);
736 free_mem(cfg);
737 break;
738 }
739
740 pr_debug("%s: returning\n", __func__);
741}
742
743/**
744 * alloc_mem() - allocates the AFU and its command pool
745 * @cxlflash: Internal structure associated with the host.
746 *
747 * A partially allocated state remains on failure.
748 *
749 * Return:
750 * 0 on success
751 * -ENOMEM on failure to allocate memory
752 */
753static int alloc_mem(struct cxlflash_cfg *cfg)
754{
755 int rc = 0;
756 int i;
757 char *buf = NULL;
758
759 /* This allocation is about 12K, i.e. only 1 64k page
760 * and up to 4 4k pages
761 */
762 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
763 get_order(sizeof(struct afu)));
764 if (unlikely(!cfg->afu)) {
765 pr_err("%s: cannot get %d free pages\n",
766 __func__, get_order(sizeof(struct afu)));
767 rc = -ENOMEM;
768 goto out;
769 }
770 cfg->afu->parent = cfg;
771 cfg->afu->afu_map = NULL;
772
773 for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
774 if (!((u64)buf & (PAGE_SIZE - 1))) {
775 buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
776 if (unlikely(!buf)) {
777 pr_err("%s: Allocate command buffers fail!\n",
778 __func__);
779 rc = -ENOMEM;
780 free_mem(cfg);
781 goto out;
782 }
783 }
784
785 cfg->afu->cmd[i].buf = buf;
786 atomic_set(&cfg->afu->cmd[i].free, 1);
787 cfg->afu->cmd[i].slot = i;
788 }
789
790out:
791 return rc;
792}
793
794/**
795 * init_pci() - initializes the host as a PCI device
796 * @cxlflash: Internal structure associated with the host.
797 *
798 * Return:
799 * 0 on success
800 * -EIO on unable to communicate with device
801 * A return code from the PCI sub-routines
802 */
803static int init_pci(struct cxlflash_cfg *cfg)
804{
805 struct pci_dev *pdev = cfg->dev;
806 int rc = 0;
807
808 cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
809 rc = pci_request_regions(pdev, CXLFLASH_NAME);
810 if (rc < 0) {
811 dev_err(&pdev->dev,
812 "%s: Couldn't register memory range of registers\n",
813 __func__);
814 goto out;
815 }
816
817 rc = pci_enable_device(pdev);
818 if (rc || pci_channel_offline(pdev)) {
819 if (pci_channel_offline(pdev)) {
820 cxlflash_wait_for_pci_err_recovery(cfg);
821 rc = pci_enable_device(pdev);
822 }
823
824 if (rc) {
825 dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
826 __func__);
827 cxlflash_wait_for_pci_err_recovery(cfg);
828 goto out_release_regions;
829 }
830 }
831
832 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
833 if (rc < 0) {
834 dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
835 __func__);
836 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
837 }
838
839 if (rc < 0) {
840 dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
841 __func__);
842 goto out_disable;
843 }
844
845 pci_set_master(pdev);
846
847 if (pci_channel_offline(pdev)) {
848 cxlflash_wait_for_pci_err_recovery(cfg);
849 if (pci_channel_offline(pdev)) {
850 rc = -EIO;
851 goto out_msi_disable;
852 }
853 }
854
855 rc = pci_save_state(pdev);
856
857 if (rc != PCIBIOS_SUCCESSFUL) {
858 dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
859 __func__);
860 rc = -EIO;
861 goto cleanup_nolog;
862 }
863
864out:
865 pr_debug("%s: returning rc=%d\n", __func__, rc);
866 return rc;
867
868cleanup_nolog:
869out_msi_disable:
870 cxlflash_wait_for_pci_err_recovery(cfg);
871out_disable:
872 pci_disable_device(pdev);
873out_release_regions:
874 pci_release_regions(pdev);
875 goto out;
876
877}
878
879/**
880 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
881 * @cxlflash: Internal structure associated with the host.
882 *
883 * Return:
884 * 0 on success
885 * A return code from adding the host
886 */
887static int init_scsi(struct cxlflash_cfg *cfg)
888{
889 struct pci_dev *pdev = cfg->dev;
890 int rc = 0;
891
892 rc = scsi_add_host(cfg->host, &pdev->dev);
893 if (rc) {
894 dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
895 __func__, rc);
896 goto out;
897 }
898
899 scsi_scan_host(cfg->host);
900
901out:
902 pr_debug("%s: returning rc=%d\n", __func__, rc);
903 return rc;
904}
905
906/**
907 * set_port_online() - transitions the specified host FC port to online state
908 * @fc_regs: Top of MMIO region defined for specified port.
909 *
910 * The provided MMIO region must be mapped prior to call. Online state means
911 * that the FC link layer has synced, completed the handshaking process, and
912 * is ready for login to start.
913 */
914static void set_port_online(u64 *fc_regs)
915{
916 u64 cmdcfg;
917
918 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
919 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
920 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
921 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
922}
923
924/**
925 * set_port_offline() - transitions the specified host FC port to offline state
926 * @fc_regs: Top of MMIO region defined for specified port.
927 *
928 * The provided MMIO region must be mapped prior to call.
929 */
930static void set_port_offline(u64 *fc_regs)
931{
932 u64 cmdcfg;
933
934 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
935 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
936 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
937 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
938}
939
940/**
941 * wait_port_online() - waits for the specified host FC port to come online
942 * @fc_regs: Top of MMIO region defined for specified port.
943 * @delay_us: Number of microseconds to delay between reading port status.
944 * @nretry: Number of cycles to retry reading port status.
945 *
946 * The provided MMIO region must be mapped prior to call. This will timeout
947 * when the cable is not plugged in.
948 *
949 * Return:
950 * TRUE (1) when the specified port is online
951 * FALSE (0) when the specified port fails to come online after timeout
952 * -EINVAL when @delay_us is less than 1000
953 */
954static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
955{
956 u64 status;
957
958 if (delay_us < 1000) {
959 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
960 return -EINVAL;
961 }
962
963 do {
964 msleep(delay_us / 1000);
965 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
966 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
967 nretry--);
968
969 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
970}
971
972/**
973 * wait_port_offline() - waits for the specified host FC port to go offline
974 * @fc_regs: Top of MMIO region defined for specified port.
975 * @delay_us: Number of microseconds to delay between reading port status.
976 * @nretry: Number of cycles to retry reading port status.
977 *
978 * The provided MMIO region must be mapped prior to call.
979 *
980 * Return:
981 * TRUE (1) when the specified port is offline
982 * FALSE (0) when the specified port fails to go offline after timeout
983 * -EINVAL when @delay_us is less than 1000
984 */
985static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
986{
987 u64 status;
988
989 if (delay_us < 1000) {
990 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
991 return -EINVAL;
992 }
993
994 do {
995 msleep(delay_us / 1000);
996 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
997 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
998 nretry--);
999
1000 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1001}
1002
1003/**
1004 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1005 * @afu: AFU associated with the host that owns the specified FC port.
1006 * @port: Port number being configured.
1007 * @fc_regs: Top of MMIO region defined for specified port.
1008 * @wwpn: The world-wide-port-number previously discovered for port.
1009 *
1010 * The provided MMIO region must be mapped prior to call. As part of the
1011 * sequence to configure the WWPN, the port is toggled offline and then back
1012 * online. This toggling action can cause this routine to delay up to a few
1013 * seconds. When configured to use the internal LUN feature of the AFU, a
1014 * failure to come online is overridden.
1015 *
1016 * Return:
1017 * 0 when the WWPN is successfully written and the port comes back online
1018 * -1 when the port fails to go offline or come back up online
1019 */
1020static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
1021{
1022 int ret = 0;
1023
1024 set_port_offline(fc_regs);
1025
1026 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1027 FC_PORT_STATUS_RETRY_CNT)) {
1028 pr_debug("%s: wait on port %d to go offline timed out\n",
1029 __func__, port);
1030 ret = -1; /* but continue on to leave the port back online */
1031 }
1032
1033 if (ret == 0)
1034 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1035
1036 set_port_online(fc_regs);
1037
1038 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1039 FC_PORT_STATUS_RETRY_CNT)) {
1040 pr_debug("%s: wait on port %d to go online timed out\n",
1041 __func__, port);
1042 ret = -1;
1043
1044 /*
1045 * Override for internal lun!!!
1046 */
1047 if (afu->internal_lun) {
1048 pr_debug("%s: Overriding port %d online timeout!!!\n",
1049 __func__, port);
1050 ret = 0;
1051 }
1052 }
1053
1054 pr_debug("%s: returning rc=%d\n", __func__, ret);
1055
1056 return ret;
1057}
1058
1059/**
1060 * afu_link_reset() - resets the specified host FC port
1061 * @afu: AFU associated with the host that owns the specified FC port.
1062 * @port: Port number being configured.
1063 * @fc_regs: Top of MMIO region defined for specified port.
1064 *
1065 * The provided MMIO region must be mapped prior to call. The sequence to
1066 * reset the port involves toggling it offline and then back online. This
1067 * action can cause this routine to delay up to a few seconds. An effort
1068 * is made to maintain the link with the device by switching the host to
1069 * use the alternate port exclusively while the reset takes place. A
1070 * failure to come back online is logged but otherwise ignored.
1071 */
1072static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
1073{
1074 u64 port_sel;
1075
1076 /* first switch the AFU to the other links, if any */
1077 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1078 port_sel &= ~(1ULL << port);
1079 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1080 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1081
1082 set_port_offline(fc_regs);
1083 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1084 FC_PORT_STATUS_RETRY_CNT))
1085 pr_err("%s: wait on port %d to go offline timed out\n",
1086 __func__, port);
1087
1088 set_port_online(fc_regs);
1089 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1090 FC_PORT_STATUS_RETRY_CNT))
1091 pr_err("%s: wait on port %d to go online timed out\n",
1092 __func__, port);
1093
1094 /* switch back to include this port */
1095 port_sel |= (1ULL << port);
1096 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1097 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1098
1099 pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1100}
1101
1102/*
1103 * Asynchronous interrupt information table
1104 */
1105static const struct asyc_intr_info ainfo[] = {
1106 {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1107 {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1108 {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1109 {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
1110 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1111 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
1112 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1113 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1114 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1115 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1116 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1117 {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
1118 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1119 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
1120 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1121 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1122 {0x0, "", 0, 0} /* terminator */
1123};
1124
1125/**
1126 * find_ainfo() - locates and returns asynchronous interrupt information
1127 * @status: Status code set by AFU on error.
1128 *
1129 * Return: The located information or NULL when the status code is invalid.
1130 */
1131static const struct asyc_intr_info *find_ainfo(u64 status)
1132{
1133 const struct asyc_intr_info *info;
1134
1135 for (info = &ainfo[0]; info->status; info++)
1136 if (info->status == status)
1137 return info;
1138
1139 return NULL;
1140}
1141
1142/**
1143 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1144 * @afu: AFU associated with the host.
1145 */
1146static void afu_err_intr_init(struct afu *afu)
1147{
1148 int i;
1149 u64 reg;
1150
1151 /* global async interrupts: AFU clears afu_ctrl on context exit
1152 * if async interrupts were sent to that context. This prevents
1153 * the AFU from sending further async interrupts when
1154 * there is
1155 * nobody to receive them.
1156 */
1157
1158 /* mask all */
1159 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1160 /* set LISN# to send and point to master context */
1161 reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1162
1163 if (afu->internal_lun)
1164 reg |= 1; /* Bit 63 indicates local lun */
1165 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1166 /* clear all */
1167 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1168 /* unmask bits that are of interest */
1169 /* note: afu can send an interrupt after this step */
1170 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1171 /* clear again in case a bit came on after previous clear but before */
1172 /* unmask */
1173 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1174
1175 /* Clear/Set internal lun bits */
1176 reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1177 reg &= SISL_FC_INTERNAL_MASK;
1178 if (afu->internal_lun)
1179 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1180 writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1181
1182 /* now clear FC errors */
1183 for (i = 0; i < NUM_FC_PORTS; i++) {
1184 writeq_be(0xFFFFFFFFU,
1185 &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1186 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1187 }
1188
1189 /* sync interrupts for master's IOARRIN write */
1190 /* note that unlike asyncs, there can be no pending sync interrupts */
1191 /* at this time (this is a fresh context and master has not written */
1192 /* IOARRIN yet), so there is nothing to clear. */
1193
1194 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1195 writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1196 writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1197}
1198
1199/**
1200 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1201 * @irq: Interrupt number.
1202 * @data: Private data provided at interrupt registration, the AFU.
1203 *
1204 * Return: Always return IRQ_HANDLED.
1205 */
1206static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1207{
1208 struct afu *afu = (struct afu *)data;
1209 u64 reg;
1210 u64 reg_unmasked;
1211
1212 reg = readq_be(&afu->host_map->intr_status);
1213 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1214
1215 if (reg_unmasked == 0UL) {
1216 pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1217 __func__, (u64)afu, reg);
1218 goto cxlflash_sync_err_irq_exit;
1219 }
1220
1221 pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1222 __func__, (u64)afu, reg);
1223
1224 writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1225
1226cxlflash_sync_err_irq_exit:
1227 pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1228 return IRQ_HANDLED;
1229}
1230
1231/**
1232 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1233 * @irq: Interrupt number.
1234 * @data: Private data provided at interrupt registration, the AFU.
1235 *
1236 * Return: Always return IRQ_HANDLED.
1237 */
1238static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1239{
1240 struct afu *afu = (struct afu *)data;
1241 struct afu_cmd *cmd;
1242 bool toggle = afu->toggle;
1243 u64 entry,
1244 *hrrq_start = afu->hrrq_start,
1245 *hrrq_end = afu->hrrq_end,
1246 *hrrq_curr = afu->hrrq_curr;
1247
1248 /* Process however many RRQ entries that are ready */
1249 while (true) {
1250 entry = *hrrq_curr;
1251
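		/*
		 * An entry is new only while its toggle bit matches the
		 * expected generation; a mismatch means no further
		 * completions are pending.
		 */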
1252 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1253 break;
1254
1255 cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1256 cmd_complete(cmd);
1257
1258 /* Advance to next entry or wrap and flip the toggle bit */
1259 if (hrrq_curr < hrrq_end)
1260 hrrq_curr++;
1261 else {
1262 hrrq_curr = hrrq_start;
1263 toggle ^= SISL_RESP_HANDLE_T_BIT;
1264 }
1265 }
1266
1267 afu->hrrq_curr = hrrq_curr;
1268 afu->toggle = toggle;
1269
1270 return IRQ_HANDLED;
1271}
1272
1273/**
1274 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1275 * @irq: Interrupt number.
1276 * @data: Private data provided at interrupt registration, the AFU.
1277 *
1278 * Return: Always return IRQ_HANDLED.
1279 */
1280static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1281{
1282 struct afu *afu = (struct afu *)data;
1283 struct cxlflash_cfg *cfg;
1284 u64 reg_unmasked;
1285 const struct asyc_intr_info *info;
1286 struct sisl_global_map *global = &afu->afu_map->global;
1287 u64 reg;
1288 u8 port;
1289 int i;
1290
1291 cfg = afu->parent;
1292
1293 reg = readq_be(&global->regs.aintr_status);
1294 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1295
1296 if (reg_unmasked == 0) {
1297 pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
1298 __func__, reg);
1299 goto out;
1300 }
1301
1302 /* it is OK to clear AFU status before FC_ERROR */
1303 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1304
1305 /* check each bit that is on */
1306 for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1307 info = find_ainfo(1ULL << i);
1308 if (((reg_unmasked & 0x1) == 0) || !info)
1309 continue;
1310
1311 port = info->port;
1312
1313 pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1314 __func__, port, info->desc,
1315 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1316
1317 /*
1318 * do link reset first, some OTHER errors will set FC_ERROR
1319 * again if cleared before or w/o a reset
1320 */
1321 if (info->action & LINK_RESET) {
1322 pr_err("%s: FC Port %d: resetting link\n",
1323 __func__, port);
1324 cfg->lr_state = LINK_RESET_REQUIRED;
1325 cfg->lr_port = port;
1326 schedule_work(&cfg->work_q);
1327 }
1328
1329 if (info->action & CLR_FC_ERROR) {
1330 reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1331
1332 /*
1333 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
1334 * should be the same and tracing one is sufficient.
1335 */
1336
1337 pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
1338 __func__, port, reg);
1339
1340 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1341 writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1342 }
1343 }
1344
1345out:
1346 pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
1347 return IRQ_HANDLED;
1348}
1349
1350/**
1351 * start_context() - starts the master context
1352 * @cxlflash: Internal structure associated with the host.
1353 *
1354 * Return: A success or failure value from CXL services.
1355 */
1356static int start_context(struct cxlflash_cfg *cfg)
1357{
1358 int rc = 0;
1359
1360 rc = cxl_start_context(cfg->mcctx,
1361 cfg->afu->work.work_element_descriptor,
1362 NULL);
1363
1364 pr_debug("%s: returning rc=%d\n", __func__, rc);
1365 return rc;
1366}
1367
1368/**
1369 * read_vpd() - obtains the WWPNs from VPD
1370 * @cxlflash: Internal structure associated with the host.
1371 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
1372 *
1373 * Return:
1374 * 0 on success
1375 * -ENODEV when VPD or WWPN keywords not found
1376 */
1377static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1378{
1379 struct pci_dev *dev = cfg->parent_dev;
1380 int rc = 0;
1381 int ro_start, ro_size, i, j, k;
1382 ssize_t vpd_size;
1383 char vpd_data[CXLFLASH_VPD_LEN];
1384 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1385 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1386
1387 /* Get the VPD data from the device */
1388 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
1389 if (unlikely(vpd_size <= 0)) {
1390 pr_err("%s: Unable to read VPD (size = %ld)\n",
1391 __func__, vpd_size);
1392 rc = -ENODEV;
1393 goto out;
1394 }
1395
1396 /* Get the read only section offset */
1397 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1398 PCI_VPD_LRDT_RO_DATA);
1399 if (unlikely(ro_start < 0)) {
1400 pr_err("%s: VPD Read-only data not found\n", __func__);
1401 rc = -ENODEV;
1402 goto out;
1403 }
1404
1405 /* Get the read only section size, cap when extends beyond read VPD */
1406 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1407 j = ro_size;
1408 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1409 if (unlikely((i + j) > vpd_size)) {
1410 pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1411 __func__, (i + j), vpd_size);
1412 ro_size = vpd_size - i;
1413 }
1414
1415 /*
1416 * Find the offset of the WWPN tag within the read only
1417 * VPD data and validate the found field (partials are
1418 * no good to us). Convert the ASCII data to an integer
1419 * value. Note that we must copy to a temporary buffer
1420 * because the conversion service requires that the ASCII
1421 * string be terminated.
1422 */
1423 for (k = 0; k < NUM_FC_PORTS; k++) {
1424 j = ro_size;
1425 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1426
1427 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1428 if (unlikely(i < 0)) {
1429 pr_err("%s: Port %d WWPN not found in VPD\n",
1430 __func__, k);
1431 rc = -ENODEV;
1432 goto out;
1433 }
1434
1435 j = pci_vpd_info_field_size(&vpd_data[i]);
1436 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1437 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1438 pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
1439 __func__, k);
1440 rc = -ENODEV;
1441 goto out;
1442 }
1443
1444 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1445 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1446 if (unlikely(rc)) {
1447 pr_err("%s: Fail to convert port %d WWPN to integer\n",
1448 __func__, k);
1449 rc = -ENODEV;
1450 goto out;
1451 }
1452 }
1453
1454out:
1455 pr_debug("%s: returning rc=%d\n", __func__, rc);
1456 return rc;
1457}
1458
1459/**
1460 * init_pcr() - initialize the provisioning and control registers
1461 * @cxlflash: Internal structure associated with the host.
1462 *
1463 * Also sets up fast access to the mapped registers and initializes AFU
1464 * command fields that never change.
1465 */
1466static void init_pcr(struct cxlflash_cfg *cfg)
1467{
1468 struct afu *afu = cfg->afu;
1469 struct sisl_ctrl_map *ctrl_map;
1470 int i;
1471
1472 for (i = 0; i < MAX_CONTEXT; i++) {
1473 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1474 /* disrupt any clients that could be running */
1475 /* e. g. clients that survived a master restart */
1476 writeq_be(0, &ctrl_map->rht_start);
1477 writeq_be(0, &ctrl_map->rht_cnt_id);
1478 writeq_be(0, &ctrl_map->ctx_cap);
1479 }
1480
1481 /* copy frequently used fields into afu */
1482 afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1483 /* ctx_hndl is 16 bits in CAIA */
1484 afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1485 afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1486
1487 /* Program the Endian Control for the master context */
1488 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1489
1490 /* initialize cmd fields that never change */
1491 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1492 afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1493 afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1494 afu->cmd[i].rcb.rrq = 0x0;
1495 }
1496}
1497
1498/**
1499 * init_global() - initialize AFU global registers
1500 * @cxlflash: Internal structure associated with the host.
1501 */
1502static int init_global(struct cxlflash_cfg *cfg)
1503{
1504 struct afu *afu = cfg->afu;
1505 u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
1506 int i = 0, num_ports = 0;
1507 int rc = 0;
1508 u64 reg;
1509
1510 rc = read_vpd(cfg, &wwpn[0]);
1511 if (rc) {
1512 pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
1513 goto out;
1514 }
1515
1516 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1517
1518 /* set up RRQ in AFU for master issued cmds */
1519 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1520 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1521
1522 /* AFU configuration */
1523 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1524 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1525 /* enable all auto retry options and control endianness */
1526 /* leave others at default: */
1527 /* CTX_CAP write protected, mbox_r does not clear on read and */
1528 /* checker on if dual afu */
1529 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1530
1531 /* global port select: select either port */
1532 if (afu->internal_lun) {
1533 /* only use port 0 */
1534 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1535 num_ports = NUM_FC_PORTS - 1;
1536 } else {
1537 writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1538 num_ports = NUM_FC_PORTS;
1539 }
1540
1541 for (i = 0; i < num_ports; i++) {
1542 /* unmask all errors (but they are still masked at AFU) */
1543 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1544 /* clear CRC error cnt & set a threshold */
1545 (void)readq_be(&afu->afu_map->global.
1546 fc_regs[i][FC_CNT_CRCERR / 8]);
1547 writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1548 [FC_CRC_THRESH / 8]);
1549
1550 /* set WWPNs. If already programmed, wwpn[i] is 0 */
1551 if (wwpn[i] != 0 &&
1552 afu_set_wwpn(afu, i,
1553 &afu->afu_map->global.fc_regs[i][0],
1554 wwpn[i])) {
1555 pr_err("%s: failed to set WWPN on port %d\n",
1556 __func__, i);
1557 rc = -EIO;
1558 goto out;
1559 }
1560 /* Programming WWPN back to back causes additional
1561 * offline/online transitions and a PLOGI
1562 */
1563 msleep(100);
1564
1565 }
1566
1567 /* set up master's own CTX_CAP to allow real mode, host translation */
1568 /* tbls, afu cmds and read/write GSCSI cmds. */
1569 /* First, unlock ctx_cap write by reading mbox */
1570 (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
1571 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1572 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1573 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1574 &afu->ctrl_map->ctx_cap);
1575 /* init heartbeat */
1576 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1577
1578out:
1579 return rc;
1580}
1581
1582/**
1583 * start_afu() - initializes and starts the AFU
1584 * @cxlflash: Internal structure associated with the host.
1585 */
1586static int start_afu(struct cxlflash_cfg *cfg)
1587{
1588 struct afu *afu = cfg->afu;
1589 struct afu_cmd *cmd;
1590
1591 int i = 0;
1592 int rc = 0;
1593
1594 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1595 cmd = &afu->cmd[i];
1596
1597 init_completion(&cmd->cevent);
1598 spin_lock_init(&cmd->slock);
1599 cmd->parent = afu;
1600 }
1601
1602 init_pcr(cfg);
1603
1604 /* initialize RRQ pointers */
1605 afu->hrrq_start = &afu->rrq_entry[0];
1606 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1607 afu->hrrq_curr = afu->hrrq_start;
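	/*
	 * Start expecting RRQ entries with the toggle bit set; the expected
	 * bit is flipped each time the queue wraps (see cxlflash_rrq_irq()).
	 */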
1608 afu->toggle = 1;
1609
1610 rc = init_global(cfg);
1611
1612 pr_debug("%s: returning rc=%d\n", __func__, rc);
1613 return rc;
1614}
1615
1616/**
1617 * init_mc() - create and register as the master context
1618 * @cxlflash: Internal structure associated with the host.
1619 *
1620 * Return:
1621 * 0 on success
1622 * -ENOMEM when unable to obtain a context from CXL services
1623 * A failure value from CXL services.
1624 */
1625static int init_mc(struct cxlflash_cfg *cfg)
1626{
1627 struct cxl_context *ctx;
1628 struct device *dev = &cfg->dev->dev;
1629 struct afu *afu = cfg->afu;
1630 int rc = 0;
1631 enum undo_level level;
1632
1633 ctx = cxl_get_context(cfg->dev);
1634 if (unlikely(!ctx))
1635 return -ENOMEM;
1636 cfg->mcctx = ctx;
1637
1638 /* Set it up as a master with the CXL */
1639 cxl_set_master(ctx);
1640
1641 /* During initialization reset the AFU to start from a clean slate */
1642 rc = cxl_afu_reset(cfg->mcctx);
1643 if (unlikely(rc)) {
1644 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1645 __func__, rc);
1646 level = RELEASE_CONTEXT;
1647 goto out;
1648 }
1649
1650 rc = cxl_allocate_afu_irqs(ctx, 3);
1651 if (unlikely(rc)) {
1652 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1653 __func__, rc);
1654 level = RELEASE_CONTEXT;
1655 goto out;
1656 }
1657
1658 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1659 "SISL_MSI_SYNC_ERROR");
1660 if (unlikely(rc <= 0)) {
1661 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1662 __func__);
1663 level = FREE_IRQ;
1664 goto out;
1665 }
1666
1667 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1668 "SISL_MSI_RRQ_UPDATED");
1669 if (unlikely(rc <= 0)) {
1670 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1671 __func__);
1672 level = UNMAP_ONE;
1673 goto out;
1674 }
1675
1676 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1677 "SISL_MSI_ASYNC_ERROR");
1678 if (unlikely(rc <= 0)) {
1679 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1680 __func__);
1681 level = UNMAP_TWO;
1682 goto out;
1683 }
1684
1685 rc = 0;
1686
1687 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1688 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1689 * element (pe) that is embedded in the context (ctx)
1690 */
1691 rc = start_context(cfg);
1692 if (unlikely(rc)) {
1693 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1694 level = UNMAP_THREE;
1695 goto out;
1696 }
1697ret:
1698 pr_debug("%s: returning rc=%d\n", __func__, rc);
1699 return rc;
1700out:
1701 term_mc(cfg, level);
1702 goto ret;
1703}
1704
1705/**
1706 * init_afu() - setup as master context and start AFU
1707 * @cxlflash: Internal structure associated with the host.
1708 *
1709 * This routine is a higher level of control for configuring the
1710 * AFU on probe and reset paths.
1711 *
1712 * Return:
1713 * 0 on success
1714 * -ENOMEM when unable to map the AFU MMIO space
1715 * A failure value from internal services.
1716 */
1717static int init_afu(struct cxlflash_cfg *cfg)
1718{
1719 u64 reg;
1720 int rc = 0;
1721 struct afu *afu = cfg->afu;
1722 struct device *dev = &cfg->dev->dev;
1723
1724 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1725
1726 rc = init_mc(cfg);
1727 if (rc) {
1728 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1729 __func__, rc);
1730 goto err1;
1731 }
1732
1733 /* Map the entire MMIO space of the AFU.
1734 */
1735 afu->afu_map = cxl_psa_map(cfg->mcctx);
1736 if (!afu->afu_map) {
1737 rc = -ENOMEM;
1738 term_mc(cfg, UNDO_START);
1739 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1740 goto err1;
1741 }
1742
1743 /* don't byte reverse on reading afu_version, else the string form */
1744 /* will be backwards */
1745 reg = afu->afu_map->global.regs.afu_version;
1746 memcpy(afu->version, &reg, 8);
1747 afu->interface_version =
1748 readq_be(&afu->afu_map->global.regs.interface_version);
1749 pr_debug("%s: afu version %s, interface version 0x%llX\n",
1750 __func__, afu->version, afu->interface_version);
1751
1752 rc = start_afu(cfg);
1753 if (rc) {
1754 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1755 __func__, rc);
1756 term_mc(cfg, UNDO_START);
1757 cxl_psa_unmap((void *)afu->afu_map);
1758 afu->afu_map = NULL;
1759 goto err1;
1760 }
1761
1762 afu_err_intr_init(cfg->afu);
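	/*
	 * Seed the cached command-room count so that send_cmd() can normally
	 * avoid an MMIO read of cmd_room on the fast path.
	 */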
1763 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1764
1765 /* Restore the LUN mappings */
1766 cxlflash_restore_luntable(cfg);
1767err1:
1768 pr_debug("%s: returning rc=%d\n", __func__, rc);
1769 return rc;
1770}
1771
1772/**
1773 * cxlflash_afu_sync() - builds and sends an AFU sync command
1774 * @afu: AFU associated with the host.
1775 * @ctx_hndl_u: Identifies context requesting sync.
1776 * @res_hndl_u: Identifies resource requesting sync.
1777 * @mode: Type of sync to issue (lightweight, heavyweight, global).
1778 *
1779 * The AFU can only take 1 sync command at a time. This routine enforces this
1780 * limitation by using a mutex to provide exclusive access to the AFU during
1781 * the sync. This design point requires calling threads not to be in interrupt
1782 * context due to the possibility of sleeping during concurrent sync operations.
1783 *
1784 * AFU sync operations are only necessary and allowed when the device is
1785 * operating normally. When not operating normally, sync requests can occur as
1786 * part of cleaning up resources associated with an adapter prior to removal.
1787 * In this scenario, these requests are simply ignored (safe due to the AFU
1788 * going away).
1789 *
1790 * Return:
1791 * 0 on success
1792 * -1 on failure
1793 */
1794int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1795 res_hndl_t res_hndl_u, u8 mode)
1796{
1797 struct cxlflash_cfg *cfg = afu->parent;
1798 struct afu_cmd *cmd = NULL;
1799 int rc = 0;
1800 int retry_cnt = 0;
1801 static DEFINE_MUTEX(sync_active);
1802
1803 if (cfg->state != STATE_NORMAL) {
1804 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1805 return 0;
1806 }
1807
1808 mutex_lock(&sync_active);
1809retry:
1810 cmd = cmd_checkout(afu);
1811 if (unlikely(!cmd)) {
1812 retry_cnt++;
1813 udelay(1000 * retry_cnt);
1814 if (retry_cnt < MC_RETRY_CNT)
1815 goto retry;
1816 pr_err("%s: could not get a free command\n", __func__);
1817 rc = -1;
1818 goto out;
1819 }
1820
1821 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
1822
1823 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
1824
1825 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1826 cmd->rcb.port_sel = 0x0; /* NA */
1827 cmd->rcb.lun_id = 0x0; /* NA */
1828 cmd->rcb.data_len = 0x0;
1829 cmd->rcb.data_ea = 0x0;
1830 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1831
1832 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
1833 cmd->rcb.cdb[1] = mode;
1834
1835 /* The cdb is aligned, no unaligned accessors required */
1836 *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
1837 *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
1838
1839 rc = send_cmd(afu, cmd);
1840 if (unlikely(rc))
1841 goto out;
1842
1843 wait_resp(afu, cmd);
1844
1845 /* set on timeout */
1846 if (unlikely((cmd->sa.ioasc != 0) ||
1847 (cmd->sa.host_use_b[0] & B_ERROR)))
1848 rc = -1;
1849out:
1850 mutex_unlock(&sync_active);
1851 if (cmd)
1852 cmd_checkin(cmd);
1853 pr_debug("%s: returning rc=%d\n", __func__, rc);
1854 return rc;
1855}
1856
1857/**
1858 * afu_reset() - resets the AFU
1859 * @cfg: Internal structure associated with the host.
c21e0bbf
MO
1860 *
1861 * Return:
1862 * 0 on success
1863 * A failure value from internal services.
1864 */
15305514 1865static int afu_reset(struct cxlflash_cfg *cfg)
c21e0bbf
MO
1866{
1867 int rc = 0;
1868	/* Stop the context before the reset. Since the context is
1869	 * no longer available, restart it after the reset is complete.
1870	 */
1871
1872 term_afu(cfg);
1873
1874 rc = init_afu(cfg);
1875
1876 pr_debug("%s: returning rc=%d\n", __func__, rc);
1877 return rc;
1878}
1879
15305514
MO
1880/**
1881 * cxlflash_eh_device_reset_handler() - reset a single LUN
1882 * @scp: SCSI command from stack identifying the LUN to reset.
1883 *
1884 * Return:
1885 * SUCCESS as defined in scsi/scsi.h
1886 * FAILED as defined in scsi/scsi.h
1887 */
1888static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1889{
1890 int rc = SUCCESS;
1891 struct Scsi_Host *host = scp->device->host;
1892 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1893 struct afu *afu = cfg->afu;
1894 int rcr = 0;
1895
1896 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1897 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1898 host->host_no, scp->device->channel,
1899 scp->device->id, scp->device->lun,
1900 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1901 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1902 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1903 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1904
1905 switch (cfg->state) {
1906 case STATE_NORMAL:
1907 rcr = send_tmf(afu, scp, TMF_LUN_RESET);
1908 if (unlikely(rcr))
1909 rc = FAILED;
1910 break;
1911 case STATE_RESET:
1912 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1913 if (cfg->state == STATE_NORMAL)
1914 break;
1915 /* fall through */
1916 default:
1917 rc = FAILED;
1918 break;
1919 }
1920
1921 pr_debug("%s: returning rc=%d\n", __func__, rc);
1922 return rc;
1923}
1924
1925/**
1926 * cxlflash_eh_host_reset_handler() - reset the host adapter
1927 * @scp: SCSI command from stack identifying host.
1928 *
1929 * Return:
1930 * SUCCESS as defined in scsi/scsi.h
1931 * FAILED as defined in scsi/scsi.h
1932 */
1933static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1934{
1935 int rc = SUCCESS;
1936 int rcr = 0;
1937 struct Scsi_Host *host = scp->device->host;
1938 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1939
1940 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1941 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1942 host->host_no, scp->device->channel,
1943 scp->device->id, scp->device->lun,
1944 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1945 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1946 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1947 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1948
1949 switch (cfg->state) {
1950 case STATE_NORMAL:
1951 cfg->state = STATE_RESET;
1952 scsi_block_requests(cfg->host);
1953 cxlflash_mark_contexts_error(cfg);
1954 rcr = afu_reset(cfg);
1955 if (rcr) {
1956 rc = FAILED;
1957 cfg->state = STATE_FAILTERM;
1958 } else
1959 cfg->state = STATE_NORMAL;
1960 wake_up_all(&cfg->reset_waitq);
1961 scsi_unblock_requests(cfg->host);
1962 break;
1963 case STATE_RESET:
1964 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1965 if (cfg->state == STATE_NORMAL)
1966 break;
1967 /* fall through */
1968 default:
1969 rc = FAILED;
1970 break;
1971 }
1972
1973 pr_debug("%s: returning rc=%d\n", __func__, rc);
1974 return rc;
1975}
1976
1977/**
1978 * cxlflash_change_queue_depth() - change the queue depth for the device
1979 * @sdev: SCSI device destined for queue depth change.
1980 * @qdepth: Requested queue depth value to set.
1981 *
1982 * The requested queue depth is capped to the maximum supported value.
1983 *
1984 * Return: The actual queue depth set.
1985 */
1986static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
1987{
1988
1989 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
1990 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
1991
1992 scsi_change_queue_depth(sdev, qdepth);
1993 return sdev->queue_depth;
1994}
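/*
 * Hypothetical usage sketch (not part of this file): whether the request
 * arrives from the midlayer or from a sysfs write to the device's
 * queue_depth attribute, anything above CXLFLASH_MAX_CMDS_PER_LUN is
 * silently clamped to that maximum by the handler above.
 */
static void example_tune_depth(struct scsi_device *sdev)
{
	int depth = cxlflash_change_queue_depth(sdev,
						2 * CXLFLASH_MAX_CMDS_PER_LUN);

	pr_debug("%s: queue depth is now %d\n", __func__, depth);
}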
1995
1996/**
1997 * cxlflash_show_port_status() - queries and presents the current port status
e0f01a21
MO
1998 * @port: Desired port for status reporting.
1999 * @afu: AFU owning the specified port.
15305514
MO
2000 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2001 *
2002 * Return: The size of the ASCII string returned in @buf.
2003 */
e0f01a21 2004static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
15305514 2005{
15305514 2006 char *disp_status;
15305514 2007 u64 status;
e0f01a21 2008 __be64 __iomem *fc_regs;
15305514 2009
e0f01a21 2010 if (port >= NUM_FC_PORTS)
15305514
MO
2011 return 0;
2012
2013 fc_regs = &afu->afu_map->global.fc_regs[port][0];
e0f01a21
MO
2014 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
2015 status &= FC_MTIP_STATUS_MASK;
15305514
MO
2016
2017 if (status == FC_MTIP_STATUS_ONLINE)
2018 disp_status = "online";
2019 else if (status == FC_MTIP_STATUS_OFFLINE)
2020 disp_status = "offline";
2021 else
2022 disp_status = "unknown";
2023
e0f01a21
MO
2024 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2025}
2026
2027/**
2028 * port0_show() - queries and presents the current status of port 0
2029 * @dev: Generic device associated with the host owning the port.
2030 * @attr: Device attribute representing the port.
2031 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2032 *
2033 * Return: The size of the ASCII string returned in @buf.
2034 */
2035static ssize_t port0_show(struct device *dev,
2036 struct device_attribute *attr,
2037 char *buf)
2038{
2039 struct Scsi_Host *shost = class_to_shost(dev);
2040 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2041 struct afu *afu = cfg->afu;
2042
2043 return cxlflash_show_port_status(0, afu, buf);
15305514
MO
2044}
2045
2046/**
e0f01a21
MO
2047 * port1_show() - queries and presents the current status of port 1
2048 * @dev: Generic device associated with the host owning the port.
2049 * @attr: Device attribute representing the port.
2050 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2051 *
2052 * Return: The size of the ASCII string returned in @buf.
2053 */
2054static ssize_t port1_show(struct device *dev,
2055 struct device_attribute *attr,
2056 char *buf)
2057{
2058 struct Scsi_Host *shost = class_to_shost(dev);
2059 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2060 struct afu *afu = cfg->afu;
2061
2062 return cxlflash_show_port_status(1, afu, buf);
2063}
2064
2065/**
2066 * lun_mode_show() - presents the current LUN mode of the host
15305514 2067 * @dev: Generic device associated with the host.
e0f01a21 2068 * @attr: Device attribute representing the LUN mode.
15305514
MO
2069 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2070 *
2071 * Return: The size of the ASCII string returned in @buf.
2072 */
e0f01a21
MO
2073static ssize_t lun_mode_show(struct device *dev,
2074 struct device_attribute *attr, char *buf)
15305514
MO
2075{
2076 struct Scsi_Host *shost = class_to_shost(dev);
2077 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2078 struct afu *afu = cfg->afu;
2079
e0f01a21 2080 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
15305514
MO
2081}
2082
2083/**
e0f01a21 2084 * lun_mode_store() - sets the LUN mode of the host
15305514 2085 * @dev: Generic device associated with the host.
e0f01a21 2086 * @attr: Device attribute representing the LUN mode.
15305514
MO
2087 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2088 * @count: Length of data residing in @buf.
2089 *
2090 * The CXL Flash AFU supports a dummy LUN mode where the external
2091 * links and storage are not required. Space on the FPGA is used
2092 * to create one or two small LUNs, which are presented to the system
2093 * as if they were normal storage devices. This feature is useful
2094 * during development and also provides manufacturing with a way
2095 * to test the AFU without an actual device.
2096 *
2097 * 0 = external LUN[s] (default)
2098 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2099 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2100 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2101 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2102 *
2103 * Return: The number of bytes consumed from @buf (i.e. @count).
2104 */
e0f01a21
MO
2105static ssize_t lun_mode_store(struct device *dev,
2106 struct device_attribute *attr,
2107 const char *buf, size_t count)
15305514
MO
2108{
2109 struct Scsi_Host *shost = class_to_shost(dev);
2110 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2111 struct afu *afu = cfg->afu;
2112 int rc;
2113 u32 lun_mode;
2114
2115 rc = kstrtouint(buf, 10, &lun_mode);
2116 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2117 afu->internal_lun = lun_mode;
2118 afu_reset(cfg);
2119 scsi_scan_host(cfg->host);
2120 }
2121
2122 return count;
2123}
2124
2125/**
e0f01a21 2126 * ioctl_version_show() - presents the current ioctl version of the host
15305514
MO
2127 * @dev: Generic device associated with the host.
2128 * @attr: Device attribute representing the ioctl version.
2129 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2130 *
2131 * Return: The size of the ASCII string returned in @buf.
2132 */
e0f01a21
MO
2133static ssize_t ioctl_version_show(struct device *dev,
2134 struct device_attribute *attr, char *buf)
15305514
MO
2135{
2136 return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2137}
2138
2139/**
e0f01a21
MO
2140 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2141 * @port: Desired port whose LUN table is to be reported.
2142 * @afu: AFU owning the specified port.
2143 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2144 *
2145 * Return: The size of the ASCII string returned in @buf.
2146 */
2147static ssize_t cxlflash_show_port_lun_table(u32 port,
2148 struct afu *afu,
2149 char *buf)
2150{
2151 int i;
2152 ssize_t bytes = 0;
2153 __be64 __iomem *fc_port;
2154
2155 if (port >= NUM_FC_PORTS)
2156 return 0;
2157
2158 fc_port = &afu->afu_map->global.fc_port[port][0];
2159
2160 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2161 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2162 "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2163 return bytes;
2164}
2165
2166/**
2167 * port0_lun_table_show() - presents the current LUN table of port 0
2168 * @dev: Generic device associated with the host owning the port.
2169 * @attr: Device attribute representing the port.
2170 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2171 *
2172 * Return: The size of the ASCII string returned in @buf.
2173 */
2174static ssize_t port0_lun_table_show(struct device *dev,
2175 struct device_attribute *attr,
2176 char *buf)
2177{
2178 struct Scsi_Host *shost = class_to_shost(dev);
2179 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2180 struct afu *afu = cfg->afu;
2181
2182 return cxlflash_show_port_lun_table(0, afu, buf);
2183}
2184
2185/**
2186 * port1_lun_table_show() - presents the current LUN table of port 1
2187 * @dev: Generic device associated with the host owning the port.
2188 * @attr: Device attribute representing the port.
2189 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2190 *
2191 * Return: The size of the ASCII string returned in @buf.
2192 */
2193static ssize_t port1_lun_table_show(struct device *dev,
2194 struct device_attribute *attr,
2195 char *buf)
2196{
2197 struct Scsi_Host *shost = class_to_shost(dev);
2198 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2199 struct afu *afu = cfg->afu;
2200
2201 return cxlflash_show_port_lun_table(1, afu, buf);
2202}
2203
2204/**
2205 * mode_show() - presents the current mode of the device
15305514
MO
2206 * @dev: Generic device associated with the device.
2207 * @attr: Device attribute representing the device mode.
2208 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2209 *
2210 * Return: The size of the ASCII string returned in @buf.
2211 */
e0f01a21
MO
2212static ssize_t mode_show(struct device *dev,
2213 struct device_attribute *attr, char *buf)
15305514
MO
2214{
2215 struct scsi_device *sdev = to_scsi_device(dev);
2216
e0f01a21
MO
2217 return scnprintf(buf, PAGE_SIZE, "%s\n",
2218 sdev->hostdata ? "superpipe" : "legacy");
15305514
MO
2219}
2220
2221/*
2222 * Host attributes
2223 */
e0f01a21
MO
2224static DEVICE_ATTR_RO(port0);
2225static DEVICE_ATTR_RO(port1);
2226static DEVICE_ATTR_RW(lun_mode);
2227static DEVICE_ATTR_RO(ioctl_version);
2228static DEVICE_ATTR_RO(port0_lun_table);
2229static DEVICE_ATTR_RO(port1_lun_table);
15305514
MO
2230
2231static struct device_attribute *cxlflash_host_attrs[] = {
2232 &dev_attr_port0,
2233 &dev_attr_port1,
2234 &dev_attr_lun_mode,
2235 &dev_attr_ioctl_version,
e0f01a21
MO
2236 &dev_attr_port0_lun_table,
2237 &dev_attr_port1_lun_table,
15305514
MO
2238 NULL
2239};
2240
2241/*
2242 * Device attributes
2243 */
e0f01a21 2244static DEVICE_ATTR_RO(mode);
15305514
MO
2245
2246static struct device_attribute *cxlflash_dev_attrs[] = {
2247 &dev_attr_mode,
2248 NULL
2249};
2250
2251/*
2252 * Host template
2253 */
2254static struct scsi_host_template driver_template = {
2255 .module = THIS_MODULE,
2256 .name = CXLFLASH_ADAPTER_NAME,
2257 .info = cxlflash_driver_info,
2258 .ioctl = cxlflash_ioctl,
2259 .proc_name = CXLFLASH_NAME,
2260 .queuecommand = cxlflash_queuecommand,
2261 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2262 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2263 .change_queue_depth = cxlflash_change_queue_depth,
2264 .cmd_per_lun = 16,
2265 .can_queue = CXLFLASH_MAX_CMDS,
2266 .this_id = -1,
2267 .sg_tablesize = SG_NONE, /* No scatter gather support. */
2268 .max_sectors = CXLFLASH_MAX_SECTORS,
2269 .use_clustering = ENABLE_CLUSTERING,
2270 .shost_attrs = cxlflash_host_attrs,
2271 .sdev_attrs = cxlflash_dev_attrs,
2272};
2273
2274/*
2275 * Device dependent values
2276 */
2277static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
2278
2279/*
2280 * PCI device binding table
2281 */
2282static struct pci_device_id cxlflash_pci_table[] = {
2283 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2284 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2285 {}
2286};
2287
2288MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2289
c21e0bbf
MO
2290/**
2291 * cxlflash_worker_thread() - work thread handler for the AFU
2292 * @work: Work structure contained within the cxlflash_cfg associated with the host.
2293 *
2294 * Handles the following events:
2295 * - Link reset which cannot be performed on interrupt context due to
2296 * blocking up to a few seconds
2297 * - Read AFU command room
2298 */
2299static void cxlflash_worker_thread(struct work_struct *work)
2300{
5cdac81a
MO
2301 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2302 work_q);
c21e0bbf
MO
2303 struct afu *afu = cfg->afu;
2304 int port;
2305 ulong lock_flags;
2306
5cdac81a
MO
2307 /* Avoid MMIO if the device has failed */
2308
2309 if (cfg->state != STATE_NORMAL)
2310 return;
2311
c21e0bbf
MO
2312 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2313
2314 if (cfg->lr_state == LINK_RESET_REQUIRED) {
2315 port = cfg->lr_port;
2316 if (port < 0)
2317 pr_err("%s: invalid port index %d\n", __func__, port);
2318 else {
2319 spin_unlock_irqrestore(cfg->host->host_lock,
2320 lock_flags);
2321
2322 /* The reset can block... */
2323 afu_link_reset(afu, port,
2324 &afu->afu_map->
2325 global.fc_regs[port][0]);
2326 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2327 }
2328
2329 cfg->lr_state = LINK_RESET_COMPLETE;
2330 }
2331
2332 if (afu->read_room) {
2333 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2334 afu->read_room = false;
2335 }
2336
2337 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2338}
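/*
 * Sketch of how an interrupt path is expected to hand work to the thread
 * above (an assumption based on the lr_state/read_room handshake; the actual
 * interrupt handlers live earlier in this file).  A link reset can block, so
 * the handler only records the request and defers.  Locking of lr_state and
 * lr_port is elided for brevity.
 */
static void example_defer_link_reset(struct cxlflash_cfg *cfg, int port)
{
	cfg->lr_port = port;
	cfg->lr_state = LINK_RESET_REQUIRED;
	schedule_work(&cfg->work_q);	/* runs cxlflash_worker_thread() */
}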
2339
2340/**
2341 * cxlflash_probe() - PCI entry point to add host
2342 * @pdev: PCI device associated with the host.
2343 * @dev_id: PCI device id associated with device.
2344 *
2345 * Return: 0 on success / non-zero on failure
2346 */
2347static int cxlflash_probe(struct pci_dev *pdev,
2348 const struct pci_device_id *dev_id)
2349{
2350 struct Scsi_Host *host;
2351 struct cxlflash_cfg *cfg = NULL;
2352 struct device *phys_dev;
2353 struct dev_dependent_vals *ddv;
2354 int rc = 0;
2355
2356 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2357 __func__, pdev->irq);
2358
2359 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2360 driver_template.max_sectors = ddv->max_sectors;
2361
2362 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2363 if (!host) {
2364 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2365 __func__);
2366 rc = -ENOMEM;
2367 goto out;
2368 }
2369
2370 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2371 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2372 host->max_channel = NUM_FC_PORTS - 1;
2373 host->unique_id = host->host_no;
2374 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2375
2376 cfg = (struct cxlflash_cfg *)host->hostdata;
2377 cfg->host = host;
2378 rc = alloc_mem(cfg);
2379 if (rc) {
2380 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2381 __func__);
2382 rc = -ENOMEM;
2383 goto out;
2384 }
2385
2386 cfg->init_state = INIT_STATE_NONE;
2387 cfg->dev = pdev;
2cb79266
MO
2388
2389 /*
2390 * The promoted LUNs move to the top of the LUN table. The rest stay
2391 * on the bottom half. The bottom half grows from the end
2392 * (index = 255), whereas the top half grows from the beginning
2393 * (index = 0).
2394 */
2395 cfg->promote_lun_index = 0;
2396 cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2397 cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2398
c21e0bbf
MO
2399 cfg->dev_id = (struct pci_device_id *)dev_id;
2400 cfg->mcctx = NULL;
c21e0bbf
MO
2401
2402 init_waitqueue_head(&cfg->tmf_waitq);
439e85c1 2403 init_waitqueue_head(&cfg->reset_waitq);
c21e0bbf
MO
2404
2405 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2406 cfg->lr_state = LINK_RESET_INVALID;
2407 cfg->lr_port = -1;
65be2c79
MO
2408 mutex_init(&cfg->ctx_tbl_list_mutex);
2409 mutex_init(&cfg->ctx_recovery_mutex);
0a27ae51 2410 init_rwsem(&cfg->ioctl_rwsem);
65be2c79
MO
2411 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2412 INIT_LIST_HEAD(&cfg->lluns);
c21e0bbf
MO
2413
2414 pci_set_drvdata(pdev, cfg);
2415
2416	/* Use the special service provided to look up the physical
2417	 * PCI device, since we are called during probe of the virtual
2418	 * PCI host bus (vphb).
2419	 */
2420 phys_dev = cxl_get_phys_dev(pdev);
2421 if (!dev_is_pci(phys_dev)) {
2422 pr_err("%s: not a pci dev\n", __func__);
2423 rc = -ENODEV;
2424 goto out_remove;
2425 }
2426 cfg->parent_dev = to_pci_dev(phys_dev);
2427
2428 cfg->cxl_afu = cxl_pci_to_afu(pdev);
2429
2430 rc = init_pci(cfg);
2431 if (rc) {
2432 dev_err(&pdev->dev, "%s: call to init_pci "
2433 "failed rc=%d!\n", __func__, rc);
2434 goto out_remove;
2435 }
2436 cfg->init_state = INIT_STATE_PCI;
2437
2438 rc = init_afu(cfg);
2439 if (rc) {
2440 dev_err(&pdev->dev, "%s: call to init_afu "
2441 "failed rc=%d!\n", __func__, rc);
2442 goto out_remove;
2443 }
2444 cfg->init_state = INIT_STATE_AFU;
2445
2446
2447 rc = init_scsi(cfg);
2448 if (rc) {
2449 dev_err(&pdev->dev, "%s: call to init_scsi "
2450 "failed rc=%d!\n", __func__, rc);
2451 goto out_remove;
2452 }
2453 cfg->init_state = INIT_STATE_SCSI;
2454
2455out:
2456 pr_debug("%s: returning rc=%d\n", __func__, rc);
2457 return rc;
2458
2459out_remove:
2460 cxlflash_remove(pdev);
2461 goto out;
2462}
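/*
 * Illustrative sketch only (the real allocator lives in the LUN table code
 * elsewhere in the driver): promoted LUNs consume slots from the top of the
 * table while per-port LUNs consume slots from the bottom, matching the
 * promote_lun_index/last_lun_index initialization in cxlflash_probe() above.
 */
static int example_alloc_lun_index(struct cxlflash_cfg *cfg, bool promote,
				   u32 port)
{
	if (promote) {
		if (cfg->promote_lun_index >= cfg->last_lun_index[port])
			return -ENOSPC;		/* halves would collide */
		return cfg->promote_lun_index++;
	}

	if (cfg->last_lun_index[port] <= cfg->promote_lun_index)
		return -ENOSPC;
	return cfg->last_lun_index[port]--;
}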
2463
0a27ae51
MO
2464/**
2465 * drain_ioctls() - wait until all currently executing ioctls have completed
2466 * @cfg: Internal structure associated with the host.
2467 *
2468 * Obtain write access to read/write semaphore that wraps ioctl
2469 * handling to 'drain' ioctls currently executing.
2470 */
2471static void drain_ioctls(struct cxlflash_cfg *cfg)
2472{
2473 down_write(&cfg->ioctl_rwsem);
2474 up_write(&cfg->ioctl_rwsem);
2475}
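/*
 * Sketch of the read side that makes the drain above effective (assumed to
 * mirror the ioctl entry/exit paths in this driver): every ioctl holds the
 * semaphore for read, so the writer in drain_ioctls() blocks until all
 * in-flight ioctls have returned.
 */
static int example_ioctl_body(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	down_read(&cfg->ioctl_rwsem);
	/* ... perform the ioctl work here ... */
	up_read(&cfg->ioctl_rwsem);

	return rc;
}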
2476
5cdac81a
MO
2477/**
2478 * cxlflash_pci_error_detected() - called when a PCI error is detected
2479 * @pdev: PCI device struct.
2480 * @state: PCI channel state.
2481 *
2482 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2483 */
2484static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2485 pci_channel_state_t state)
2486{
65be2c79 2487 int rc = 0;
5cdac81a
MO
2488 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2489 struct device *dev = &cfg->dev->dev;
2490
2491 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2492
2493 switch (state) {
2494 case pci_channel_io_frozen:
439e85c1 2495 cfg->state = STATE_RESET;
5cdac81a 2496 scsi_block_requests(cfg->host);
0a27ae51 2497 drain_ioctls(cfg);
65be2c79
MO
2498 rc = cxlflash_mark_contexts_error(cfg);
2499 if (unlikely(rc))
2500 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2501 __func__, rc);
5cdac81a
MO
2502 term_mc(cfg, UNDO_START);
2503 stop_afu(cfg);
5cdac81a
MO
2504 return PCI_ERS_RESULT_NEED_RESET;
2505 case pci_channel_io_perm_failure:
2506 cfg->state = STATE_FAILTERM;
439e85c1 2507 wake_up_all(&cfg->reset_waitq);
5cdac81a
MO
2508 scsi_unblock_requests(cfg->host);
2509 return PCI_ERS_RESULT_DISCONNECT;
2510 default:
2511 break;
2512 }
2513 return PCI_ERS_RESULT_NEED_RESET;
2514}
2515
2516/**
2517 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2518 * @pdev: PCI device struct.
2519 *
2520 * This routine is called by the pci error recovery code after the PCI
2521 * slot has been reset, just before we should resume normal operations.
2522 *
2523 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2524 */
2525static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2526{
2527 int rc = 0;
2528 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2529 struct device *dev = &cfg->dev->dev;
2530
2531 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2532
2533 rc = init_afu(cfg);
2534 if (unlikely(rc)) {
2535 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2536 return PCI_ERS_RESULT_DISCONNECT;
2537 }
2538
2539 return PCI_ERS_RESULT_RECOVERED;
2540}
2541
2542/**
2543 * cxlflash_pci_resume() - called when normal operation can resume
2544 * @pdev: PCI device struct
2545 */
2546static void cxlflash_pci_resume(struct pci_dev *pdev)
2547{
2548 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2549 struct device *dev = &cfg->dev->dev;
2550
2551 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2552
2553 cfg->state = STATE_NORMAL;
439e85c1 2554 wake_up_all(&cfg->reset_waitq);
5cdac81a
MO
2555 scsi_unblock_requests(cfg->host);
2556}
2557
2558static const struct pci_error_handlers cxlflash_err_handler = {
2559 .error_detected = cxlflash_pci_error_detected,
2560 .slot_reset = cxlflash_pci_slot_reset,
2561 .resume = cxlflash_pci_resume,
2562};
2563
c21e0bbf
MO
2564/*
2565 * PCI device structure
2566 */
2567static struct pci_driver cxlflash_driver = {
2568 .name = CXLFLASH_NAME,
2569 .id_table = cxlflash_pci_table,
2570 .probe = cxlflash_probe,
2571 .remove = cxlflash_remove,
5cdac81a 2572 .err_handler = &cxlflash_err_handler,
c21e0bbf
MO
2573};
2574
2575/**
2576 * init_cxlflash() - module entry point
2577 *
2578 * Return: 0 on success / non-zero on failure
2579 */
2580static int __init init_cxlflash(void)
2581{
2582 pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
2583 __func__, CXLFLASH_DRIVER_DATE);
2584
65be2c79
MO
2585 cxlflash_list_init();
2586
c21e0bbf
MO
2587 return pci_register_driver(&cxlflash_driver);
2588}
2589
2590/**
2591 * exit_cxlflash() - module exit point
2592 */
2593static void __exit exit_cxlflash(void)
2594{
65be2c79
MO
2595 cxlflash_term_global_luns();
2596 cxlflash_free_errpage();
2597
c21e0bbf
MO
2598 pci_unregister_driver(&cxlflash_driver);
2599}
2600
2601module_init(init_cxlflash);
2602module_exit(exit_cxlflash);