/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>
MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);
	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}
	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}
	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}
	/*
	 * We encountered an error. Set scp->result based on the nature
	 * of the error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}
	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits the command that has either completed or timed out
 * to the SCSI stack. Checks the AFU command back into the command pool for
 * non-internal (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}
/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}
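/*
 * Illustrative sketch (not part of the driver): the loop above polls the
 * reset register until the hardware clears the request bit, doubling the
 * udelay() interval on each pass. The same shape in standalone C, with
 * hypothetical read_flag()/wait_us() callbacks standing in for the MMIO
 * read and udelay():
 *
 *	#include <stdbool.h>
 *
 *	static bool poll_with_backoff(bool (*read_flag)(void),
 *				      void (*wait_us)(unsigned int),
 *				      unsigned int max_retry)
 *	{
 *		unsigned int nretry = 0;
 *
 *		do {
 *			if (!read_flag())
 *				return true;	// hardware cleared the bit
 *			wait_us(1U << nretry);	// 1us, 2us, 4us, ...
 *		} while (nretry++ < max_retry);
 *
 *		return false;			// timed out
 *	}
 */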
/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->ioarrin);
}
/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
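/*
 * Illustrative sketch (not part of the driver): the 'room' optimization
 * above amortizes the cost of the cmd_room MMIO read by caching a credit
 * count and only re-reading the register once the cache is exhausted. In
 * standalone C, with a hypothetical read_room() in place of readq_be():
 *
 *	static int claim_credit(long *cached, long (*read_room)(void))
 *	{
 *		if (--(*cached) >= 0)
 *			return 0;		// fast path: no MMIO
 *
 *		*cached = read_room();		// slow path: refresh from HW
 *		if (*cached <= 0) {
 *			*cached = 0;
 *			return -1;		// full: report "host busy"
 *		}
 *		(*cached)--;			// consume one credit
 *		return 0;
 *	}
 */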
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&afu->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&afu->hsq_slock, lock_flags);

	*afu->hsq_curr = cmd->rcb;
	if (afu->hsq_curr < afu->hsq_end)
		afu->hsq_curr++;
	else
		afu->hsq_curr = afu->hsq_start;
	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);

	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
		readq_be(&afu->host_map->sq_head),
		readq_be(&afu->host_map->sq_tail));
	return rc;
}
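/*
 * Illustrative sketch (not part of the driver): submission above is a
 * single-producer ring - take a credit, copy the entry, advance the cursor
 * with wrap, then publish the new tail to the hardware. A minimal model,
 * assuming the caller serializes pushes as the driver does with hsq_slock:
 *
 *	#include <stdint.h>
 *
 *	struct ring {
 *		uint64_t *entries;
 *		unsigned int nents;
 *		unsigned int curr;	// producer cursor
 *		unsigned int credits;	// free slots, returned on completion
 *	};
 *
 *	static int ring_push(struct ring *r, uint64_t entry)
 *	{
 *		if (r->credits == 0)
 *			return -1;	// full: caller reports "host busy"
 *		r->credits--;
 *
 *		r->entries[r->curr] = entry;
 *		if (++r->curr == r->nents)
 *			r->curr = 0;	// wrap back to the start
 *		// MMIO write of the new tail pointer would go here
 *		return 0;
 *	}
 */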
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
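/*
 * Illustrative sketch (not part of the driver): tmf_active plus tmf_waitq
 * form a single-flight gate - one TMF in flight, later callers sleep until
 * it completes. The same shape in standalone C using pthreads:
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t gate_waitq = PTHREAD_COND_INITIALIZER;
 *	static bool gate_active;
 *
 *	static void gate_enter(void)	// cf. marking tmf_active above
 *	{
 *		pthread_mutex_lock(&gate_lock);
 *		while (gate_active)
 *			pthread_cond_wait(&gate_waitq, &gate_lock);
 *		gate_active = true;
 *		pthread_mutex_unlock(&gate_lock);
 *	}
 *
 *	static void gate_exit(void)	// cf. the wake in cmd_complete()
 *	{
 *		pthread_mutex_lock(&gate_lock);
 *		gate_active = false;
 *		pthread_cond_broadcast(&gate_waitq);
 *		pthread_mutex_unlock(&gate_lock);
 *	}
 */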
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_disable(&afu->irqpoll);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}
/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
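/*
 * Illustrative sketch (not part of the driver): the wait above escalates -
 * the k-th retry sleeps k * 100ms, so n retries wait 100 * n * (n + 1) / 2
 * milliseconds in total (the quoted 1.5 seconds assumes MC_RETRY_CNT is 5).
 * As a standalone helper with hypothetical done()/sleep_ms() callbacks:
 *
 *	#include <stdbool.h>
 *
 *	static bool wait_done_escalating(bool (*done)(void),
 *					 void (*sleep_ms)(unsigned int),
 *					 unsigned int max_retry)
 *	{
 *		unsigned int retry = 0;
 *
 *		while (!done()) {
 *			if (++retry >= max_retry)
 *				return false;	// gave up waiting
 *			sleep_ms(100 * retry);	// 100ms, 200ms, 300ms, ...
 *		}
 *		return true;
 *	}
 */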
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
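/*
 * Illustrative sketch (not part of the driver): both waiters treat a status
 * read of all ones as a sign the device has been removed or fenced - PCI
 * reads from a dead device return ~0 - and shrink the retry budget rather
 * than burning it in full. The common shape in standalone C:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool wait_port_state(uint64_t (*read_status)(void),
 *				    void (*sleep_ms)(unsigned int),
 *				    uint64_t mask, uint64_t wanted,
 *				    unsigned int nretry)
 *	{
 *		uint64_t status;
 *
 *		do {
 *			sleep_ms(1);
 *			status = read_status();
 *			if (status == ~0ULL)	// all ones: device gone
 *				nretry /= 2;	// halve the budget, as above
 *		} while ((status & mask) != wanted && nretry--);
 *
 *		return (status & mask) == wanted;
 *	}
 */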
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* First switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* Switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 *
 * NOTE: The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 * as complex and complains because it is not wrapped with parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET), \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(2),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(1),
	{ 0x0, "", 0, 0 }		/* terminator */
};
/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	BUILD_BUG_ON(ainfo[ARRAY_SIZE(ainfo) - 1].status != 0);

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
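/*
 * Illustrative sketch (not part of the driver): ainfo[] is scanned as a
 * sentinel-terminated table - the BUILD_BUG_ON above guarantees the final
 * entry carries a zero status, so the walk needs no explicit length. The
 * pattern in miniature:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	struct info { uint64_t status; const char *desc; };
 *
 *	static const struct info *table_find(const struct info *tbl,
 *					     uint64_t status)
 *	{
 *		for (; tbl->status; tbl++)	// stops at the zero sentinel
 *			if (tbl->status == status)
 *				return tbl;
 *		return NULL;			// unknown status bit
 *	}
 */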
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @afu:	AFU associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct afu *afu, struct list_head *doneq, int budget)
{
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = afu->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&afu->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return num_hrrq;
}
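/*
 * Illustrative sketch (not part of the driver): the RRQ is consumed via a
 * toggle bit rather than a shared producer index - an entry is ready when
 * its toggle matches the consumer's expected value, and the expectation
 * flips each time the consumer wraps. In miniature, with the toggle kept
 * in bit 0 of each entry:
 *
 *	#include <stdint.h>
 *
 *	#define TOGGLE	0x1ULL
 *
 *	static unsigned int drain_ring(const volatile uint64_t *ring,
 *				       unsigned int nents,
 *				       unsigned int *curr, uint64_t *toggle)
 *	{
 *		unsigned int harvested = 0;
 *
 *		while ((ring[*curr] & TOGGLE) == *toggle) {
 *			// process payload: ring[*curr] & ~TOGGLE
 *			harvested++;
 *			if (++(*curr) == nents) {
 *				*curr = 0;
 *				*toggle ^= TOGGLE;	// flip on wrap
 *			}
 *		}
 *		return harvested;
 *	}
 */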
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct afu *afu = container_of(irqpoll, struct afu, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(afu, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&afu->irqpoll);
		spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(afu, &doneq, -1);
	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}
out:
	return IRQ_HANDLED;
}
/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
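/*
 * Illustrative sketch (not part of the driver): a WWPN arrives in VPD as
 * ASCII hex with no terminator, hence the bounded copy into a scratch
 * buffer before conversion. Note the kstrtoul() call above passes WWPN_LEN
 * as the base, which assumes WWPN_LEN is 16 - the length of an 8-byte WWPN
 * in hex characters and, coincidentally, the base required. In standalone C:
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	#define WWPN_ASCII_LEN	16	// 8-byte WWPN as hex characters
 *
 *	static int parse_wwpn(const char *field, unsigned long long *wwpn)
 *	{
 *		char tmp[WWPN_ASCII_LEN + 1];
 *		char *end;
 *
 *		memcpy(tmp, field, WWPN_ASCII_LEN);
 *		tmp[WWPN_ASCII_LEN] = '\0';	// terminate for strtoull()
 *		*wwpn = strtoull(tmp, &end, 16);
 *		return (end == tmp + WWPN_ASCII_LEN) ? 0 : -1;
 *	}
 */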
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
}
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	if (afu_is_sq_cmd_mode(afu)) {
		writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
		writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	init_pcr(cfg);

	/* Initialize RRQ */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	spin_lock_init(&afu->hrrq_slock);

	/* Initialize SQ */
	if (afu_is_sq_cmd_mode(afu)) {
		memset(&afu->sq, 0, sizeof(afu->sq));
		afu->hsq_start = &afu->sq[0];
		afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
		afu->hsq_curr = afu->hsq_start;

		spin_lock_init(&afu->hsq_slock);
		atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
	}

	/* Initialize IRQ poll */
	if (afu_is_irqpoll_enabled(afu))
		irq_poll_init(&afu->irqpoll, afu->irqpoll_weight,
			      cxlflash_irqpoll);

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	CXL context for the master.
 *
 * Return: UNDO_NOOP when successful, the undo level to use otherwise.
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx).
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}
/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected an initial global
 * port mask (value read is 0), default to the number of ports originally
 * supported by the cxlflash driver (2) before hardware with other port
 * offerings was introduced.
 */
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;

	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);

	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}
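/*
 * Illustrative sketch (not part of the driver): deriving a port count from
 * the selection mask with ilog2() relies on the mask being contiguous from
 * bit 0 (0x1 -> 1 port, 0x3 -> 2, 0xf -> 4). A standalone equivalent of
 * the highest-set-bit arithmetic:
 *
 *	static int mask_to_port_count(unsigned long long mask, int max_ports)
 *	{
 *		int count = 1;
 *
 *		if (mask == 0)
 *			return 0;	// caller falls back to a legacy default
 *		while (mask >>= 1)	// count becomes ilog2(mask) + 1
 *			count++;
 *		return count < max_ports ? count : max_ports;
 *	}
 */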
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: init_mc failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	spin_lock_init(&afu->rrin_slock);
	afu->room = readq_be(&afu->host_map->cmd_room);

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	char *buf = NULL;
	int rc = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -1;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	init_completion(&cmd->cevent);
	cmd->parent = afu;

	dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	rc = wait_resp(afu, cmd);
	if (unlikely(rc))
		rc = -1;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
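/*
 * Illustrative sketch (not part of the driver): the sync CDB above stores
 * big-endian fields at naturally aligned offsets (2 and 4), which is why
 * plain casts suffice where unaligned accessors would otherwise be needed.
 * A portable equivalent that is safe at any alignment:
 *
 *	#include <stdint.h>
 *
 *	static void put_be16(uint8_t *p, uint16_t v)
 *	{
 *		p[0] = v >> 8;
 *		p[1] = v & 0xff;
 *	}
 *
 *	static void put_be32(uint8_t *p, uint32_t v)
 *	{
 *		p[0] = v >> 24;
 *		p[1] = (v >> 16) & 0xff;
 *		p[2] = (v >> 8) & 0xff;
 *		p[3] = v & 0xff;
 *	}
 *
 *	static void build_sync_cdb(uint8_t cdb[16], uint8_t mode,
 *				   uint16_t ctx_hndl, uint32_t res_hndl)
 *	{
 *		cdb[0] = 0xC0;			// AFU sync opcode, as above
 *		cdb[1] = mode;
 *		put_be16(&cdb[2], ctx_hndl);	// context handle, offset 2
 *		put_be32(&cdb[4], res_hndl);	// resource handle, offset 4
 *	}
 */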
/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct device *dev = &cfg->dev->dev;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_port_regs;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}
/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}
/**
 * port2_show() - queries and presents the current status of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}
2197 * port3_show() - queries and presents the current status of port 3
2198 * @dev: Generic device associated with the host owning the port.
2199 * @attr: Device attribute representing the port.
2200 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2202 * Return: The size of the ASCII string returned in @buf.
2204 static ssize_t port3_show(struct device *dev,
2205 struct device_attribute *attr,
2208 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2210 return cxlflash_show_port_status(3, cfg, buf);
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (i.e. @count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the
		 * number of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
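
/*
 * Illustrative usage (host number is an example): select a single
 * internal 512B-block LUN and later restore external LUNs:
 *
 *   echo 1 > /sys/class/scsi_host/host1/lun_mode
 *   echo 0 > /sys/class/scsi_host/host1/lun_mode
 *
 * Each successful mode change resets the AFU, adjusts max_channel and
 * rescans the host so the new LUN layout is discovered.
 */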
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);
	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
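
/*
 * Example output (entry values are illustrative only): one line per LUN
 * table entry, index followed by the encoded LUN table contents:
 *
 *   000: 4000400000000000
 *   001: 0000000000000000
 */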
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}

/**
 * port2_lun_table_show() - presents the current LUN table of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}

/**
 * port3_lun_table_show() - presents the current LUN table of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The number of bytes consumed from @buf (@count) or -EINVAL.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	u32 weight;
	int rc;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;
	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}
	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Specified IRQ poll weight is already set.\n");
		return -EINVAL;
	}
	if (afu_is_irqpoll_enabled(afu))
		irq_poll_disable(&afu->irqpoll);

	afu->irqpoll_weight = weight;
	if (weight > 0)
		irq_poll_init(&afu->irqpoll, weight, cxlflash_irqpoll);
	return count;
}
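
/*
 * Illustrative usage (host number is an example): budget 64 command
 * completions per irq_poll cycle, or write 0 to disable polling:
 *
 *   echo 64 > /sys/class/scsi_host/host1/irqpoll_weight
 *   echo 0  > /sys/class/scsi_host/host1/irqpoll_weight
 */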
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
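
/*
 * Illustrative usage (device address is an example):
 *
 *   $ cat /sys/bus/scsi/devices/1:0:0:0/mode
 *   legacy
 *
 * "superpipe" is reported once sdev->hostdata is populated, i.e. the LUN
 * has been attached for user-space (superpipe) access.
 */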
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_port2,
	&dev_attr_port3,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_port2_lun_table,
	&dev_attr_port3_lun_table,
	&dev_attr_irqpoll_weight,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
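
/*
 * Note on .cmd_size above: the extra __alignof__(struct afu_cmd) - 1
 * bytes of per-command private data give the driver room to align the
 * embedded AFU command on its natural boundary regardless of where the
 * SCSI midlayer's allocation happens to start.
 */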
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state). Meanwhile,
 * the probe will be allowed to exit successfully.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
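
/*
 * Summary of the probe/EEH interlock described in the kdoc above, using
 * the driver's own STATE_* values:
 *
 *   STATE_PROBING --probe succeeds-----------------------> STATE_NORMAL
 *   STATE_PROBING --EEH during probe--> STATE_PROBED
 *                                         --slot reset---> STATE_NORMAL
 *
 * This is why an init_afu() failure is tolerated above when an EEH
 * thread is already sleeping on reset_waitq: the waiter will drive
 * recovery once the probe moves the device to STATE_PROBED.
 */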
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (rc)
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct.
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
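
/*
 * The PCI core invokes these callbacks in the standard EEH order:
 * error_detected() first, then slot_reset() once the slot has been
 * reset, and finally resume() when normal operation may continue.
 */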
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	cxlflash_list_init();
	return pci_register_driver(&cxlflash_driver);
}
/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}
module_init(init_cxlflash);
module_exit(exit_cxlflash);