/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");


/**
 * cmd_checkout() - checks out an AFU command
 * @afu: AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd: AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
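
/*
 * A minimal sketch of the checkout/checkin lifecycle (error handling
 * abbreviated; see send_tmf() and cxlflash_queuecommand() below for the
 * complete sequences):
 *
 *	cmd = cmd_checkout(afu);	// reserve a slot from the pool
 *	if (!cmd)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	// fill in cmd->rcb: context, port, LUN, request flags, CDB
 *	rc = send_cmd(afu, cmd);	// post to the hardware queue
 *	if (rc)
 *		cmd_checkin(cmd);	// send failed: return slot to pool
 *	// on success, cmd_complete() later checks the command back in
 */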

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;
		cmd_checkin(cmd); /* Don't use cmd after here */

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd: AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time (note: '^' would be XOR, not pow) */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}
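
/*
 * A note on the handshake in context_reset() above: writing the reset
 * indicator (0x1) to IOARRIN asks the AFU to reset the context, and the
 * AFU clears the register once the reset has completed. That is why the
 * routine polls IOARRIN until it no longer reads back as 0x1, doubling
 * the delay (1 << nretry) on each pass.
 */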

/**
 * send_cmd() - sends an AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success or SCSI_MLQUEUE_HOST_BUSY
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare. i.e. Only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}
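
/*
 * A worked example of the cached 'room' accounting in send_cmd() (values
 * illustrative): if cmd_room reads back as 4, that count is cached in
 * afu->room and the next three senders decrement it and post without any
 * MMIO read. The sender that drops the count to zero re-reads cmd_room,
 * refreshing the cache (or backing off and retrying when the hardware
 * queue is still full), so one MMIO read is amortized over a batch of
 * commands.
 */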

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success
 *	SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return:
 *	0 on success
 *	SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}
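
	/*
	 * The RCB holds a single data length/address pair, so the loop
	 * above records only the last scatter-gather element. This relies
	 * on commands arriving with at most one segment (presumably
	 * enforced by the host template's sg_tablesize setting, defined
	 * elsewhere in this driver), so in practice the loop runs once.
	 */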

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
			complete(&afu->cmd[i].cevent);

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
			__func__);
		return;
	}

	switch (level) {
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}
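
/*
 * The switch in term_mc() above falls through deliberately: entering at
 * UNDO_START runs every teardown step below it, while entering at a later
 * level (e.g. UNMAP_TWO) skips the steps that never completed. The
 * init_state switch in cxlflash_remove() below follows the same waterfall
 * pattern.
 */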

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	term_mc(cfg, UNDO_START);

	if (cfg->afu)
		stop_afu(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* Fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_release_regions(cfg->dev);
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		flush_work(&cfg->work_q);
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* This allocation is about 12K, i.e. only 1 64k page
	 * and up to 4 4k pages
	 */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return:
 *	0 on success
 *	-EIO on unable to communicate with device
 *	A return code from the PCI sub-routines
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
	rc = pci_request_regions(pdev, CXLFLASH_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"%s: Couldn't register memory range of registers\n",
			__func__);
		goto out;
	}

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out_release_regions;
		}
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc < 0) {
		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
			__func__);
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
			__func__);
		goto out_disable;
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		cxlflash_wait_for_pci_err_recovery(cfg);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
			__func__);
		rc = -EIO;
		goto cleanup_nolog;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

cleanup_nolog:
out_msi_disable:
	cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	goto out;

}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return:
 *	0 on success
 *	A return code from adding the host
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(u64 *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(u64 *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will time out
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always. A failure to bring the port offline or back online is
 * logged, but success is returned once the WWPN has been programmed.
 */
static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is logged but otherwise ignored.
 */
static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}
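
/*
 * The toggle bit in each RRQ entry distinguishes fresh completions from
 * stale entries left over from the previous pass through the ring. The
 * AFU inverts the bit it writes on every wrap, and the handler above
 * inverts the value it expects whenever it wraps from hrrq_end back to
 * hrrq_start; an entry whose toggle bit does not match has already been
 * consumed, so the scan stops at the true tail of the queue.
 */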

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* it is OK to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return:
 *	0 on success
 *	-ENODEV when VPD or WWPN keywords not found
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->parent_dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read-only section size, capped when it extends beyond
	 * the VPD that was read
	 */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	/* ctx_hndl is 16 bits in CAIA */
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}
1507 | ||
1508 | /** | |
1509 | * init_global() - initialize AFU global registers | |
1510 | * @cxlflash: Internal structure associated with the host. | |
1511 | */ | |
15305514 | 1512 | static int init_global(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1513 | { |
1514 | struct afu *afu = cfg->afu; | |
4392ba49 | 1515 | struct device *dev = &cfg->dev->dev; |
c21e0bbf MO |
1516 | u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */ |
1517 | int i = 0, num_ports = 0; | |
1518 | int rc = 0; | |
1519 | u64 reg; | |
1520 | ||
1521 | rc = read_vpd(cfg, &wwpn[0]); | |
1522 | if (rc) { | |
4392ba49 | 1523 | dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); |
c21e0bbf MO |
1524 | goto out; |
1525 | } | |
1526 | ||
1527 | pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); | |
1528 | ||
1529 | /* set up RRQ in AFU for master issued cmds */ | |
1530 | writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); | |
1531 | writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); | |
1532 | ||
1533 | /* AFU configuration */ | |
1534 | reg = readq_be(&afu->afu_map->global.regs.afu_config); | |
1535 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; | |
1536 | /* Enable all auto retry options and control endianness. Leave | |
1537 | * the others at default: CTX_CAP write protected, mbox_r does | |
1538 | * not clear on read, and checker on if dual AFU. | |
1539 | */ | |
1540 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); | |
1541 | ||
1542 | /* Global port select: select one or both ports */ | |
1543 | if (afu->internal_lun) { | |
1544 | /* only use port 0 */ | |
1545 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); | |
1546 | num_ports = NUM_FC_PORTS - 1; | |
1547 | } else { | |
1548 | writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel); | |
1549 | num_ports = NUM_FC_PORTS; | |
1550 | } | |
1551 | ||
1552 | for (i = 0; i < num_ports; i++) { | |
1553 | /* unmask all errors (but they are still masked at AFU) */ | |
1554 | writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]); | |
1555 | /* clear CRC error cnt & set a threshold */ | |
1556 | (void)readq_be(&afu->afu_map->global. | |
1557 | fc_regs[i][FC_CNT_CRCERR / 8]); | |
1558 | writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i] | |
1559 | [FC_CRC_THRESH / 8]); | |
1560 | ||
1561 | /* set WWPNs. If already programmed, wwpn[i] is 0 */ | |
1562 | if (wwpn[i] != 0 && | |
1563 | afu_set_wwpn(afu, i, | |
1564 | &afu->afu_map->global.fc_regs[i][0], | |
1565 | wwpn[i])) { | |
4392ba49 | 1566 | dev_err(dev, "%s: failed to set WWPN on port %d\n", |
c21e0bbf MO |
1567 | __func__, i); |
1568 | rc = -EIO; | |
1569 | goto out; | |
1570 | } | |
1571 | /* Programming WWPN back to back causes additional | |
1572 | * offline/online transitions and a PLOGI | |
1573 | */ | |
1574 | msleep(100); | |
1575 | ||
1576 | } | |
1577 | ||
1578 | /* set up master's own CTX_CAP to allow real mode, host translation */ | |
1579 | /* tbls, afu cmds and read/write GSCSI cmds. */ | |
1580 | /* First, unlock ctx_cap write by reading mbox */ | |
1581 | (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ | |
1582 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | | |
1583 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | | |
1584 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), | |
1585 | &afu->ctrl_map->ctx_cap); | |
1586 | /* init heartbeat */ | |
1587 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); | |
1588 | ||
1589 | out: | |
1590 | return rc; | |
1591 | } | |
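/*
 * Reader's note: per the comment above, ctx_cap is write protected
 * until the context's mailbox has been read. A caller that needs to be
 * certain the capabilities stuck could read them back (sketch; caps is
 * an illustrative local):
 *
 *	(void)readq_be(&afu->ctrl_map->mbox_r);	// unlock ctx_cap
 *	writeq_be(caps, &afu->ctrl_map->ctx_cap);
 *	if (readq_be(&afu->ctrl_map->ctx_cap) != caps)
 *		// treat as an MMIO/AFU failure
 */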
1592 | ||
1593 | /** | |
1594 | * start_afu() - initializes and starts the AFU | |
1595 | * @cfg: Internal structure associated with the host. | |
1596 | */ | |
1597 | static int start_afu(struct cxlflash_cfg *cfg) | |
1598 | { | |
1599 | struct afu *afu = cfg->afu; | |
1600 | struct afu_cmd *cmd; | |
1601 | ||
1602 | int i = 0; | |
1603 | int rc = 0; | |
1604 | ||
1605 | for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { | |
1606 | cmd = &afu->cmd[i]; | |
1607 | ||
1608 | init_completion(&cmd->cevent); | |
1609 | spin_lock_init(&cmd->slock); | |
1610 | cmd->parent = afu; | |
1611 | } | |
1612 | ||
1613 | init_pcr(cfg); | |
1614 | ||
1615 | /* initialize RRQ pointers */ | |
1616 | afu->hrrq_start = &afu->rrq_entry[0]; | |
1617 | afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; | |
1618 | afu->hrrq_curr = afu->hrrq_start; | |
1619 | afu->toggle = 1; | |
1620 | ||
1621 | rc = init_global(cfg); | |
1622 | ||
1623 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1624 | return rc; | |
1625 | } | |
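/*
 * Reader's note: afu->toggle starts at 1 to match the first pass over
 * the RRQ. The consumer (the RRQ interrupt path) is assumed to compare
 * each entry's SISL_RESP_HANDLE_T_BIT against afu->toggle and flip the
 * toggle on wrap, along these lines:
 *
 *	while ((*afu->hrrq_curr & SISL_RESP_HANDLE_T_BIT) == afu->toggle) {
 *		// ... complete the command referenced by this entry ...
 *		if (afu->hrrq_curr < afu->hrrq_end)
 *			afu->hrrq_curr++;
 *		else {
 *			afu->hrrq_curr = afu->hrrq_start;
 *			afu->toggle ^= SISL_RESP_HANDLE_T_BIT;
 *		}
 *	}
 */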
1626 | ||
1627 | /** | |
1628 | * init_mc() - create and register as the master context | |
1630 | * @cfg: Internal structure associated with the host. | |
1630 | * | |
1631 | * Return: | |
1632 | * 0 on success | |
1633 | * -ENOMEM when unable to obtain a context from CXL services | |
1634 | * A failure value from CXL services. | |
1635 | */ | |
1636 | static int init_mc(struct cxlflash_cfg *cfg) | |
1637 | { | |
1638 | struct cxl_context *ctx; | |
1639 | struct device *dev = &cfg->dev->dev; | |
1640 | struct afu *afu = cfg->afu; | |
1641 | int rc = 0; | |
1642 | enum undo_level level; | |
1643 | ||
1644 | ctx = cxl_get_context(cfg->dev); | |
1645 | if (unlikely(!ctx)) | |
1646 | return -ENOMEM; | |
1647 | cfg->mcctx = ctx; | |
1648 | ||
1649 | /* Set it up as a master with the CXL */ | |
1650 | cxl_set_master(ctx); | |
1651 | ||
1652 | /* During initialization reset the AFU to start from a clean slate */ | |
1653 | rc = cxl_afu_reset(cfg->mcctx); | |
1654 | if (unlikely(rc)) { | |
1655 | dev_err(dev, "%s: initial AFU reset failed rc=%d\n", | |
1656 | __func__, rc); | |
1657 | level = RELEASE_CONTEXT; | |
1658 | goto out; | |
1659 | } | |
1660 | ||
1661 | rc = cxl_allocate_afu_irqs(ctx, 3); | |
1662 | if (unlikely(rc)) { | |
1663 | dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", | |
1664 | __func__, rc); | |
1665 | level = RELEASE_CONTEXT; | |
1666 | goto out; | |
1667 | } | |
1668 | ||
1669 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, | |
1670 | "SISL_MSI_SYNC_ERROR"); | |
1671 | if (unlikely(rc <= 0)) { | |
1672 | dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n", | |
1673 | __func__); | |
1674 | level = FREE_IRQ; | |
1675 | goto out; | |
1676 | } | |
1677 | ||
1678 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, | |
1679 | "SISL_MSI_RRQ_UPDATED"); | |
1680 | if (unlikely(rc <= 0)) { | |
1681 | dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n", | |
1682 | __func__); | |
1683 | level = UNMAP_ONE; | |
1684 | goto out; | |
1685 | } | |
1686 | ||
1687 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, | |
1688 | "SISL_MSI_ASYNC_ERROR"); | |
1689 | if (unlikely(rc <= 0)) { | |
1690 | dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n", | |
1691 | __func__); | |
1692 | level = UNMAP_TWO; | |
1693 | goto out; | |
1694 | } | |
1695 | ||
1696 | rc = 0; | |
1697 | ||
1698 | /* This performs the equivalent of the CXL_IOCTL_START_WORK. | |
1699 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process | |
1700 | * element (pe) that is embedded in the context (ctx) | |
1701 | */ | |
1702 | rc = start_context(cfg); | |
1703 | if (unlikely(rc)) { | |
1704 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); | |
1705 | level = UNMAP_THREE; | |
1706 | goto out; | |
1707 | } | |
1708 | ret: | |
1709 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1710 | return rc; | |
1711 | out: | |
1712 | term_mc(cfg, level); | |
1713 | goto ret; | |
1714 | } | |
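/*
 * Reader's note: the error paths above form a cleanup ladder; each
 * later failure selects an undo_level that unwinds everything set up
 * so far. Assumed shape of term_mc()'s unwind (fall-through switch,
 * sketch only):
 *
 *	switch (level) {
 *	case UNMAP_THREE:
 *		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
 *	case UNMAP_TWO:
 *		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
 *	case UNMAP_ONE:
 *		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
 *	case FREE_IRQ:
 *		cxl_free_afu_irqs(cfg->mcctx);
 *	case RELEASE_CONTEXT:
 *		cfg->mcctx = NULL;
 *	}
 */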
1715 | ||
1716 | /** | |
1717 | * init_afu() - setup as master context and start AFU | |
1718 | * @cfg: Internal structure associated with the host. | |
1719 | * | |
1720 | * This routine is a higher level of control for configuring the | |
1721 | * AFU on probe and reset paths. | |
1722 | * | |
1723 | * Return: | |
1724 | * 0 on success | |
1725 | * -ENOMEM when unable to map the AFU MMIO space | |
1726 | * A failure value from internal services. | |
1727 | */ | |
1728 | static int init_afu(struct cxlflash_cfg *cfg) | |
1729 | { | |
1730 | u64 reg; | |
1731 | int rc = 0; | |
1732 | struct afu *afu = cfg->afu; | |
1733 | struct device *dev = &cfg->dev->dev; | |
1734 | ||
5cdac81a MO |
1735 | cxl_perst_reloads_same_image(cfg->cxl_afu, true); |
1736 | ||
c21e0bbf MO |
1737 | rc = init_mc(cfg); |
1738 | if (rc) { | |
1739 | dev_err(dev, "%s: call to init_mc failed, rc=%d!\n", | |
1740 | __func__, rc); | |
1741 | goto err1; | |
1742 | } | |
1743 | ||
1744 | /* Map the entire MMIO space of the AFU */ | |
1746 | afu->afu_map = cxl_psa_map(cfg->mcctx); | |
1747 | if (!afu->afu_map) { | |
1748 | rc = -ENOMEM; | |
1749 | term_mc(cfg, UNDO_START); | |
1750 | dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__); | |
1751 | goto err1; | |
1752 | } | |
1753 | ||
e5ce067b MO |
1754 | /* No byte reverse on reading afu_version or string will be backwards */ |
1755 | reg = readq(&afu->afu_map->global.regs.afu_version); | |
1756 | memcpy(afu->version, ®, sizeof(reg)); | |
c21e0bbf MO |
1757 | afu->interface_version = |
1758 | readq_be(&afu->afu_map->global.regs.interface_version); | |
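/* An interface_version of all FFs marks a back-level AFU (the MMIO
 * register reads as ~0); the (version + 1) == 0 test below checks for
 * exactly that pattern.
 */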
e5ce067b MO |
1759 | if ((afu->interface_version + 1) == 0) { |
1760 | pr_err("Back level AFU, please upgrade. AFU version %s " | |
1761 | "interface version 0x%llx\n", afu->version, | |
1762 | afu->interface_version); | |
1763 | rc = -EINVAL; | |
1764 | goto err1; | |
1765 | } else | |
1766 | pr_debug("%s: afu version %s, interface version 0x%llX\n", | |
1767 | __func__, afu->version, afu->interface_version); | |
c21e0bbf MO |
1768 | |
1769 | rc = start_afu(cfg); | |
1770 | if (rc) { | |
1771 | dev_err(dev, "%s: call to start_afu failed, rc=%d!\n", | |
1772 | __func__, rc); | |
1773 | term_mc(cfg, UNDO_START); | |
1774 | cxl_psa_unmap((void *)afu->afu_map); | |
1775 | afu->afu_map = NULL; | |
1776 | goto err1; | |
1777 | } | |
1778 | ||
1779 | afu_err_intr_init(cfg->afu); | |
1780 | atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); | |
1781 | ||
2cb79266 MO |
1782 | /* Restore the LUN mappings */ |
1783 | cxlflash_restore_luntable(cfg); | |
c21e0bbf MO |
1784 | err1: |
1785 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1786 | return rc; | |
1787 | } | |
1788 | ||
c21e0bbf MO |
1789 | /** |
1790 | * cxlflash_afu_sync() - builds and sends an AFU sync command | |
1791 | * @afu: AFU associated with the host. | |
1792 | * @ctx_hndl_u: Identifies context requesting sync. | |
1793 | * @res_hndl_u: Identifies resource requesting sync. | |
1794 | * @mode: Type of sync to issue (lightweight, heavyweight, global). | |
1795 | * | |
1796 | * The AFU can only take 1 sync command at a time. This routine enforces this | |
1797 | * limitation by using a mutex to provide exclusive access to the AFU during | |
1798 | * the sync. This design point requires calling threads not to be in interrupt | |
1799 | * context, due to the possibility of sleeping during concurrent sync operations. | |
1800 | * | |
5cdac81a MO |
1801 | * AFU sync operations are only necessary and allowed when the device is |
1802 | * operating normally. When not operating normally, sync requests can occur as | |
1803 | * part of cleaning up resources associated with an adapter prior to removal. | |
1804 | * In this scenario, these requests are simply ignored (safe due to the AFU | |
1805 | * going away). | |
1806 | * | |
c21e0bbf MO |
1807 | * Return: |
1808 | * 0 on success | |
1809 | * -1 on failure | |
1810 | */ | |
1811 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, | |
1812 | res_hndl_t res_hndl_u, u8 mode) | |
1813 | { | |
5cdac81a | 1814 | struct cxlflash_cfg *cfg = afu->parent; |
4392ba49 | 1815 | struct device *dev = &cfg->dev->dev; |
c21e0bbf MO |
1816 | struct afu_cmd *cmd = NULL; |
1817 | int rc = 0; | |
1818 | int retry_cnt = 0; | |
1819 | static DEFINE_MUTEX(sync_active); | |
1820 | ||
5cdac81a MO |
1821 | if (cfg->state != STATE_NORMAL) { |
1822 | pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state); | |
1823 | return 0; | |
1824 | } | |
1825 | ||
c21e0bbf MO |
1826 | mutex_lock(&sync_active); |
1827 | retry: | |
15305514 | 1828 | cmd = cmd_checkout(afu); |
c21e0bbf MO |
1829 | if (unlikely(!cmd)) { |
1830 | retry_cnt++; | |
1831 | udelay(1000 * retry_cnt); | |
1832 | if (retry_cnt < MC_RETRY_CNT) | |
1833 | goto retry; | |
4392ba49 | 1834 | dev_err(dev, "%s: could not get a free command\n", __func__); |
c21e0bbf MO |
1835 | rc = -1; |
1836 | goto out; | |
1837 | } | |
1838 | ||
1839 | pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); | |
1840 | ||
1841 | memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); | |
1842 | ||
1843 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; | |
1844 | cmd->rcb.port_sel = 0x0; /* NA */ | |
1845 | cmd->rcb.lun_id = 0x0; /* NA */ | |
1846 | cmd->rcb.data_len = 0x0; | |
1847 | cmd->rcb.data_ea = 0x0; | |
1848 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; | |
1849 | ||
1850 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ | |
1851 | cmd->rcb.cdb[1] = mode; | |
1852 | ||
1853 | /* The cdb is aligned, no unaligned accessors required */ | |
1854 | *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u); | |
1855 | *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u); | |
1856 | ||
15305514 | 1857 | rc = send_cmd(afu, cmd); |
c21e0bbf MO |
1858 | if (unlikely(rc)) |
1859 | goto out; | |
1860 | ||
15305514 | 1861 | wait_resp(afu, cmd); |
c21e0bbf MO |
1862 | |
1863 | /* set on timeout */ | |
1864 | if (unlikely((cmd->sa.ioasc != 0) || | |
1865 | (cmd->sa.host_use_b[0] & B_ERROR))) | |
1866 | rc = -1; | |
1867 | out: | |
1868 | mutex_unlock(&sync_active); | |
1869 | if (cmd) | |
15305514 | 1870 | cmd_checkin(cmd); |
c21e0bbf MO |
1871 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
1872 | return rc; | |
1873 | } | |
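/*
 * Example (sketch): a caller tearing down a resource handle would issue
 * a lightweight sync scoped to its context, e.g. (ctxid and rhndl are
 * illustrative locals; AFU_LW_SYNC is assumed from sislite.h):
 *
 *	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *		rc = -EIO;	// sync failures are reported as -1
 */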
1874 | ||
1875 | /** | |
15305514 MO |
1876 | * afu_reset() - resets the AFU |
1877 | * @cfg: Internal structure associated with the host. | |
c21e0bbf MO |
1878 | * |
1879 | * Return: | |
1880 | * 0 on success | |
1881 | * A failure value from internal services. | |
1882 | */ | |
15305514 | 1883 | static int afu_reset(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1884 | { |
1885 | int rc = 0; | |
1886 | /* Stop the context before the reset. Since the context is | |
1887 | * no longer available, restart it after the reset is complete. | |
1888 | */ | |
1889 | ||
1890 | term_afu(cfg); | |
1891 | ||
1892 | rc = init_afu(cfg); | |
1893 | ||
1894 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1895 | return rc; | |
1896 | } | |
1897 | ||
15305514 MO |
1898 | /** |
1899 | * cxlflash_eh_device_reset_handler() - reset a single LUN | |
1900 | * @scp: SCSI command from stack identifying the LUN to reset. | |
1901 | * | |
1902 | * Return: | |
1903 | * SUCCESS as defined in scsi/scsi.h | |
1904 | * FAILED as defined in scsi/scsi.h | |
1905 | */ | |
1906 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) | |
1907 | { | |
1908 | int rc = SUCCESS; | |
1909 | struct Scsi_Host *host = scp->device->host; | |
1910 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; | |
1911 | struct afu *afu = cfg->afu; | |
1912 | int rcr = 0; | |
1913 | ||
1914 | pr_debug("%s: (scp=%p) %d/%d/%d/%llu " | |
1915 | "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, | |
1916 | host->host_no, scp->device->channel, | |
1917 | scp->device->id, scp->device->lun, | |
1918 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
1919 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
1920 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
1921 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
1922 | ||
1923 | switch (cfg->state) { | |
1924 | case STATE_NORMAL: | |
1925 | rcr = send_tmf(afu, scp, TMF_LUN_RESET); | |
1926 | if (unlikely(rcr)) | |
1927 | rc = FAILED; | |
1928 | break; | |
1929 | case STATE_RESET: | |
1930 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
1931 | if (cfg->state == STATE_NORMAL) | |
1932 | break; | |
1933 | /* fall through */ | |
1934 | default: | |
1935 | rc = FAILED; | |
1936 | break; | |
1937 | } | |
1938 | ||
1939 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1940 | return rc; | |
1941 | } | |
1942 | ||
1943 | /** | |
1944 | * cxlflash_eh_host_reset_handler() - reset the host adapter | |
1945 | * @scp: SCSI command from stack identifying host. | |
1946 | * | |
1947 | * Return: | |
1948 | * SUCCESS as defined in scsi/scsi.h | |
1949 | * FAILED as defined in scsi/scsi.h | |
1950 | */ | |
1951 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) | |
1952 | { | |
1953 | int rc = SUCCESS; | |
1954 | int rcr = 0; | |
1955 | struct Scsi_Host *host = scp->device->host; | |
1956 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; | |
1957 | ||
1958 | pr_debug("%s: (scp=%p) %d/%d/%d/%llu " | |
1959 | "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, | |
1960 | host->host_no, scp->device->channel, | |
1961 | scp->device->id, scp->device->lun, | |
1962 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
1963 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
1964 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
1965 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
1966 | ||
1967 | switch (cfg->state) { | |
1968 | case STATE_NORMAL: | |
1969 | cfg->state = STATE_RESET; | |
1970 | scsi_block_requests(cfg->host); | |
1971 | cxlflash_mark_contexts_error(cfg); | |
1972 | rcr = afu_reset(cfg); | |
1973 | if (rcr) { | |
1974 | rc = FAILED; | |
1975 | cfg->state = STATE_FAILTERM; | |
1976 | } else | |
1977 | cfg->state = STATE_NORMAL; | |
1978 | wake_up_all(&cfg->reset_waitq); | |
1979 | scsi_unblock_requests(cfg->host); | |
1980 | break; | |
1981 | case STATE_RESET: | |
1982 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
1983 | if (cfg->state == STATE_NORMAL) | |
1984 | break; | |
1985 | /* fall through */ | |
1986 | default: | |
1987 | rc = FAILED; | |
1988 | break; | |
1989 | } | |
1990 | ||
1991 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
1992 | return rc; | |
1993 | } | |
1994 | ||
1995 | /** | |
1996 | * cxlflash_change_queue_depth() - change the queue depth for the device | |
1997 | * @sdev: SCSI device destined for queue depth change. | |
1998 | * @qdepth: Requested queue depth value to set. | |
1999 | * | |
2000 | * The requested queue depth is capped to the maximum supported value. | |
2001 | * | |
2002 | * Return: The actual queue depth set. | |
2003 | */ | |
2004 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) | |
2005 | { | |
2007 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) | |
2008 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; | |
2009 | ||
2010 | scsi_change_queue_depth(sdev, qdepth); | |
2011 | return sdev->queue_depth; | |
2012 | } | |
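/*
 * Example (sketch): this hook services the standard SCSI sysfs
 * attribute, so the depth can be tuned from user space, e.g.:
 *
 *	echo 16 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 */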
2013 | ||
2014 | /** | |
2015 | * cxlflash_show_port_status() - queries and presents the current port status | |
e0f01a21 MO |
2016 | * @port: Desired port for status reporting. |
2017 | * @afu: AFU owning the specified port. | |
15305514 MO |
2018 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2019 | * | |
2020 | * Return: The size of the ASCII string returned in @buf. | |
2021 | */ | |
e0f01a21 | 2022 | static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf) |
15305514 | 2023 | { |
15305514 | 2024 | char *disp_status; |
15305514 | 2025 | u64 status; |
e0f01a21 | 2026 | __be64 __iomem *fc_regs; |
15305514 | 2027 | |
e0f01a21 | 2028 | if (port >= NUM_FC_PORTS) |
15305514 MO |
2029 | return 0; |
2030 | ||
2031 | fc_regs = &afu->afu_map->global.fc_regs[port][0]; | |
e0f01a21 MO |
2032 | status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); |
2033 | status &= FC_MTIP_STATUS_MASK; | |
15305514 MO |
2034 | |
2035 | if (status == FC_MTIP_STATUS_ONLINE) | |
2036 | disp_status = "online"; | |
2037 | else if (status == FC_MTIP_STATUS_OFFLINE) | |
2038 | disp_status = "offline"; | |
2039 | else | |
2040 | disp_status = "unknown"; | |
2041 | ||
e0f01a21 MO |
2042 | return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); |
2043 | } | |
2044 | ||
2045 | /** | |
2046 | * port0_show() - queries and presents the current status of port 0 | |
2047 | * @dev: Generic device associated with the host owning the port. | |
2048 | * @attr: Device attribute representing the port. | |
2049 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2050 | * | |
2051 | * Return: The size of the ASCII string returned in @buf. | |
2052 | */ | |
2053 | static ssize_t port0_show(struct device *dev, | |
2054 | struct device_attribute *attr, | |
2055 | char *buf) | |
2056 | { | |
2057 | struct Scsi_Host *shost = class_to_shost(dev); | |
2058 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2059 | struct afu *afu = cfg->afu; | |
2060 | ||
2061 | return cxlflash_show_port_status(0, afu, buf); | |
15305514 MO |
2062 | } |
2063 | ||
2064 | /** | |
e0f01a21 MO |
2065 | * port1_show() - queries and presents the current status of port 1 |
2066 | * @dev: Generic device associated with the host owning the port. | |
2067 | * @attr: Device attribute representing the port. | |
2068 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2069 | * | |
2070 | * Return: The size of the ASCII string returned in @buf. | |
2071 | */ | |
2072 | static ssize_t port1_show(struct device *dev, | |
2073 | struct device_attribute *attr, | |
2074 | char *buf) | |
2075 | { | |
2076 | struct Scsi_Host *shost = class_to_shost(dev); | |
2077 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2078 | struct afu *afu = cfg->afu; | |
2079 | ||
2080 | return cxlflash_show_port_status(1, afu, buf); | |
2081 | } | |
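/*
 * Example (sketch): with the host attributes declared further below,
 * port state is read from user space; the value printed is one of
 * "online", "offline" or "unknown" (hostN stands for the adapter's
 * SCSI host):
 *
 *	cat /sys/class/scsi_host/hostN/port0
 */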
2082 | ||
2083 | /** | |
2084 | * lun_mode_show() - presents the current LUN mode of the host | |
15305514 | 2085 | * @dev: Generic device associated with the host. |
e0f01a21 | 2086 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2087 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
2088 | * | |
2089 | * Return: The size of the ASCII string returned in @buf. | |
2090 | */ | |
e0f01a21 MO |
2091 | static ssize_t lun_mode_show(struct device *dev, |
2092 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2093 | { |
2094 | struct Scsi_Host *shost = class_to_shost(dev); | |
2095 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2096 | struct afu *afu = cfg->afu; | |
2097 | ||
e0f01a21 | 2098 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); |
15305514 MO |
2099 | } |
2100 | ||
2101 | /** | |
e0f01a21 | 2102 | * lun_mode_store() - sets the LUN mode of the host |
15305514 | 2103 | * @dev: Generic device associated with the host. |
e0f01a21 | 2104 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2105 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
2106 | * @count: Length of data residing in @buf. | |
2107 | * | |
2108 | * The CXL Flash AFU supports a dummy LUN mode where the external | |
2109 | * links and storage are not required. Space on the FPGA is used | |
2110 | * to create 1 or 2 small LUNs which are presented to the system | |
2111 | * as if they were a normal storage device. This feature is useful | |
2112 | * during development and also provides manufacturing with a way | |
2113 | * to test the AFU without an actual device. | |
2114 | * | |
2115 | * 0 = external LUN[s] (default) | |
2116 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) | |
2117 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) | |
2118 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) | |
2119 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) | |
2120 | * | |
2121 | * Return: The number of bytes consumed from @buf (i.e. @count). | |
2122 | */ | |
e0f01a21 MO |
2123 | static ssize_t lun_mode_store(struct device *dev, |
2124 | struct device_attribute *attr, | |
2125 | const char *buf, size_t count) | |
15305514 MO |
2126 | { |
2127 | struct Scsi_Host *shost = class_to_shost(dev); | |
2128 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2129 | struct afu *afu = cfg->afu; | |
2130 | int rc; | |
2131 | u32 lun_mode; | |
2132 | ||
2133 | rc = kstrtouint(buf, 10, &lun_mode); | |
2134 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { | |
2135 | afu->internal_lun = lun_mode; | |
2136 | afu_reset(cfg); | |
2137 | scsi_scan_host(cfg->host); | |
2138 | } | |
2139 | ||
2140 | return count; | |
2141 | } | |
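/*
 * Example (sketch): selecting a single internal 512B-block LUN and
 * triggering the rescan from user space (hostN is the adapter's SCSI
 * host):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/lun_mode
 */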
2142 | ||
2143 | /** | |
e0f01a21 | 2144 | * ioctl_version_show() - presents the current ioctl version of the host |
15305514 MO |
2145 | * @dev: Generic device associated with the host. |
2146 | * @attr: Device attribute representing the ioctl version. | |
2147 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. | |
2148 | * | |
2149 | * Return: The size of the ASCII string returned in @buf. | |
2150 | */ | |
e0f01a21 MO |
2151 | static ssize_t ioctl_version_show(struct device *dev, |
2152 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2153 | { |
2154 | return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); | |
2155 | } | |
2156 | ||
2157 | /** | |
e0f01a21 MO |
2158 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
2159 | * @port: Desired port for status reporting. | |
2160 | * @afu: AFU owning the specified port. | |
2161 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2162 | * | |
2163 | * Return: The size of the ASCII string returned in @buf. | |
2164 | */ | |
2165 | static ssize_t cxlflash_show_port_lun_table(u32 port, | |
2166 | struct afu *afu, | |
2167 | char *buf) | |
2168 | { | |
2169 | int i; | |
2170 | ssize_t bytes = 0; | |
2171 | __be64 __iomem *fc_port; | |
2172 | ||
2173 | if (port >= NUM_FC_PORTS) | |
2174 | return 0; | |
2175 | ||
2176 | fc_port = &afu->afu_map->global.fc_port[port][0]; | |
2177 | ||
2178 | for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) | |
2179 | bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, | |
2180 | "%03d: %016llX\n", i, readq_be(&fc_port[i])); | |
2181 | return bytes; | |
2182 | } | |
2183 | ||
2184 | /** | |
2185 | * port0_lun_table_show() - presents the current LUN table of port 0 | |
2186 | * @dev: Generic device associated with the host owning the port. | |
2187 | * @attr: Device attribute representing the port. | |
2188 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2189 | * | |
2190 | * Return: The size of the ASCII string returned in @buf. | |
2191 | */ | |
2192 | static ssize_t port0_lun_table_show(struct device *dev, | |
2193 | struct device_attribute *attr, | |
2194 | char *buf) | |
2195 | { | |
2196 | struct Scsi_Host *shost = class_to_shost(dev); | |
2197 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2198 | struct afu *afu = cfg->afu; | |
2199 | ||
2200 | return cxlflash_show_port_lun_table(0, afu, buf); | |
2201 | } | |
2202 | ||
2203 | /** | |
2204 | * port1_lun_table_show() - presents the current LUN table of port 1 | |
2205 | * @dev: Generic device associated with the host owning the port. | |
2206 | * @attr: Device attribute representing the port. | |
2207 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2208 | * | |
2209 | * Return: The size of the ASCII string returned in @buf. | |
2210 | */ | |
2211 | static ssize_t port1_lun_table_show(struct device *dev, | |
2212 | struct device_attribute *attr, | |
2213 | char *buf) | |
2214 | { | |
2215 | struct Scsi_Host *shost = class_to_shost(dev); | |
2216 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; | |
2217 | struct afu *afu = cfg->afu; | |
2218 | ||
2219 | return cxlflash_show_port_lun_table(1, afu, buf); | |
2220 | } | |
2221 | ||
2222 | /** | |
2223 | * mode_show() - presents the current mode of the device | |
15305514 MO |
2224 | * @dev: Generic device associated with the device. |
2225 | * @attr: Device attribute representing the device mode. | |
2226 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. | |
2227 | * | |
2228 | * Return: The size of the ASCII string returned in @buf. | |
2229 | */ | |
e0f01a21 MO |
2230 | static ssize_t mode_show(struct device *dev, |
2231 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2232 | { |
2233 | struct scsi_device *sdev = to_scsi_device(dev); | |
2234 | ||
e0f01a21 MO |
2235 | return scnprintf(buf, PAGE_SIZE, "%s\n", |
2236 | sdev->hostdata ? "superpipe" : "legacy"); | |
15305514 MO |
2237 | } |
2238 | ||
2239 | /* | |
2240 | * Host attributes | |
2241 | */ | |
e0f01a21 MO |
2242 | static DEVICE_ATTR_RO(port0); |
2243 | static DEVICE_ATTR_RO(port1); | |
2244 | static DEVICE_ATTR_RW(lun_mode); | |
2245 | static DEVICE_ATTR_RO(ioctl_version); | |
2246 | static DEVICE_ATTR_RO(port0_lun_table); | |
2247 | static DEVICE_ATTR_RO(port1_lun_table); | |
15305514 MO |
2248 | |
2249 | static struct device_attribute *cxlflash_host_attrs[] = { | |
2250 | &dev_attr_port0, | |
2251 | &dev_attr_port1, | |
2252 | &dev_attr_lun_mode, | |
2253 | &dev_attr_ioctl_version, | |
e0f01a21 MO |
2254 | &dev_attr_port0_lun_table, |
2255 | &dev_attr_port1_lun_table, | |
15305514 MO |
2256 | NULL |
2257 | }; | |
2258 | ||
2259 | /* | |
2260 | * Device attributes | |
2261 | */ | |
e0f01a21 | 2262 | static DEVICE_ATTR_RO(mode); |
15305514 MO |
2263 | |
2264 | static struct device_attribute *cxlflash_dev_attrs[] = { | |
2265 | &dev_attr_mode, | |
2266 | NULL | |
2267 | }; | |
2268 | ||
2269 | /* | |
2270 | * Host template | |
2271 | */ | |
2272 | static struct scsi_host_template driver_template = { | |
2273 | .module = THIS_MODULE, | |
2274 | .name = CXLFLASH_ADAPTER_NAME, | |
2275 | .info = cxlflash_driver_info, | |
2276 | .ioctl = cxlflash_ioctl, | |
2277 | .proc_name = CXLFLASH_NAME, | |
2278 | .queuecommand = cxlflash_queuecommand, | |
2279 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, | |
2280 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, | |
2281 | .change_queue_depth = cxlflash_change_queue_depth, | |
2282 | .cmd_per_lun = 16, | |
2283 | .can_queue = CXLFLASH_MAX_CMDS, | |
2284 | .this_id = -1, | |
2285 | .sg_tablesize = SG_NONE, /* No scatter gather support. */ | |
2286 | .max_sectors = CXLFLASH_MAX_SECTORS, | |
2287 | .use_clustering = ENABLE_CLUSTERING, | |
2288 | .shost_attrs = cxlflash_host_attrs, | |
2289 | .sdev_attrs = cxlflash_dev_attrs, | |
2290 | }; | |
2291 | ||
2292 | /* | |
2293 | * Device dependent values | |
2294 | */ | |
2295 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; | |
2296 | ||
2297 | /* | |
2298 | * PCI device binding table | |
2299 | */ | |
2300 | static struct pci_device_id cxlflash_pci_table[] = { | |
2301 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, | |
2302 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, | |
2303 | {} | |
2304 | }; | |
2305 | ||
2306 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); | |
2307 | ||
c21e0bbf MO |
2308 | /** |
2309 | * cxlflash_worker_thread() - work thread handler for the AFU | |
2310 | * @work: Work structure contained within cxlflash associated with host. | |
2311 | * | |
2312 | * Handles the following events: | |
2313 | * - Link reset which cannot be performed on interrupt context due to | |
2314 | * blocking up to a few seconds | |
2315 | * - Read AFU command room | |
ef51074a | 2316 | * - Rescan the host |
c21e0bbf MO |
2317 | */ |
2318 | static void cxlflash_worker_thread(struct work_struct *work) | |
2319 | { | |
5cdac81a MO |
2320 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
2321 | work_q); | |
c21e0bbf | 2322 | struct afu *afu = cfg->afu; |
4392ba49 | 2323 | struct device *dev = &cfg->dev->dev; |
c21e0bbf MO |
2324 | int port; |
2325 | ulong lock_flags; | |
2326 | ||
5cdac81a MO |
2327 | /* Avoid MMIO if the device has failed */ |
2329 | if (cfg->state != STATE_NORMAL) | |
2330 | return; | |
2331 | ||
c21e0bbf MO |
2332 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
2333 | ||
2334 | if (cfg->lr_state == LINK_RESET_REQUIRED) { | |
2335 | port = cfg->lr_port; | |
2336 | if (port < 0) | |
4392ba49 MO |
2337 | dev_err(dev, "%s: invalid port index %d\n", |
2338 | __func__, port); | |
c21e0bbf MO |
2339 | else { |
2340 | spin_unlock_irqrestore(cfg->host->host_lock, | |
2341 | lock_flags); | |
2342 | ||
2343 | /* The reset can block... */ | |
2344 | afu_link_reset(afu, port, | |
2345 | &afu->afu_map-> | |
2346 | global.fc_regs[port][0]); | |
2347 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); | |
2348 | } | |
2349 | ||
2350 | cfg->lr_state = LINK_RESET_COMPLETE; | |
2351 | } | |
2352 | ||
2353 | if (afu->read_room) { | |
2354 | atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); | |
2355 | afu->read_room = false; | |
2356 | } | |
2357 | ||
2358 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); | |
ef51074a MO |
2359 | |
2360 | if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) | |
2361 | scsi_scan_host(cfg->host); | |
c21e0bbf MO |
2362 | } |
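/*
 * Reader's note: this handler runs off cfg->work_q. Producers (e.g.
 * the interrupt paths) are assumed to set the relevant state and then
 * kick the work item, along these lines:
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	cfg->lr_port = port;
 *	schedule_work(&cfg->work_q);
 */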
2363 | ||
2364 | /** | |
2365 | * cxlflash_probe() - PCI entry point to add host | |
2366 | * @pdev: PCI device associated with the host. | |
2367 | * @dev_id: PCI device id associated with device. | |
2368 | * | |
2369 | * Return: 0 on success / non-zero on failure | |
2370 | */ | |
2371 | static int cxlflash_probe(struct pci_dev *pdev, | |
2372 | const struct pci_device_id *dev_id) | |
2373 | { | |
2374 | struct Scsi_Host *host; | |
2375 | struct cxlflash_cfg *cfg = NULL; | |
2376 | struct device *phys_dev; | |
2377 | struct dev_dependent_vals *ddv; | |
2378 | int rc = 0; | |
2379 | ||
2380 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", | |
2381 | __func__, pdev->irq); | |
2382 | ||
2383 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; | |
2384 | driver_template.max_sectors = ddv->max_sectors; | |
2385 | ||
2386 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); | |
2387 | if (!host) { | |
2388 | dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", | |
2389 | __func__); | |
2390 | rc = -ENOMEM; | |
2391 | goto out; | |
2392 | } | |
2393 | ||
2394 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; | |
2395 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; | |
2396 | host->max_channel = NUM_FC_PORTS - 1; | |
2397 | host->unique_id = host->host_no; | |
2398 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | |
2399 | ||
2400 | cfg = (struct cxlflash_cfg *)host->hostdata; | |
2401 | cfg->host = host; | |
2402 | rc = alloc_mem(cfg); | |
2403 | if (rc) { | |
2404 | dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", | |
2405 | __func__); | |
2406 | rc = -ENOMEM; | |
8b5b1e87 | 2407 | scsi_host_put(cfg->host); |
c21e0bbf MO |
2408 | goto out; |
2409 | } | |
2410 | ||
2411 | cfg->init_state = INIT_STATE_NONE; | |
2412 | cfg->dev = pdev; | |
2cb79266 MO |
2413 | |
2414 | /* | |
2415 | * The promoted LUNs move to the top of the LUN table. The rest stay | |
2416 | * on the bottom half. The bottom half grows from the end | |
2417 | * (index = 255), whereas the top half grows from the beginning | |
2418 | * (index = 0). | |
2419 | */ | |
2420 | cfg->promote_lun_index = 0; | |
2421 | cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1; | |
2422 | cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1; | |
2423 | ||
c21e0bbf MO |
2424 | cfg->dev_id = (struct pci_device_id *)dev_id; |
2425 | cfg->mcctx = NULL; | |
c21e0bbf MO |
2426 | |
2427 | init_waitqueue_head(&cfg->tmf_waitq); | |
439e85c1 | 2428 | init_waitqueue_head(&cfg->reset_waitq); |
c21e0bbf MO |
2429 | |
2430 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); | |
2431 | cfg->lr_state = LINK_RESET_INVALID; | |
2432 | cfg->lr_port = -1; | |
65be2c79 MO |
2433 | mutex_init(&cfg->ctx_tbl_list_mutex); |
2434 | mutex_init(&cfg->ctx_recovery_mutex); | |
0a27ae51 | 2435 | init_rwsem(&cfg->ioctl_rwsem); |
65be2c79 MO |
2436 | INIT_LIST_HEAD(&cfg->ctx_err_recovery); |
2437 | INIT_LIST_HEAD(&cfg->lluns); | |
c21e0bbf MO |
2438 | |
2439 | pci_set_drvdata(pdev, cfg); | |
2440 | ||
2441 | /* Use the special service provided to look up the physical | |
2442 | * PCI device, since we are called on the probe of the virtual | |
2443 | * PCI host bus (vphb) | |
2444 | */ | |
2445 | phys_dev = cxl_get_phys_dev(pdev); | |
2446 | if (!dev_is_pci(phys_dev)) { | |
4392ba49 | 2447 | dev_err(&pdev->dev, "%s: not a pci dev\n", __func__); |
c21e0bbf MO |
2448 | rc = -ENODEV; |
2449 | goto out_remove; | |
2450 | } | |
2451 | cfg->parent_dev = to_pci_dev(phys_dev); | |
2452 | ||
2453 | cfg->cxl_afu = cxl_pci_to_afu(pdev); | |
2454 | ||
2455 | rc = init_pci(cfg); | |
2456 | if (rc) { | |
2457 | dev_err(&pdev->dev, "%s: call to init_pci " | |
2458 | "failed rc=%d!\n", __func__, rc); | |
2459 | goto out_remove; | |
2460 | } | |
2461 | cfg->init_state = INIT_STATE_PCI; | |
2462 | ||
2463 | rc = init_afu(cfg); | |
2464 | if (rc) { | |
2465 | dev_err(&pdev->dev, "%s: call to init_afu " | |
2466 | "failed rc=%d!\n", __func__, rc); | |
2467 | goto out_remove; | |
2468 | } | |
2469 | cfg->init_state = INIT_STATE_AFU; | |
2470 | ||
2472 | rc = init_scsi(cfg); | |
2473 | if (rc) { | |
2474 | dev_err(&pdev->dev, "%s: call to init_scsi " | |
2475 | "failed rc=%d!\n", __func__, rc); | |
2476 | goto out_remove; | |
2477 | } | |
2478 | cfg->init_state = INIT_STATE_SCSI; | |
2479 | ||
2480 | out: | |
2481 | pr_debug("%s: returning rc=%d\n", __func__, rc); | |
2482 | return rc; | |
2483 | ||
2484 | out_remove: | |
2485 | cxlflash_remove(pdev); | |
2486 | goto out; | |
2487 | } | |
2488 | ||
0a27ae51 MO |
2489 | /** |
2490 | * drain_ioctls() - wait until all currently executing ioctls have completed | |
2491 | * @cfg: Internal structure associated with the host. | |
2492 | * | |
2493 | * Obtain write access to read/write semaphore that wraps ioctl | |
2494 | * handling to 'drain' ioctls currently executing. | |
2495 | */ | |
2496 | static void drain_ioctls(struct cxlflash_cfg *cfg) | |
2497 | { | |
2498 | down_write(&cfg->ioctl_rwsem); | |
2499 | up_write(&cfg->ioctl_rwsem); | |
2500 | } | |
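/*
 * Reader's note: this drain works because ioctl service routines are
 * assumed to hold the read side of the same semaphore for the length
 * of the call:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	// ... service the ioctl ...
 *	up_read(&cfg->ioctl_rwsem);
 *
 * down_write() then blocks until every in-flight reader has exited.
 */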
2501 | ||
5cdac81a MO |
2502 | /** |
2503 | * cxlflash_pci_error_detected() - called when a PCI error is detected | |
2504 | * @pdev: PCI device struct. | |
2505 | * @state: PCI channel state. | |
2506 | * | |
2507 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT | |
2508 | */ | |
2509 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, | |
2510 | pci_channel_state_t state) | |
2511 | { | |
65be2c79 | 2512 | int rc = 0; |
5cdac81a MO |
2513 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
2514 | struct device *dev = &cfg->dev->dev; | |
2515 | ||
2516 | dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); | |
2517 | ||
2518 | switch (state) { | |
2519 | case pci_channel_io_frozen: | |
439e85c1 | 2520 | cfg->state = STATE_RESET; |
5cdac81a | 2521 | scsi_block_requests(cfg->host); |
0a27ae51 | 2522 | drain_ioctls(cfg); |
65be2c79 MO |
2523 | rc = cxlflash_mark_contexts_error(cfg); |
2524 | if (unlikely(rc)) | |
2525 | dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", | |
2526 | __func__, rc); | |
5cdac81a MO |
2527 | term_mc(cfg, UNDO_START); |
2528 | stop_afu(cfg); | |
5cdac81a MO |
2529 | return PCI_ERS_RESULT_NEED_RESET; |
2530 | case pci_channel_io_perm_failure: | |
2531 | cfg->state = STATE_FAILTERM; | |
439e85c1 | 2532 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
2533 | scsi_unblock_requests(cfg->host); |
2534 | return PCI_ERS_RESULT_DISCONNECT; | |
2535 | default: | |
2536 | break; | |
2537 | } | |
2538 | return PCI_ERS_RESULT_NEED_RESET; | |
2539 | } | |
2540 | ||
2541 | /** | |
2542 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset | |
2543 | * @pdev: PCI device struct. | |
2544 | * | |
2545 | * This routine is called by the pci error recovery code after the PCI | |
2546 | * slot has been reset, just before we should resume normal operations. | |
2547 | * | |
2548 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT | |
2549 | */ | |
2550 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) | |
2551 | { | |
2552 | int rc = 0; | |
2553 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
2554 | struct device *dev = &cfg->dev->dev; | |
2555 | ||
2556 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
2557 | ||
2558 | rc = init_afu(cfg); | |
2559 | if (unlikely(rc)) { | |
2560 | dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc); | |
2561 | return PCI_ERS_RESULT_DISCONNECT; | |
2562 | } | |
2563 | ||
2564 | return PCI_ERS_RESULT_RECOVERED; | |
2565 | } | |
2566 | ||
2567 | /** | |
2568 | * cxlflash_pci_resume() - called when normal operation can resume | |
2569 | * @pdev: PCI device struct | |
2570 | */ | |
2571 | static void cxlflash_pci_resume(struct pci_dev *pdev) | |
2572 | { | |
2573 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
2574 | struct device *dev = &cfg->dev->dev; | |
2575 | ||
2576 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
2577 | ||
2578 | cfg->state = STATE_NORMAL; | |
439e85c1 | 2579 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
2580 | scsi_unblock_requests(cfg->host); |
2581 | } | |
2582 | ||
2583 | static const struct pci_error_handlers cxlflash_err_handler = { | |
2584 | .error_detected = cxlflash_pci_error_detected, | |
2585 | .slot_reset = cxlflash_pci_slot_reset, | |
2586 | .resume = cxlflash_pci_resume, | |
2587 | }; | |
2588 | ||
c21e0bbf MO |
2589 | /* |
2590 | * PCI device structure | |
2591 | */ | |
2592 | static struct pci_driver cxlflash_driver = { | |
2593 | .name = CXLFLASH_NAME, | |
2594 | .id_table = cxlflash_pci_table, | |
2595 | .probe = cxlflash_probe, | |
2596 | .remove = cxlflash_remove, | |
5cdac81a | 2597 | .err_handler = &cxlflash_err_handler, |
c21e0bbf MO |
2598 | }; |
2599 | ||
2600 | /** | |
2601 | * init_cxlflash() - module entry point | |
2602 | * | |
2603 | * Return: 0 on success / non-zero on failure | |
2604 | */ | |
2605 | static int __init init_cxlflash(void) | |
2606 | { | |
2607 | pr_info("%s: IBM Power CXL Flash Adapter: %s\n", | |
2608 | __func__, CXLFLASH_DRIVER_DATE); | |
2609 | ||
65be2c79 MO |
2610 | cxlflash_list_init(); |
2611 | ||
c21e0bbf MO |
2612 | return pci_register_driver(&cxlflash_driver); |
2613 | } | |
2614 | ||
2615 | /** | |
2616 | * exit_cxlflash() - module exit point | |
2617 | */ | |
2618 | static void __exit exit_cxlflash(void) | |
2619 | { | |
65be2c79 MO |
2620 | cxlflash_term_global_luns(); |
2621 | cxlflash_free_errpage(); | |
2622 | ||
c21e0bbf MO |
2623 | pci_unregister_driver(&cxlflash_driver); |
2624 | } | |
2625 | ||
2626 | module_init(init_cxlflash); | |
2627 | module_exit(exit_cxlflash); |