scsi: qla2xxx: Complain if a soft reset fails
[linux-2.6-block.git] drivers/scsi/qla2xxx/qla_tmpl.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

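/*
 * Shorthand for the ISP24xx register block of a host and for the byte
 * offset of its iobase_addr register within that block.
 */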
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))

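/*
 * Capture helpers: each one advances *len by the size it would emit and,
 * only when a destination buffer is supplied, also stores the data at
 * buf + *len.  Passing buf == NULL therefore performs a pure sizing pass.
 */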
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

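/*
 * Register-window helpers: pick a reader for the requested access width,
 * select a window through the iobase address register, then read "count"
 * registers of "width" bytes, recording address/value pairs in the dump.
 */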
static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		WRT_REG_DWORD(window, data);
	}
}

static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

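/*
 * Entries that cannot be captured are tagged with DRIVER_FLAG_SKIP_ENTRY
 * in the dumped template; entries are chained via their header size field.
 */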
static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

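/*
 * qla27xx_fwdt_entry_tNNN(): one handler per firmware dump template entry
 * type.  Each handler consumes its entry (capturing data when buf is
 * non-NULL, otherwise only accounting for the length) and returns the next
 * entry to process, or NULL to terminate the walk.
 */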
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

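/*
 * T262: dump a firmware RAM region.  The start/end addresses may be taken
 * from the entry or overridden with the sizes the driver discovered at
 * firmware initialization time, depending on the RAM area requested.
 */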
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

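/*
 * T263: capture request, response or ATIO queues.  Each captured queue is
 * preceded by its index and length; queues that do not exist are skipped,
 * and the number of captured queues is written back into the entry.
 */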
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}

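/* T266: soft-reset the RISC; warn (once) if the reset does not succeed. */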
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

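/*
 * T268: copy one of the host-resident buffers (extended trace, exchange
 * offload, extended login) into the dump, recording its size and DMA
 * address in the entry; unavailable buffers cause the entry to be skipped.
 */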
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver; shadow pointers are used by the
		 * driver instead. Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

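/*
 * T274: record the shadow out/in queue pointers for the request, response
 * and ATIO queues rather than the ring contents themselves.
 */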
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

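/*
 * T276: conditional entry.  If the device type and port function do not
 * match the entry's conditions, the immediately following entry is skipped
 * and discounted from the template's entry count.
 */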
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

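/*
 * Dispatch table mapping entry types to handlers.  qla27xx_find_entry()
 * walks it linearly until it reaches a type that is not smaller than the
 * one requested, so the table must stay sorted by type; the -1 sentinel
 * routes unknown types to qla27xx_fwdt_entry_other().
 */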
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
	{ -1, qla27xx_fwdt_entry_other }
};

static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

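/*
 * Walk every entry of a dump template, dispatching each to its handler.
 * With buf == NULL this only computes the dump length; leftover entries
 * or a missing end entry are reported for diagnostics.
 */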
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	WARN_ON_ONCE(sscanf(qla2x00_version_str,
	    "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5) != 6);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

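/*
 * Template checksum: 32-bit words are summed into a 64-bit accumulator,
 * the carries are folded back in once, and the result is inverted; a
 * valid template checksums to zero.
 */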
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

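/*
 * Capture a firmware dump into the preallocated fw_dump buffer by running
 * both dump templates (fwdt[0] and fwdt[1]) back to back, unless a dump
 * is already present.
 */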
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		uint j;
		ulong len;
		void *buf = vha->hw->fw_dump;

		for (j = 0; j < 2; j++, fwdt++, buf += len) {
			ql_log(ql_log_warn, vha, 0xd011,
			    "-> fwdt%u running...\n", j);
			if (!fwdt->template) {
				ql_log(ql_log_warn, vha, 0xd012,
				    "-> fwdt%u no template\n", j);
				break;
			}
			len = qla27xx_execute_fwdt_template(vha,
			    fwdt->template, buf);
			if (len != fwdt->dump_size) {
				ql_log(ql_log_warn, vha, 0xd013,
				    "-> fwdt%u fwdump residual=%+ld\n",
				    j, fwdt->dump_size - len);
			}
		}
		vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
		vha->hw->fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}