scsi: qla2xxx: Correction and improvement to fwdt processing
drivers/scsi/qla2xxx/qla_tmpl.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

#define IOBASE(reg)	offsetof(typeof(*reg), iobase_addr)

static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}

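/*
 * Helper convention used throughout this file: every routine taking a
 * "void *buf, ulong *len" pair treats a NULL buf as a sizing pass -- it
 * only advances *len without touching memory or hardware -- so the same
 * template walk both computes the dump size and captures the data.
 */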
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		WRT_REG_DWORD(window, data);
	}
}

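/*
 * Windowed register read: latch the base address through the I/O base
 * address register, then read "count" registers of "width" bytes through
 * the window at "offset", emitting an address/value pair for each one.
 */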
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

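/*
 * Each qla27xx_fwdt_entry_tNNN() handler below consumes one template entry
 * of the corresponding type and returns the next entry to process; the
 * end-of-template handler (t255) returns NULL to stop the walk, and entries
 * that cannot be captured are marked via qla27xx_skip_entry().
 */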
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE(reg), addr, buf);
	qla27xx_write_reg(reg, offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, banksel, bank, buf);
	qla27xx_read_window(reg, addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE(reg), addr, buf);
	qla27xx_write_reg(reg, banksel, bank, buf);
	qla27xx_write_reg(reg, offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(reg, offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver; shadow pointers are used instead.
		 * Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

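/*
 * Remote (indirect) register access as used by t270/t271 below: iobase
 * window 0x40 is selected, the remote address goes through offset 0xc0
 * (with the top bit set for reads) and the data goes through offset 0xc4.
 */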
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

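/*
 * Conditional entry: during capture (buf != NULL), if the device type or
 * port function does not match the entry's conditions, the entry that
 * follows is marked as skipped and the template's entry count is reduced
 * accordingly.
 */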
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(reg, cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(reg, data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, data_addr, wr_data, buf);
	qla27xx_write_reg(reg, cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278 },
	{ -1,				qla27xx_fwdt_entry_other }
};

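/*
 * qla27xx_find_entry() below depends on qla27xx_fwdt_entry_call[] being
 * sorted by ascending entry type with the (uint)-1 catch-all last; any
 * type not listed falls through to qla27xx_fwdt_entry_other().
 */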
static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

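/*
 * Walk one template: resolve each entry's handler by type and follow the
 * chain of entries until the end marker (t255 returns NULL) or the entry
 * count is exhausted; a leftover count or a missing end entry is reported
 * via debug messages.
 */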
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
		vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
		vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

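/*
 * Template checksum: qla27xx_template_checksum() sums all little-endian
 * 32-bit words (including the stored checksum field), folds the carries
 * back into the low 32 bits and returns the complement, so a consistent
 * template makes the verify helper below see a result of zero.
 */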
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

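/*
 * Capture a firmware dump: unless the caller already holds it, take the
 * hardware lock, then run each of the adapter's firmware dump templates
 * in turn, appending the output to the preallocated fw_dump buffer and
 * warning when the produced length differs from the expected dump_size.
 */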
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		uint j;
		ulong len;
		void *buf = vha->hw->fw_dump;

		for (j = 0; j < 2; j++, fwdt++, buf += len) {
			ql_log(ql_log_warn, vha, 0xd011,
			    "-> fwdt%u running...\n", j);
			if (!fwdt->template) {
				ql_log(ql_log_warn, vha, 0xd012,
				    "-> fwdt%u no template\n", j);
				break;
			}
			len = qla27xx_execute_fwdt_template(vha,
			    fwdt->template, buf);
			if (len != fwdt->dump_size) {
				ql_log(ql_log_warn, vha, 0xd013,
				    "-> fwdt%u fwdump residual=%+ld\n",
				    j, fwdt->dump_size - len);
			}
		}
		vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
		vha->hw->fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}