Commit | Line | Data |
---|---|---|
824a1566 KD |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* | |
3 | * Driver for Broadcom MPI3 Storage Controllers | |
4 | * | |
e74f2fbd | 5 | * Copyright (C) 2017-2023 Broadcom Inc. |
824a1566 KD |
6 | * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) |
7 | * | |
8 | */ | |
9 | ||
10 | #include "mpi3mr.h" | |
11 | #include <linux/io-64-nonatomic-lo-hi.h> | |
12 | ||
59bd9cfe | 13 | static int |
0a2714b7 | 14 | mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason); |
59bd9cfe | 15 | static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc); |
c5758fc7 SR |
16 | static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, |
17 | struct mpi3_ioc_facts_data *facts_data); | |
43ca1100 SS |
18 | static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, |
19 | struct mpi3mr_drv_cmd *drv_cmd); | |
f195fc06 | 20 | static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc); |
afd3a579 SR |
21 | static int poll_queues; |
22 | module_param(poll_queues, int, 0444); | |
23 | MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); | |
24 | ||
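/*
 * poll_queues reserves the requested number of operational queues for
 * io_uring poll mode; these queues are drained through mpi3mr_blk_mq_poll()
 * rather than by interrupts. For example (hypothetical usage), loading the
 * driver with "modprobe mpi3mr poll_queues=4" requests four such queues.
 */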
824a1566 KD |
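/*
 * 64-bit MMIO write helper: use the architecture's writeq() when it is
 * available, otherwise emulate it with two 32-bit writes (low dword first,
 * then the high dword at offset 4).
 */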
25 | #if defined(writeq) && defined(CONFIG_64BIT) |
26 | static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) | |
27 | { | |
28 | writeq(b, addr); | |
29 | } | |
30 | #else | |
31 | static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) | |
32 | { | |
33 | __u64 data_out = b; | |
34 | ||
35 | writel((u32)(data_out), addr); | |
36 | writel((u32)(data_out >> 32), (addr + 4)); | |
37 | } | |
38 | #endif | |
39 | ||
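/*
 * The operational request queue is treated as full when advancing the
 * producer index would make it equal to the consumer index; one slot is
 * always left unused to distinguish a full ring from an empty one.
 */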
023ab2a9 KD |
40 | static inline bool |
41 | mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q) | |
42 | { | |
43 | u16 pi, ci, max_entries; | |
44 | bool is_qfull = false; | |
45 | ||
46 | pi = op_req_q->pi; | |
47 | ci = READ_ONCE(op_req_q->ci); | |
48 | max_entries = op_req_q->num_requests; | |
49 | ||
50 | if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1)))) | |
51 | is_qfull = true; | |
52 | ||
53 | return is_qfull; | |
54 | } | |
55 | ||
824a1566 KD |
56 | static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc) |
57 | { | |
58 | u16 i, max_vectors; | |
59 | ||
60 | max_vectors = mrioc->intr_info_count; | |
61 | ||
62 | for (i = 0; i < max_vectors; i++) | |
63 | synchronize_irq(pci_irq_vector(mrioc->pdev, i)); | |
64 | } | |
65 | ||
66 | void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc) | |
67 | { | |
68 | mrioc->intr_enabled = 0; | |
69 | mpi3mr_sync_irqs(mrioc); | |
70 | } | |
71 | ||
72 | void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc) | |
73 | { | |
74 | mrioc->intr_enabled = 1; | |
75 | } | |
76 | ||
77 | static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) | |
78 | { | |
79 | u16 i; | |
80 | ||
81 | mpi3mr_ioc_disable_intr(mrioc); | |
82 | ||
83 | if (!mrioc->intr_info) | |
84 | return; | |
85 | ||
86 | for (i = 0; i < mrioc->intr_info_count; i++) | |
87 | free_irq(pci_irq_vector(mrioc->pdev, i), | |
88 | (mrioc->intr_info + i)); | |
89 | ||
90 | kfree(mrioc->intr_info); | |
91 | mrioc->intr_info = NULL; | |
92 | mrioc->intr_info_count = 0; | |
fe6db615 | 93 | mrioc->is_intr_info_set = false; |
824a1566 KD |
94 | pci_free_irq_vectors(mrioc->pdev); |
95 | } | |
96 | ||
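/*
 * Populate a simple MPI3 SGE at the given virtual address with the supplied
 * flags, length and DMA address (length and address are stored in
 * little-endian format as expected by the controller).
 */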
97 | void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, | |
98 | dma_addr_t dma_addr) | |
99 | { | |
100 | struct mpi3_sge_common *sgel = paddr; | |
101 | ||
102 | sgel->flags = flags; | |
103 | sgel->length = cpu_to_le32(length); | |
104 | sgel->address = cpu_to_le64(dma_addr); | |
105 | } | |
106 | ||
107 | void mpi3mr_build_zero_len_sge(void *paddr) | |
108 | { | |
109 | u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; | |
110 | ||
111 | mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1); | |
112 | } | |
113 | ||
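/*
 * Translate a reply frame DMA address reported by the controller back to its
 * host virtual address; NULL is returned for a zero address or an address
 * outside the reply buffer pool.
 */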
114 | void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, | |
115 | dma_addr_t phys_addr) | |
116 | { | |
117 | if (!phys_addr) | |
118 | return NULL; | |
119 | ||
120 | if ((phys_addr < mrioc->reply_buf_dma) || | |
121 | (phys_addr > mrioc->reply_buf_dma_max_address)) | |
122 | return NULL; | |
123 | ||
124 | return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma); | |
125 | } | |
126 | ||
127 | void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, | |
128 | dma_addr_t phys_addr) | |
129 | { | |
130 | if (!phys_addr) | |
131 | return NULL; | |
132 | ||
133 | return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma); | |
134 | } | |
135 | ||
136 | static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, | |
137 | u64 reply_dma) | |
138 | { | |
139 | u32 old_idx = 0; | |
a83ec831 | 140 | unsigned long flags; |
824a1566 | 141 | |
a83ec831 | 142 | spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags); |
824a1566 KD |
143 | old_idx = mrioc->reply_free_queue_host_index; |
144 | mrioc->reply_free_queue_host_index = ( | |
145 | (mrioc->reply_free_queue_host_index == | |
146 | (mrioc->reply_free_qsz - 1)) ? 0 : | |
147 | (mrioc->reply_free_queue_host_index + 1)); | |
148 | mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); | |
149 | writel(mrioc->reply_free_queue_host_index, | |
150 | &mrioc->sysif_regs->reply_free_host_index); | |
a83ec831 | 151 | spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags); |
824a1566 KD |
152 | } |
153 | ||
154 | void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, | |
155 | u64 sense_buf_dma) | |
156 | { | |
157 | u32 old_idx = 0; | |
a83ec831 | 158 | unsigned long flags; |
824a1566 | 159 | |
a83ec831 | 160 | spin_lock_irqsave(&mrioc->sbq_lock, flags); |
824a1566 KD |
161 | old_idx = mrioc->sbq_host_index; |
162 | mrioc->sbq_host_index = ((mrioc->sbq_host_index == | |
163 | (mrioc->sense_buf_q_sz - 1)) ? 0 : | |
164 | (mrioc->sbq_host_index + 1)); | |
165 | mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); | |
166 | writel(mrioc->sbq_host_index, | |
167 | &mrioc->sysif_regs->sense_buffer_free_host_index); | |
a83ec831 | 168 | spin_unlock_irqrestore(&mrioc->sbq_lock, flags); |
824a1566 KD |
169 | } |
170 | ||
9fc4abfe KD |
171 | static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, |
172 | struct mpi3_event_notification_reply *event_reply) | |
173 | { | |
174 | char *desc = NULL; | |
175 | u16 event; | |
176 | ||
b0b7ee3b RK |
177 | if (!(mrioc->logging_level & MPI3_DEBUG_EVENT)) |
178 | return; | |
179 | ||
9fc4abfe KD |
180 | event = event_reply->event; |
181 | ||
182 | switch (event) { | |
183 | case MPI3_EVENT_LOG_DATA: | |
184 | desc = "Log Data"; | |
185 | break; | |
186 | case MPI3_EVENT_CHANGE: | |
187 | desc = "Event Change"; | |
188 | break; | |
189 | case MPI3_EVENT_GPIO_INTERRUPT: | |
190 | desc = "GPIO Interrupt"; | |
191 | break; | |
9fc4abfe KD |
192 | case MPI3_EVENT_CABLE_MGMT: |
193 | desc = "Cable Management"; | |
194 | break; | |
195 | case MPI3_EVENT_ENERGY_PACK_CHANGE: | |
196 | desc = "Energy Pack Change"; | |
197 | break; | |
198 | case MPI3_EVENT_DEVICE_ADDED: | |
199 | { | |
200 | struct mpi3_device_page0 *event_data = | |
201 | (struct mpi3_device_page0 *)event_reply->event_data; | |
202 | ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n", | |
203 | event_data->dev_handle, event_data->device_form); | |
204 | return; | |
205 | } | |
206 | case MPI3_EVENT_DEVICE_INFO_CHANGED: | |
207 | { | |
208 | struct mpi3_device_page0 *event_data = | |
209 | (struct mpi3_device_page0 *)event_reply->event_data; | |
210 | ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n", | |
211 | event_data->dev_handle, event_data->device_form); | |
212 | return; | |
213 | } | |
214 | case MPI3_EVENT_DEVICE_STATUS_CHANGE: | |
215 | { | |
216 | struct mpi3_event_data_device_status_change *event_data = | |
217 | (struct mpi3_event_data_device_status_change *)event_reply->event_data; | |
218 | ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n", | |
219 | event_data->dev_handle, event_data->reason_code); | |
220 | return; | |
221 | } | |
222 | case MPI3_EVENT_SAS_DISCOVERY: | |
223 | { | |
224 | struct mpi3_event_data_sas_discovery *event_data = | |
225 | (struct mpi3_event_data_sas_discovery *)event_reply->event_data; | |
226 | ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n", | |
227 | (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ? | |
228 | "start" : "stop", | |
229 | le32_to_cpu(event_data->discovery_status)); | |
230 | return; | |
231 | } | |
232 | case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: | |
233 | desc = "SAS Broadcast Primitive"; | |
234 | break; | |
235 | case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE: | |
236 | desc = "SAS Notify Primitive"; | |
237 | break; | |
238 | case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: | |
239 | desc = "SAS Init Device Status Change"; | |
240 | break; | |
241 | case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW: | |
242 | desc = "SAS Init Table Overflow"; | |
243 | break; | |
244 | case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: | |
245 | desc = "SAS Topology Change List"; | |
246 | break; | |
247 | case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: | |
248 | desc = "Enclosure Device Status Change"; | |
249 | break; | |
7188c03f SR |
250 | case MPI3_EVENT_ENCL_DEVICE_ADDED: |
251 | desc = "Enclosure Added"; | |
252 | break; | |
9fc4abfe KD |
253 | case MPI3_EVENT_HARD_RESET_RECEIVED: |
254 | desc = "Hard Reset Received"; | |
255 | break; | |
256 | case MPI3_EVENT_SAS_PHY_COUNTER: | |
257 | desc = "SAS PHY Counter"; | |
258 | break; | |
259 | case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: | |
260 | desc = "SAS Device Discovery Error"; | |
261 | break; | |
262 | case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: | |
263 | desc = "PCIE Topology Change List"; | |
264 | break; | |
265 | case MPI3_EVENT_PCIE_ENUMERATION: | |
266 | { | |
267 | struct mpi3_event_data_pcie_enumeration *event_data = | |
268 | (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data; | |
269 | ioc_info(mrioc, "PCIE Enumeration: (%s)", | |
270 | (event_data->reason_code == | |
271 | MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop"); | |
272 | if (event_data->enumeration_status) | |
273 | ioc_info(mrioc, "enumeration_status(0x%08x)\n", | |
274 | le32_to_cpu(event_data->enumeration_status)); | |
275 | return; | |
276 | } | |
277 | case MPI3_EVENT_PREPARE_FOR_RESET: | |
278 | desc = "Prepare For Reset"; | |
279 | break; | |
d8d08d16 RK |
280 | case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: |
281 | desc = "Diagnostic Buffer Status Change"; | |
282 | break; | |
9fc4abfe KD |
283 | } |
284 | ||
285 | if (!desc) | |
286 | return; | |
287 | ||
288 | ioc_info(mrioc, "%s\n", desc); | |
289 | } | |
290 | ||
824a1566 KD |
291 | static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc, |
292 | struct mpi3_default_reply *def_reply) | |
293 | { | |
294 | struct mpi3_event_notification_reply *event_reply = | |
295 | (struct mpi3_event_notification_reply *)def_reply; | |
296 | ||
297 | mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count); | |
9fc4abfe | 298 | mpi3mr_print_event_data(mrioc, event_reply); |
13ef29ea | 299 | mpi3mr_os_handle_events(mrioc, event_reply); |
824a1566 KD |
300 | } |
301 | ||
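/*
 * Map the host tag of a reply to the internal driver command tracker that
 * issued the request (init, config, bsg, block TM, PEL abort/wait, transport,
 * device removal or event-ack commands). Replies with an invalid host tag
 * that carry an event notification are routed to the event handler and NULL
 * is returned.
 */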
302 | static struct mpi3mr_drv_cmd * | |
303 | mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, | |
304 | struct mpi3_default_reply *def_reply) | |
305 | { | |
13ef29ea KD |
306 | u16 idx; |
307 | ||
824a1566 KD |
308 | switch (host_tag) { |
309 | case MPI3MR_HOSTTAG_INITCMDS: | |
310 | return &mrioc->init_cmds; | |
32d457d5 SR |
311 | case MPI3MR_HOSTTAG_CFG_CMDS: |
312 | return &mrioc->cfg_cmds; | |
f5e6d5a3 SS |
313 | case MPI3MR_HOSTTAG_BSG_CMDS: |
314 | return &mrioc->bsg_cmds; | |
e844adb1 KD |
315 | case MPI3MR_HOSTTAG_BLK_TMS: |
316 | return &mrioc->host_tm_cmds; | |
43ca1100 SS |
317 | case MPI3MR_HOSTTAG_PEL_ABORT: |
318 | return &mrioc->pel_abort_cmd; | |
319 | case MPI3MR_HOSTTAG_PEL_WAIT: | |
320 | return &mrioc->pel_cmds; | |
2bd37e28 SR |
321 | case MPI3MR_HOSTTAG_TRANSPORT_CMDS: |
322 | return &mrioc->transport_cmds; | |
824a1566 KD |
323 | case MPI3MR_HOSTTAG_INVALID: |
324 | if (def_reply && def_reply->function == | |
325 | MPI3_FUNCTION_EVENT_NOTIFICATION) | |
326 | mpi3mr_handle_events(mrioc, def_reply); | |
327 | return NULL; | |
328 | default: | |
329 | break; | |
330 | } | |
13ef29ea KD |
331 | if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN && |
332 | host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) { | |
333 | idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; | |
334 | return &mrioc->dev_rmhs_cmds[idx]; | |
335 | } | |
824a1566 | 336 | |
c1af985d SR |
337 | if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN && |
338 | host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) { | |
339 | idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; | |
340 | return &mrioc->evtack_cmds[idx]; | |
341 | } | |
342 | ||
824a1566 KD |
343 | return NULL; |
344 | } | |
345 | ||
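/*
 * Process a single admin reply descriptor: extract the host tag, IOC status
 * and log info from a status, address-reply or success descriptor, copy any
 * reply frame and sense data into the matching driver command tracker, and
 * either complete the waiting command or invoke its callback.
 */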
346 | static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, | |
347 | struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma) | |
348 | { | |
349 | u16 reply_desc_type, host_tag = 0; | |
350 | u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; | |
6dc7050d | 351 | u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS; |
d8d08d16 | 352 | u32 ioc_loginfo = 0, sense_count = 0; |
824a1566 KD |
353 | struct mpi3_status_reply_descriptor *status_desc; |
354 | struct mpi3_address_reply_descriptor *addr_desc; | |
355 | struct mpi3_success_reply_descriptor *success_desc; | |
356 | struct mpi3_default_reply *def_reply = NULL; | |
357 | struct mpi3mr_drv_cmd *cmdptr = NULL; | |
358 | struct mpi3_scsi_io_reply *scsi_reply; | |
d8d08d16 | 359 | struct scsi_sense_hdr sshdr; |
824a1566 KD |
360 | u8 *sense_buf = NULL; |
361 | ||
362 | *reply_dma = 0; | |
363 | reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & | |
364 | MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; | |
365 | switch (reply_desc_type) { | |
366 | case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: | |
367 | status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; | |
368 | host_tag = le16_to_cpu(status_desc->host_tag); | |
369 | ioc_status = le16_to_cpu(status_desc->ioc_status); | |
370 | if (ioc_status & | |
371 | MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) | |
372 | ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); | |
6dc7050d RK |
373 | masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; |
374 | mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); | |
824a1566 KD |
375 | break; |
376 | case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: | |
377 | addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; | |
378 | *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); | |
379 | def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma); | |
380 | if (!def_reply) | |
381 | goto out; | |
382 | host_tag = le16_to_cpu(def_reply->host_tag); | |
383 | ioc_status = le16_to_cpu(def_reply->ioc_status); | |
384 | if (ioc_status & | |
385 | MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) | |
386 | ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); | |
6dc7050d | 387 | masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; |
824a1566 KD |
388 | if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { |
389 | scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; | |
390 | sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, | |
391 | le64_to_cpu(scsi_reply->sense_data_buffer_address)); | |
d8d08d16 RK |
392 | sense_count = le32_to_cpu(scsi_reply->sense_count); |
393 | if (sense_buf) { | |
394 | scsi_normalize_sense(sense_buf, sense_count, | |
395 | &sshdr); | |
396 | mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, | |
397 | sshdr.asc, sshdr.ascq); | |
398 | } | |
824a1566 | 399 | } |
6dc7050d | 400 | mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); |
824a1566 KD |
401 | break; |
402 | case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: | |
403 | success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; | |
404 | host_tag = le16_to_cpu(success_desc->host_tag); | |
405 | break; | |
406 | default: | |
407 | break; | |
408 | } | |
409 | ||
410 | cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply); | |
411 | if (cmdptr) { | |
412 | if (cmdptr->state & MPI3MR_CMD_PENDING) { | |
413 | cmdptr->state |= MPI3MR_CMD_COMPLETE; | |
414 | cmdptr->ioc_loginfo = ioc_loginfo; | |
6dc7050d RK |
415 | if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS) |
416 | cmdptr->ioc_status = ioc_status; | |
417 | else | |
418 | cmdptr->ioc_status = masked_ioc_status; | |
824a1566 KD |
419 | cmdptr->state &= ~MPI3MR_CMD_PENDING; |
420 | if (def_reply) { | |
421 | cmdptr->state |= MPI3MR_CMD_REPLY_VALID; | |
422 | memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, | |
c5758fc7 | 423 | mrioc->reply_sz); |
824a1566 | 424 | } |
f762326b SP |
425 | if (sense_buf && cmdptr->sensebuf) { |
426 | cmdptr->is_sense = 1; | |
427 | memcpy(cmdptr->sensebuf, sense_buf, | |
428 | MPI3MR_SENSE_BUF_SZ); | |
429 | } | |
824a1566 KD |
430 | if (cmdptr->is_waiting) { |
431 | complete(&cmdptr->done); | |
432 | cmdptr->is_waiting = 0; | |
433 | } else if (cmdptr->callback) | |
434 | cmdptr->callback(mrioc, cmdptr); | |
435 | } | |
436 | } | |
437 | out: | |
438 | if (sense_buf) | |
439 | mpi3mr_repost_sense_buf(mrioc, | |
440 | le64_to_cpu(scsi_reply->sense_data_buffer_address)); | |
441 | } | |
442 | ||
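/*
 * Drain the admin reply queue: descriptors are consumed until the phase bit
 * no longer matches the expected phase, the consumer index register is
 * updated after every MPI3MR_THRESHOLD_REPLY_COUNT completions and once at
 * the end, and re-entry is prevented through admin_reply_q_in_use.
 */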
02ca7da2 | 443 | int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) |
824a1566 KD |
444 | { |
445 | u32 exp_phase = mrioc->admin_reply_ephase; | |
446 | u32 admin_reply_ci = mrioc->admin_reply_ci; | |
447 | u32 num_admin_replies = 0; | |
448 | u64 reply_dma = 0; | |
199510e3 | 449 | u16 threshold_comps = 0; |
824a1566 KD |
450 | struct mpi3_default_reply_descriptor *reply_desc; |
451 | ||
ca41929b RK |
452 | if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) { |
453 | atomic_inc(&mrioc->admin_pend_isr); | |
02ca7da2 | 454 | return 0; |
ca41929b | 455 | } |
02ca7da2 | 456 | |
3b5091fe | 457 | atomic_set(&mrioc->admin_pend_isr, 0); |
824a1566 KD |
458 | reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + |
459 | admin_reply_ci; | |
460 | ||
461 | if ((le16_to_cpu(reply_desc->reply_flags) & | |
02ca7da2 RK |
462 | MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { |
463 | atomic_dec(&mrioc->admin_reply_q_in_use); | |
824a1566 | 464 | return 0; |
02ca7da2 | 465 | } |
824a1566 KD |
466 | |
467 | do { | |
f195fc06 | 468 | if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) |
f2a79d20 SR |
469 | break; |
470 | ||
824a1566 KD |
471 | mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); |
472 | mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma); | |
473 | if (reply_dma) | |
474 | mpi3mr_repost_reply_buf(mrioc, reply_dma); | |
475 | num_admin_replies++; | |
199510e3 | 476 | threshold_comps++; |
824a1566 KD |
477 | if (++admin_reply_ci == mrioc->num_admin_replies) { |
478 | admin_reply_ci = 0; | |
479 | exp_phase ^= 1; | |
480 | } | |
481 | reply_desc = | |
482 | (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + | |
483 | admin_reply_ci; | |
484 | if ((le16_to_cpu(reply_desc->reply_flags) & | |
485 | MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) | |
486 | break; | |
199510e3 RK |
487 | if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { |
488 | writel(admin_reply_ci, | |
489 | &mrioc->sysif_regs->admin_reply_queue_ci); | |
490 | threshold_comps = 0; | |
491 | } | |
824a1566 KD |
492 | } while (1); |
493 | ||
494 | writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); | |
495 | mrioc->admin_reply_ci = admin_reply_ci; | |
496 | mrioc->admin_reply_ephase = exp_phase; | |
02ca7da2 | 497 | atomic_dec(&mrioc->admin_reply_q_in_use); |
824a1566 KD |
498 | |
499 | return num_admin_replies; | |
500 | } | |
501 | ||
023ab2a9 KD |
502 | /** |
503 | * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to | |
504 | * queue's consumer index from operational reply descriptor queue. | |
505 | * @op_reply_q: op_reply_qinfo object | |
506 | * @reply_ci: operational reply descriptor's queue consumer index | |
507 | * | |
904fdd20 | 508 | * Returns: reply descriptor frame address |
023ab2a9 KD |
509 | */ |
510 | static inline struct mpi3_default_reply_descriptor * | |
511 | mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci) | |
512 | { | |
513 | void *segment_base_addr; | |
514 | struct segments *segments = op_reply_q->q_segments; | |
515 | struct mpi3_default_reply_descriptor *reply_desc = NULL; | |
516 | ||
517 | segment_base_addr = | |
518 | segments[reply_ci / op_reply_q->segment_qd].segment; | |
519 | reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr + | |
520 | (reply_ci % op_reply_q->segment_qd); | |
521 | return reply_desc; | |
522 | } | |
523 | ||
afd3a579 SR |
524 | /** |
525 | * mpi3mr_process_op_reply_q - Operational reply queue handler | |
526 | * @mrioc: Adapter instance reference | |
527 | * @op_reply_q: Operational reply queue info | |
528 | * | |
529 | * Checks the specific operational reply queue and drains the | |
530 | * reply queue entries until the queue is empty, processing the |
531 | * individual reply descriptors. |
532 | * |
533 | * Return: 0 if the queue is already being processed, or the number of |
534 | * reply descriptors processed. |
535 | */ | |
536 | int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, | |
537 | struct op_reply_qinfo *op_reply_q) | |
023ab2a9 | 538 | { |
023ab2a9 KD |
539 | struct op_req_qinfo *op_req_q; |
540 | u32 exp_phase; | |
541 | u32 reply_ci; | |
542 | u32 num_op_reply = 0; | |
543 | u64 reply_dma = 0; | |
544 | struct mpi3_default_reply_descriptor *reply_desc; | |
199510e3 | 545 | u16 req_q_idx = 0, reply_qidx, threshold_comps = 0; |
023ab2a9 KD |
546 | |
547 | reply_qidx = op_reply_q->qid - 1; | |
548 | ||
463429f8 KD |
549 | if (!atomic_add_unless(&op_reply_q->in_use, 1, 1)) |
550 | return 0; | |
551 | ||
023ab2a9 KD |
552 | exp_phase = op_reply_q->ephase; |
553 | reply_ci = op_reply_q->ci; | |
554 | ||
555 | reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); | |
556 | if ((le16_to_cpu(reply_desc->reply_flags) & | |
557 | MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { | |
463429f8 | 558 | atomic_dec(&op_reply_q->in_use); |
023ab2a9 KD |
559 | return 0; |
560 | } | |
561 | ||
562 | do { | |
f195fc06 | 563 | if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) |
f2a79d20 SR |
564 | break; |
565 | ||
023ab2a9 KD |
566 | req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1; |
567 | op_req_q = &mrioc->req_qinfo[req_q_idx]; | |
568 | ||
569 | WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); | |
570 | mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, | |
571 | reply_qidx); | |
cdd44525 | 572 | |
023ab2a9 KD |
573 | if (reply_dma) |
574 | mpi3mr_repost_reply_buf(mrioc, reply_dma); | |
575 | num_op_reply++; | |
199510e3 | 576 | threshold_comps++; |
023ab2a9 KD |
577 | |
578 | if (++reply_ci == op_reply_q->num_replies) { | |
579 | reply_ci = 0; | |
580 | exp_phase ^= 1; | |
581 | } | |
582 | ||
583 | reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); | |
584 | ||
585 | if ((le16_to_cpu(reply_desc->reply_flags) & | |
586 | MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) | |
587 | break; | |
7f9f953d | 588 | #ifndef CONFIG_PREEMPT_RT |
463429f8 KD |
589 | /* |
590 | * Exit completion loop to avoid CPU lockup | |
591 | * Ensure remaining completion happens from threaded ISR. | |
592 | */ | |
593 | if (num_op_reply > mrioc->max_host_ios) { | |
afd3a579 | 594 | op_reply_q->enable_irq_poll = true; |
463429f8 KD |
595 | break; |
596 | } | |
7f9f953d | 597 | #endif |
199510e3 RK |
598 | if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { |
599 | writel(reply_ci, | |
600 | &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); | |
601 | atomic_sub(threshold_comps, &op_reply_q->pend_ios); | |
602 | threshold_comps = 0; | |
603 | } | |
023ab2a9 KD |
604 | } while (1); |
605 | ||
606 | writel(reply_ci, | |
607 | &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); | |
608 | op_reply_q->ci = reply_ci; | |
609 | op_reply_q->ephase = exp_phase; | |
199510e3 | 610 | atomic_sub(threshold_comps, &op_reply_q->pend_ios); |
463429f8 | 611 | atomic_dec(&op_reply_q->in_use); |
023ab2a9 KD |
612 | return num_op_reply; |
613 | } | |
614 | ||
afd3a579 SR |
615 | /** |
616 | * mpi3mr_blk_mq_poll - Operational reply queue handler | |
617 | * @shost: SCSI Host reference | |
618 | * @queue_num: Request queue number (from the OS perspective, the hardware context number) |
619 | * | |
620 | * Checks the specific operational reply queue and drains the | |
621 | * reply queue entries until the queue is empty, processing the |
622 | * individual reply descriptors. |
623 | * |
624 | * Return: 0 if the queue is already being processed, or the number of |
625 | * reply descriptors processed. |
626 | */ | |
627 | int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) | |
628 | { | |
629 | int num_entries = 0; | |
630 | struct mpi3mr_ioc *mrioc; | |
631 | ||
632 | mrioc = (struct mpi3mr_ioc *)shost->hostdata; | |
633 | ||
f2a79d20 | 634 | if ((mrioc->reset_in_progress || mrioc->prepare_for_reset || |
1c342b05 | 635 | mrioc->unrecoverable || mrioc->pci_err_recovery)) |
afd3a579 SR |
636 | return 0; |
637 | ||
638 | num_entries = mpi3mr_process_op_reply_q(mrioc, | |
639 | &mrioc->op_reply_qinfo[queue_num]); | |
640 | ||
641 | return num_entries; | |
642 | } | |
643 | ||
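/*
 * Primary interrupt handler: MSI-X vector 0 additionally services the admin
 * reply queue; every vector then drains its associated operational reply
 * queue, if one is assigned.
 */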
824a1566 KD |
644 | static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) |
645 | { | |
646 | struct mpi3mr_intr_info *intr_info = privdata; | |
647 | struct mpi3mr_ioc *mrioc; | |
648 | u16 midx; | |
463429f8 | 649 | u32 num_admin_replies = 0, num_op_reply = 0; |
824a1566 KD |
650 | |
651 | if (!intr_info) | |
652 | return IRQ_NONE; | |
653 | ||
654 | mrioc = intr_info->mrioc; | |
655 | ||
656 | if (!mrioc->intr_enabled) | |
657 | return IRQ_NONE; | |
658 | ||
659 | midx = intr_info->msix_index; | |
660 | ||
661 | if (!midx) | |
662 | num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); | |
463429f8 | 663 | if (intr_info->op_reply_q) |
afd3a579 SR |
664 | num_op_reply = mpi3mr_process_op_reply_q(mrioc, |
665 | intr_info->op_reply_q); | |
824a1566 | 666 | |
463429f8 | 667 | if (num_admin_replies || num_op_reply) |
824a1566 KD |
668 | return IRQ_HANDLED; |
669 | else | |
670 | return IRQ_NONE; | |
671 | } | |
672 | ||
7f9f953d SR |
673 | #ifndef CONFIG_PREEMPT_RT |
674 | ||
824a1566 KD |
675 | static irqreturn_t mpi3mr_isr(int irq, void *privdata) |
676 | { | |
677 | struct mpi3mr_intr_info *intr_info = privdata; | |
678 | int ret; | |
679 | ||
680 | if (!intr_info) | |
681 | return IRQ_NONE; | |
682 | ||
683 | /* Call primary ISR routine */ | |
684 | ret = mpi3mr_isr_primary(irq, privdata); | |
685 | ||
463429f8 KD |
686 | /* |
687 | * If more IOs are expected, schedule IRQ polling thread. | |
688 | * Otherwise exit from ISR. | |
689 | */ | |
690 | if (!intr_info->op_reply_q) | |
691 | return ret; | |
692 | ||
693 | if (!intr_info->op_reply_q->enable_irq_poll || | |
694 | !atomic_read(&intr_info->op_reply_q->pend_ios)) | |
695 | return ret; | |
696 | ||
2e31be86 | 697 | disable_irq_nosync(intr_info->os_irq); |
463429f8 KD |
698 | |
699 | return IRQ_WAKE_THREAD; | |
824a1566 KD |
700 | } |
701 | ||
702 | /** | |
703 | * mpi3mr_isr_poll - Reply queue polling routine | |
704 | * @irq: IRQ | |
705 | * @privdata: Interrupt info | |
706 | * | |
707 | * Poll for pending I/O completions in a loop until no pending I/Os |
708 | * remain or until a controller queue depth worth of I/Os has been processed. |
709 | * | |
710 | * Return: IRQ_NONE or IRQ_HANDLED | |
711 | */ | |
712 | static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) | |
713 | { | |
463429f8 KD |
714 | struct mpi3mr_intr_info *intr_info = privdata; |
715 | struct mpi3mr_ioc *mrioc; | |
716 | u16 midx; | |
717 | u32 num_op_reply = 0; | |
718 | ||
719 | if (!intr_info || !intr_info->op_reply_q) | |
720 | return IRQ_NONE; | |
721 | ||
722 | mrioc = intr_info->mrioc; | |
723 | midx = intr_info->msix_index; | |
724 | ||
725 | /* Poll for pending IOs completions */ | |
726 | do { | |
f2a79d20 | 727 | if (!mrioc->intr_enabled || mrioc->unrecoverable) |
463429f8 KD |
728 | break; |
729 | ||
730 | if (!midx) | |
731 | mpi3mr_process_admin_reply_q(mrioc); | |
732 | if (intr_info->op_reply_q) | |
733 | num_op_reply += | |
afd3a579 SR |
734 | mpi3mr_process_op_reply_q(mrioc, |
735 | intr_info->op_reply_q); | |
463429f8 | 736 | |
24d7071d | 737 | usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1); |
463429f8 KD |
738 | |
739 | } while (atomic_read(&intr_info->op_reply_q->pend_ios) && | |
740 | (num_op_reply < mrioc->max_host_ios)); | |
741 | ||
742 | intr_info->op_reply_q->enable_irq_poll = false; | |
2e31be86 | 743 | enable_irq(intr_info->os_irq); |
463429f8 | 744 | |
824a1566 KD |
745 | return IRQ_HANDLED; |
746 | } | |
747 | ||
7f9f953d SR |
748 | #endif |
749 | ||
824a1566 KD |
750 | /** |
751 | * mpi3mr_request_irq - Request IRQ and register ISR | |
752 | * @mrioc: Adapter instance reference | |
753 | * @index: IRQ vector index | |
754 | * | |
755 | * Request a threaded ISR with primary and threaded secondary handlers. |
756 | * | |
757 | * Return: 0 on success and non-zero on failure. |
758 | */ | |
759 | static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) | |
760 | { | |
761 | struct pci_dev *pdev = mrioc->pdev; | |
762 | struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; | |
763 | int retval = 0; | |
764 | ||
765 | intr_info->mrioc = mrioc; | |
766 | intr_info->msix_index = index; | |
767 | intr_info->op_reply_q = NULL; | |
768 | ||
769 | snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", | |
770 | mrioc->driver_name, mrioc->id, index); | |
771 | ||
7f9f953d | 772 | #ifndef CONFIG_PREEMPT_RT |
824a1566 KD |
773 | retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, |
774 | mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); | |
7f9f953d SR |
775 | #else |
776 | retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary, | |
777 | NULL, IRQF_SHARED, intr_info->name, intr_info); | |
778 | #endif | |
824a1566 KD |
779 | if (retval) { |
780 | ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", | |
781 | intr_info->name, pci_irq_vector(pdev, index)); | |
782 | return retval; | |
783 | } | |
784 | ||
2e31be86 | 785 | intr_info->os_irq = pci_irq_vector(pdev, index); |
824a1566 KD |
786 | return retval; |
787 | } | |
788 | ||
afd3a579 SR |
789 | static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors) |
790 | { | |
791 | if (!mrioc->requested_poll_qcount) | |
792 | return; | |
793 | ||
794 | /* Reserved for Admin and Default Queue */ | |
795 | if (max_vectors > 2 && | |
796 | (mrioc->requested_poll_qcount < max_vectors - 2)) { | |
797 | ioc_info(mrioc, | |
798 | "enabled polled queues (%d) msix (%d)\n", | |
799 | mrioc->requested_poll_qcount, max_vectors); | |
800 | } else { | |
801 | ioc_info(mrioc, | |
802 | "disabled polled queues (%d) msix (%d) because of no resources for default queue\n", | |
803 | mrioc->requested_poll_qcount, max_vectors); | |
804 | mrioc->requested_poll_qcount = 0; | |
805 | } | |
806 | } | |
807 | ||
824a1566 KD |
808 | /** |
809 | * mpi3mr_setup_isr - Setup ISR for the controller | |
810 | * @mrioc: Adapter instance reference | |
811 | * @setup_one: Request one IRQ or more | |
812 | * | |
813 | * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR | |
814 | * | |
815 | * Return: 0 on success and non zero on failures. | |
816 | */ | |
817 | static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) | |
818 | { | |
819 | unsigned int irq_flags = PCI_IRQ_MSIX; | |
afd3a579 | 820 | int max_vectors, min_vec; |
2938bedd DC |
821 | int retval; |
822 | int i; | |
afd3a579 | 823 | struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; |
824a1566 | 824 | |
fe6db615 SR |
825 | if (mrioc->is_intr_info_set) |
826 | return 0; | |
827 | ||
824a1566 KD |
828 | mpi3mr_cleanup_isr(mrioc); |
829 | ||
afd3a579 | 830 | if (setup_one || reset_devices) { |
824a1566 | 831 | max_vectors = 1; |
afd3a579 SR |
832 | retval = pci_alloc_irq_vectors(mrioc->pdev, |
833 | 1, max_vectors, irq_flags); | |
834 | if (retval < 0) { | |
835 | ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", | |
836 | retval); | |
837 | goto out_failed; | |
838 | } | |
839 | } else { | |
824a1566 | 840 | max_vectors = |
afd3a579 SR |
841 | min_t(int, mrioc->cpu_count + 1 + |
842 | mrioc->requested_poll_qcount, mrioc->msix_count); | |
843 | ||
844 | mpi3mr_calc_poll_queues(mrioc, max_vectors); | |
824a1566 KD |
845 | |
846 | ioc_info(mrioc, | |
847 | "MSI-X vectors supported: %d, no of cores: %d,", | |
848 | mrioc->msix_count, mrioc->cpu_count); | |
849 | ioc_info(mrioc, | |
afd3a579 SR |
850 | "MSI-x vectors requested: %d poll_queues %d\n", |
851 | max_vectors, mrioc->requested_poll_qcount); | |
852 | ||
853 | desc.post_vectors = mrioc->requested_poll_qcount; | |
854 | min_vec = desc.pre_vectors + desc.post_vectors; | |
855 | irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; | |
856 | ||
857 | retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, | |
858 | min_vec, max_vectors, irq_flags, &desc); | |
859 | ||
860 | if (retval < 0) { | |
861 | ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", | |
862 | retval); | |
863 | goto out_failed; | |
864 | } | |
824a1566 | 865 | |
824a1566 | 866 | |
c9566231 KD |
867 | /* |
868 | * If only one MSI-x is allocated, then MSI-x 0 will be shared | |
869 | * between Admin queue and operational queue | |
870 | */ | |
afd3a579 | 871 | if (retval == min_vec) |
c9566231 | 872 | mrioc->op_reply_q_offset = 0; |
afd3a579 SR |
873 | else if (retval != (max_vectors)) { |
874 | ioc_info(mrioc, | |
875 | "allocated vectors (%d) are less than configured (%d)\n", | |
876 | retval, max_vectors); | |
877 | } | |
824a1566 | 878 | |
2938bedd | 879 | max_vectors = retval; |
afd3a579 SR |
880 | mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0; |
881 | ||
882 | mpi3mr_calc_poll_queues(mrioc, max_vectors); | |
883 | ||
824a1566 | 884 | } |
afd3a579 | 885 | |
824a1566 KD |
886 | mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, |
887 | GFP_KERNEL); | |
888 | if (!mrioc->intr_info) { | |
2938bedd | 889 | retval = -ENOMEM; |
824a1566 KD |
890 | pci_free_irq_vectors(mrioc->pdev); |
891 | goto out_failed; | |
892 | } | |
893 | for (i = 0; i < max_vectors; i++) { | |
894 | retval = mpi3mr_request_irq(mrioc, i); | |
895 | if (retval) { | |
896 | mrioc->intr_info_count = i; | |
897 | goto out_failed; | |
898 | } | |
899 | } | |
fe6db615 SR |
900 | if (reset_devices || !setup_one) |
901 | mrioc->is_intr_info_set = true; | |
824a1566 KD |
902 | mrioc->intr_info_count = max_vectors; |
903 | mpi3mr_ioc_enable_intr(mrioc); | |
2938bedd DC |
904 | return 0; |
905 | ||
824a1566 KD |
906 | out_failed: |
907 | mpi3mr_cleanup_isr(mrioc); | |
908 | ||
909 | return retval; | |
910 | } | |
911 | ||
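/* IOC state to name mapper structure */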
912 | static const struct { | |
913 | enum mpi3mr_iocstate value; | |
914 | char *name; | |
915 | } mrioc_states[] = { | |
916 | { MRIOC_STATE_READY, "ready" }, | |
917 | { MRIOC_STATE_FAULT, "fault" }, | |
918 | { MRIOC_STATE_RESET, "reset" }, | |
919 | { MRIOC_STATE_BECOMING_READY, "becoming ready" }, | |
920 | { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, | |
921 | { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, | |
922 | }; | |
923 | ||
924 | static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) | |
925 | { | |
926 | int i; | |
927 | char *name = NULL; | |
928 | ||
929 | for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { | |
930 | if (mrioc_states[i].value == mrioc_state) { | |
931 | name = mrioc_states[i].name; | |
932 | break; | |
933 | } | |
934 | } | |
935 | return name; | |
936 | } | |
937 | ||
f061178e KD |
938 | /* Reset reason to name mapper structure */ |
939 | static const struct { | |
940 | enum mpi3mr_reset_reason value; | |
941 | char *name; | |
942 | } mpi3mr_reset_reason_codes[] = { | |
943 | { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, | |
944 | { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, | |
f5e6d5a3 | 945 | { MPI3MR_RESET_FROM_APP, "application invocation" }, |
f061178e KD |
946 | { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, |
947 | { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, | |
f5e6d5a3 | 948 | { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" }, |
f061178e KD |
949 | { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, |
950 | { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" }, | |
951 | { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, | |
952 | { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, | |
953 | { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, | |
954 | { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, | |
955 | { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, | |
956 | { | |
957 | MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, | |
958 | "create reply queue timeout" |
959 | }, | |
960 | { | |
961 | MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, | |
962 | "create request queue timeout" |
963 | }, | |
964 | { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, | |
965 | { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, | |
966 | { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, | |
967 | { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, | |
968 | { | |
969 | MPI3MR_RESET_FROM_CIACTVRST_TIMER, | |
970 | "component image activation timeout" | |
971 | }, | |
972 | { | |
973 | MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, | |
974 | "get package version timeout" | |
975 | }, | |
976 | { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, | |
977 | { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, | |
d8d08d16 RK |
978 | { |
979 | MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT, | |
980 | "diagnostic buffer post timeout" | |
981 | }, | |
982 | { | |
983 | MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT, | |
984 | "diagnostic buffer release timeout" | |
985 | }, | |
5867b856 | 986 | { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, |
32d457d5 | 987 | { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, |
2bd37e28 | 988 | { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, |
f061178e KD |
989 | }; |
990 | ||
991 | /** | |
992 | * mpi3mr_reset_rc_name - get reset reason code name | |
993 | * @reason_code: reset reason code value | |
994 | * | |
995 | * Map reset reason to a NULL-terminated ASCII string |
996 | * | |
997 | * Return: name corresponding to reset reason value or NULL. | |
998 | */ | |
999 | static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) | |
1000 | { | |
1001 | int i; | |
1002 | char *name = NULL; | |
1003 | ||
1004 | for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { | |
1005 | if (mpi3mr_reset_reason_codes[i].value == reason_code) { | |
1006 | name = mpi3mr_reset_reason_codes[i].name; | |
1007 | break; | |
1008 | } | |
1009 | } | |
1010 | return name; | |
1011 | } | |
1012 | ||
1013 | /* Reset type to name mapper structure */ |
1014 | static const struct { | |
1015 | u16 reset_type; | |
1016 | char *name; | |
1017 | } mpi3mr_reset_types[] = { | |
1018 | { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, | |
1019 | { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, | |
1020 | }; | |
1021 | ||
1022 | /** | |
1023 | * mpi3mr_reset_type_name - get reset type name | |
1024 | * @reset_type: reset type value | |
1025 | * | |
1026 | * Map reset type to a NULL-terminated ASCII string |
1027 | * | |
1028 | * Return: name corresponding to reset type value or NULL. | |
1029 | */ | |
1030 | static const char *mpi3mr_reset_type_name(u16 reset_type) | |
1031 | { | |
1032 | int i; | |
1033 | char *name = NULL; | |
1034 | ||
1035 | for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { | |
1036 | if (mpi3mr_reset_types[i].reset_type == reset_type) { | |
1037 | name = mpi3mr_reset_types[i].name; | |
1038 | break; | |
1039 | } | |
1040 | } | |
1041 | return name; | |
1042 | } | |
1043 | ||
fb6eb98f RK |
1044 | /** |
1045 | * mpi3mr_is_fault_recoverable - Read fault code and decide | |
1046 | * whether the controller is recoverable |
1047 | * @mrioc: Adapter instance reference | |
1048 | * Return: true if fault is recoverable, false otherwise. | |
1049 | */ | |
1050 | static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc) | |
1051 | { | |
1052 | u32 fault; | |
1053 | ||
1054 | fault = (readl(&mrioc->sysif_regs->fault) & | |
1055 | MPI3_SYSIF_FAULT_CODE_MASK); | |
1056 | ||
1057 | switch (fault) { | |
1058 | case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: | |
1059 | case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: | |
1060 | ioc_warn(mrioc, | |
1061 | "controller requires system power cycle, marking controller as unrecoverable\n"); | |
1062 | return false; | |
1063 | case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER: | |
1064 | ioc_warn(mrioc, | |
1065 | "controller faulted due to insufficient power,\n" | |
1066 | " try connecting it to a different slot\n"); |
1067 | return false; | |
1068 | default: | |
1069 | break; | |
1070 | } | |
1071 | return true; | |
1072 | } | |
1073 | ||
824a1566 KD |
1074 | /** |
1075 | * mpi3mr_print_fault_info - Display fault information | |
1076 | * @mrioc: Adapter instance reference | |
1077 | * | |
1078 | * Display the controller fault information if there is a | |
1079 | * controller fault. | |
1080 | * | |
1081 | * Return: Nothing. | |
1082 | */ | |
b64845a7 | 1083 | void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) |
824a1566 KD |
1084 | { |
1085 | u32 ioc_status, code, code1, code2, code3; | |
1086 | ||
1087 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
1088 | ||
1089 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { | |
1090 | code = readl(&mrioc->sysif_regs->fault); | |
1091 | code1 = readl(&mrioc->sysif_regs->fault_info[0]); | |
1092 | code2 = readl(&mrioc->sysif_regs->fault_info[1]); | |
1093 | code3 = readl(&mrioc->sysif_regs->fault_info[2]); | |
1094 | ||
1095 | ioc_info(mrioc, | |
1096 | "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", | |
1097 | code, code1, code2, code3); | |
1098 | } | |
1099 | } | |
1100 | ||
1101 | /** | |
1102 | * mpi3mr_get_iocstate - Get IOC State | |
1103 | * @mrioc: Adapter instance reference | |
1104 | * | |
1105 | * Return the proper IOC state enum based on the IOC status, the |
1106 | * IOC configuration, and the unrecoverable state of the controller. |
1107 | * | |
1108 | * Return: Current IOC state. | |
1109 | */ | |
1110 | enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) | |
1111 | { | |
1112 | u32 ioc_status, ioc_config; | |
1113 | u8 ready, enabled; | |
1114 | ||
1115 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
1116 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
1117 | ||
1118 | if (mrioc->unrecoverable) | |
1119 | return MRIOC_STATE_UNRECOVERABLE; | |
1120 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) | |
1121 | return MRIOC_STATE_FAULT; | |
1122 | ||
1123 | ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); | |
1124 | enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); | |
1125 | ||
1126 | if (ready && enabled) | |
1127 | return MRIOC_STATE_READY; | |
1128 | if ((!ready) && (!enabled)) | |
1129 | return MRIOC_STATE_RESET; | |
1130 | if ((!ready) && (enabled)) | |
1131 | return MRIOC_STATE_BECOMING_READY; | |
1132 | ||
1133 | return MRIOC_STATE_RESET_REQUESTED; | |
1134 | } | |
1135 | ||
c432e167 C |
1136 | /** |
1137 | * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma | |
1138 | * @mrioc: Adapter instance reference | |
1139 | * | |
1140 | * Free the DMA memory allocated for IOCTL handling purpose. | |
c432e167 C |
1141 | * |
1142 | * Return: None | |
1143 | */ | |
1144 | static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc) | |
1145 | { | |
1146 | struct dma_memory_desc *mem_desc; | |
1147 | u16 i; | |
1148 | ||
1149 | if (!mrioc->ioctl_dma_pool) | |
1150 | return; | |
1151 | ||
1152 | for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) { | |
1153 | mem_desc = &mrioc->ioctl_sge[i]; | |
1154 | if (mem_desc->addr) { | |
1155 | dma_pool_free(mrioc->ioctl_dma_pool, | |
1156 | mem_desc->addr, | |
1157 | mem_desc->dma_addr); | |
1158 | mem_desc->addr = NULL; | |
1159 | } | |
1160 | } | |
1161 | dma_pool_destroy(mrioc->ioctl_dma_pool); | |
1162 | mrioc->ioctl_dma_pool = NULL; | |
1163 | mem_desc = &mrioc->ioctl_chain_sge; | |
1164 | ||
1165 | if (mem_desc->addr) { | |
1166 | dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, | |
1167 | mem_desc->addr, mem_desc->dma_addr); | |
1168 | mem_desc->addr = NULL; | |
1169 | } | |
1170 | mem_desc = &mrioc->ioctl_resp_sge; | |
1171 | if (mem_desc->addr) { | |
1172 | dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, | |
1173 | mem_desc->addr, mem_desc->dma_addr); | |
1174 | mem_desc->addr = NULL; | |
1175 | } | |
1176 | ||
1177 | mrioc->ioctl_sges_allocated = false; | |
1178 | } | |
1179 | ||
1180 | /** | |
1181 | * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma | |
1182 | * @mrioc: Adapter instance reference | |
c432e167 C |
1183 | * |
1184 | * This function allocates DMA-able memory required to handle |
1185 | * application-issued MPI3 IOCTL requests. |
1186 | * | |
1187 | * Return: None | |
1188 | */ | |
1189 | static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc) | |
1190 | ||
1191 | { | |
1192 | struct dma_memory_desc *mem_desc; | |
1193 | u16 i; | |
1194 | ||
1195 | mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool", | |
1196 | &mrioc->pdev->dev, | |
1197 | MPI3MR_IOCTL_SGE_SIZE, | |
1198 | MPI3MR_PAGE_SIZE_4K, 0); | |
1199 | ||
1200 | if (!mrioc->ioctl_dma_pool) { | |
1201 | ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n"); | |
1202 | goto out_failed; | |
1203 | } | |
1204 | ||
1205 | for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) { | |
1206 | mem_desc = &mrioc->ioctl_sge[i]; | |
1207 | mem_desc->size = MPI3MR_IOCTL_SGE_SIZE; | |
1208 | mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool, | |
1209 | GFP_KERNEL, | |
1210 | &mem_desc->dma_addr); | |
1211 | if (!mem_desc->addr) | |
1212 | goto out_failed; | |
1213 | } | |
1214 | ||
1215 | mem_desc = &mrioc->ioctl_chain_sge; | |
1216 | mem_desc->size = MPI3MR_PAGE_SIZE_4K; | |
1217 | mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, | |
1218 | mem_desc->size, | |
1219 | &mem_desc->dma_addr, | |
1220 | GFP_KERNEL); | |
1221 | if (!mem_desc->addr) | |
1222 | goto out_failed; | |
1223 | ||
1224 | mem_desc = &mrioc->ioctl_resp_sge; | |
1225 | mem_desc->size = MPI3MR_PAGE_SIZE_4K; | |
1226 | mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, | |
1227 | mem_desc->size, | |
1228 | &mem_desc->dma_addr, | |
1229 | GFP_KERNEL); | |
1230 | if (!mem_desc->addr) | |
1231 | goto out_failed; | |
1232 | ||
1233 | mrioc->ioctl_sges_allocated = true; | |
1234 | ||
1235 | return; | |
1236 | out_failed: | |
1237 | ioc_warn(mrioc, "cannot allocate DMA memory for the MPT commands\n" |
1238 | "from the applications, the application interface for MPT commands is disabled\n"); |
1239 | mpi3mr_free_ioctl_dma_memory(mrioc); | |
1240 | } | |
1241 | ||
824a1566 KD |
1242 | /** |
1243 | * mpi3mr_clear_reset_history - clear reset history | |
1244 | * @mrioc: Adapter instance reference | |
1245 | * | |
1246 | * Write the reset history bit in IOC status to clear the bit, | |
1247 | * if it is already set. | |
1248 | * | |
1249 | * Return: Nothing. | |
1250 | */ | |
1251 | static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) | |
1252 | { | |
1253 | u32 ioc_status; | |
1254 | ||
1255 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
1256 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) | |
1257 | writel(ioc_status, &mrioc->sysif_regs->ioc_status); | |
1258 | } | |
1259 | ||
1260 | /** | |
1261 | * mpi3mr_issue_and_process_mur - Message unit Reset handler | |
1262 | * @mrioc: Adapter instance reference | |
1263 | * @reset_reason: Reset reason code | |
1264 | * | |
1265 | * Issue Message unit Reset to the controller and wait for it to | |
1266 | * be complete. | |
1267 | * | |
1268 | * Return: 0 on success, -1 on failure. | |
1269 | */ | |
1270 | static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, | |
1271 | u32 reset_reason) | |
1272 | { | |
0a2714b7 | 1273 | u32 ioc_config, timeout, ioc_status, scratch_pad0; |
824a1566 KD |
1274 | int retval = -1; |
1275 | ||
1276 | ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); | |
1277 | if (mrioc->unrecoverable) { | |
1278 | ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n"); | |
1279 | return retval; | |
1280 | } | |
1281 | mpi3mr_clear_reset_history(mrioc); | |
0a2714b7 RK |
1282 | scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << |
1283 | MPI3MR_RESET_REASON_OSTYPE_SHIFT) | | |
1284 | (mrioc->facts.ioc_num << | |
1285 | MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); | |
1286 | writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]); | |
824a1566 KD |
1287 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); |
1288 | ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; | |
1289 | writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); | |
1290 | ||
22beef38 | 1291 | timeout = MPI3MR_MUR_TIMEOUT * 10; |
824a1566 KD |
1292 | do { |
1293 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
1294 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { | |
1295 | mpi3mr_clear_reset_history(mrioc); | |
b64845a7 SR |
1296 | break; |
1297 | } | |
1298 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { | |
1299 | mpi3mr_print_fault_info(mrioc); | |
1300 | break; | |
824a1566 KD |
1301 | } |
1302 | msleep(100); | |
1303 | } while (--timeout); | |
1304 | ||
824a1566 | 1305 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); |
b64845a7 SR |
1306 | if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || |
1307 | (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || | |
1308 | (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) | |
1309 | retval = 0; | |
824a1566 | 1310 | |
339a7b32 | 1311 | ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n", |
824a1566 KD |
1312 | (!retval) ? "successful" : "failed", ioc_status, ioc_config); |
1313 | return retval; | |
1314 | } | |
1315 | ||
c5758fc7 SR |
1316 | /** |
1317 | * mpi3mr_revalidate_factsdata - validate IOCFacts parameters | |
1318 | * during reset/resume | |
1319 | * @mrioc: Adapter instance reference | |
1320 | * | |
904fdd20 | 1321 | * Return: zero if the new IOCFacts parameter values are compatible with |
c5758fc7 SR |
1322 | * the older values, else return -EPERM |
1323 | */ | |
1324 | static int | |
1325 | mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) | |
1326 | { | |
144679df | 1327 | unsigned long *removepend_bitmap; |
c5758fc7 SR |
1328 | |
1329 | if (mrioc->facts.reply_sz > mrioc->reply_sz) { | |
1330 | ioc_err(mrioc, | |
1331 | "cannot increase reply size from %d to %d\n", | |
1332 | mrioc->reply_sz, mrioc->facts.reply_sz); | |
1333 | return -EPERM; | |
1334 | } | |
1335 | ||
1336 | if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) { | |
1337 | ioc_err(mrioc, | |
1338 | "cannot reduce number of operational reply queues from %d to %d\n", | |
1339 | mrioc->num_op_reply_q, | |
1340 | mrioc->facts.max_op_reply_q); | |
1341 | return -EPERM; | |
1342 | } | |
1343 | ||
1344 | if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) { | |
1345 | ioc_err(mrioc, | |
1346 | "cannot reduce number of operational request queues from %d to %d\n", | |
1347 | mrioc->num_op_req_q, mrioc->facts.max_op_req_q); | |
1348 | return -EPERM; | |
1349 | } | |
1350 | ||
d9adb81e RK |
1351 | if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512)) |
1352 | ioc_err(mrioc, "Warning: The maximum data transfer length\n" | |
1353 | "\tchanged after reset: previous(%d), new(%d),\n" | |
1354 | "the driver cannot change this at run time\n", | |
1355 | mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length); | |
1356 | ||
c4723e68 | 1357 | if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities & |
57a80be5 | 1358 | MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) |
c4723e68 SR |
1359 | ioc_err(mrioc, |
1360 | "critical error: multipath capability is enabled at the\n" | |
1361 | "\tcontroller while sas transport support is enabled at the\n" | |
1362 | "\tdriver, please reboot the system or reload the driver\n"); | |
1363 | ||
339a7b32 RK |
1364 | if (mrioc->seg_tb_support) { |
1365 | if (!(mrioc->facts.ioc_capabilities & | |
1366 | MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) { | |
1367 | ioc_err(mrioc, | |
1368 | "critical error: previously enabled segmented trace\n" | |
1369 | " buffer capability is disabled after reset. Please\n" | |
1370 | " update the firmware or reboot the system or\n" | |
1371 | " reload the driver to enable trace diag buffer\n"); | |
1372 | mrioc->diag_buffers[0].disabled_after_reset = true; | |
1373 | } else | |
1374 | mrioc->diag_buffers[0].disabled_after_reset = false; | |
1375 | } | |
1376 | ||
339e6156 SK |
1377 | if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) { |
1378 | removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle, | |
1379 | GFP_KERNEL); | |
c5758fc7 SR |
1380 | if (!removepend_bitmap) { |
1381 | ioc_err(mrioc, | |
339e6156 SK |
1382 | "failed to increase removepend_bitmap bits from %d to %d\n", |
1383 | mrioc->dev_handle_bitmap_bits, | |
1384 | mrioc->facts.max_devhandle); | |
c5758fc7 SR |
1385 | return -EPERM; |
1386 | } | |
339e6156 | 1387 | bitmap_free(mrioc->removepend_bitmap); |
c5758fc7 SR |
1388 | mrioc->removepend_bitmap = removepend_bitmap; |
1389 | ioc_info(mrioc, | |
339e6156 SK |
1390 | "increased bits of dev_handle_bitmap from %d to %d\n", |
1391 | mrioc->dev_handle_bitmap_bits, | |
1392 | mrioc->facts.max_devhandle); | |
1393 | mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; | |
c5758fc7 SR |
1394 | } |
1395 | ||
1396 | return 0; | |
1397 | } | |
1398 | ||
824a1566 KD |
1399 | /** |
1400 | * mpi3mr_bring_ioc_ready - Bring controller to ready state | |
1401 | * @mrioc: Adapter instance reference | |
1402 | * | |
1403 | * Set Enable IOC bit in IOC configuration register and wait for | |
1404 | * the controller to become ready. | |
1405 | * | |
59bd9cfe | 1406 | * Return: 0 on success, appropriate error on failure. |
824a1566 KD |
1407 | */ |
1408 | static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) | |
1409 | { | |
0a319f16 | 1410 | u32 ioc_config, ioc_status, timeout, host_diagnostic; |
59bd9cfe SR |
1411 | int retval = 0; |
1412 | enum mpi3mr_iocstate ioc_state; | |
1413 | u64 base_info; | |
9634bb07 RK |
1414 | u8 retry = 0; |
1415 | u64 start_time, elapsed_time_sec; | |
1416 | ||
1417 | retry_bring_ioc_ready: | |
824a1566 | 1418 | |
59bd9cfe SR |
1419 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); |
1420 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
1421 | base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); | |
1422 | ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", | |
1423 | ioc_status, ioc_config, base_info); | |
1424 | ||
fb6eb98f RK |
1425 | if (!mpi3mr_is_fault_recoverable(mrioc)) { |
1426 | mrioc->unrecoverable = 1; | |
1427 | goto out_device_not_present; | |
1428 | } | |
1429 | ||
59bd9cfe SR |
1430 | /* The timeout value is in units of 2 seconds, convert it to seconds */ |
1431 | mrioc->ready_timeout = | |
1432 | ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> | |
1433 | MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; | |
1434 | ||
1435 | ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); | |
1436 | ||
1437 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
1438 | ioc_info(mrioc, "controller is in %s state during detection\n", | |
1439 | mpi3mr_iocstate_name(ioc_state)); | |
1440 | ||
4616a4b3 RK |
1441 | timeout = mrioc->ready_timeout * 10; |
1442 | ||
1443 | do { | |
1444 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
1445 | ||
1446 | if (ioc_state != MRIOC_STATE_BECOMING_READY && | |
1447 | ioc_state != MRIOC_STATE_RESET_REQUESTED) | |
1448 | break; | |
59bd9cfe | 1449 | |
f2a79d20 SR |
1450 | if (!pci_device_is_present(mrioc->pdev)) { |
1451 | mrioc->unrecoverable = 1; | |
4616a4b3 | 1452 | ioc_err(mrioc, "controller is not present while waiting to reset\n"); |
f2a79d20 SR |
1453 | goto out_device_not_present; |
1454 | } | |
1455 | ||
4616a4b3 RK |
1456 | msleep(100); |
1457 | } while (--timeout); | |
59bd9cfe SR |
1458 | |
1459 | if (ioc_state == MRIOC_STATE_READY) { | |
1460 | ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); | |
1461 | retval = mpi3mr_issue_and_process_mur(mrioc, | |
1462 | MPI3MR_RESET_FROM_BRINGUP); | |
1463 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
1464 | if (retval) | |
1465 | ioc_err(mrioc, | |
1466 | "message unit reset failed with error %d current state %s\n", | |
1467 | retval, mpi3mr_iocstate_name(ioc_state)); | |
1468 | } | |
1469 | if (ioc_state != MRIOC_STATE_RESET) { | |
0a319f16 RK |
1470 | if (ioc_state == MRIOC_STATE_FAULT) { |
1471 | timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; | |
1472 | mpi3mr_print_fault_info(mrioc); | |
1473 | do { | |
1474 | host_diagnostic = | |
1475 | readl(&mrioc->sysif_regs->host_diagnostic); | |
1476 | if (!(host_diagnostic & | |
1477 | MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) | |
1478 | break; | |
1479 | if (!pci_device_is_present(mrioc->pdev)) { | |
1480 | mrioc->unrecoverable = 1; | |
1481 | ioc_err(mrioc, "controller is not present at the bringup\n"); | |
1482 | goto out_device_not_present; | |
1483 | } | |
1484 | msleep(100); | |
1485 | } while (--timeout); | |
1486 | } | |
59bd9cfe SR |
1487 | mpi3mr_print_fault_info(mrioc); |
1488 | ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); | |
1489 | retval = mpi3mr_issue_reset(mrioc, | |
1490 | MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, | |
1491 | MPI3MR_RESET_FROM_BRINGUP); | |
1492 | if (retval) { | |
1493 | ioc_err(mrioc, | |
1494 | "soft reset failed with error %d\n", retval); | |
1495 | goto out_failed; | |
1496 | } | |
1497 | } | |
1498 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
1499 | if (ioc_state != MRIOC_STATE_RESET) { | |
1500 | ioc_err(mrioc, | |
1501 | "cannot bring controller to reset state, current state: %s\n", | |
1502 | mpi3mr_iocstate_name(ioc_state)); | |
1503 | goto out_failed; | |
1504 | } | |
1505 | mpi3mr_clear_reset_history(mrioc); | |
1506 | retval = mpi3mr_setup_admin_qpair(mrioc); | |
1507 | if (retval) { | |
1508 | ioc_err(mrioc, "failed to setup admin queues: error %d\n", | |
1509 | retval); | |
1510 | goto out_failed; | |
1511 | } | |
1512 | ||
1513 | ioc_info(mrioc, "bringing controller to ready state\n"); | |
824a1566 KD |
1514 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); |
1515 | ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; | |
1516 | writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); | |
1517 | ||
9634bb07 RK |
1518 | if (retry == 0) |
1519 | start_time = jiffies; | |
1520 | ||
824a1566 KD |
1521 | timeout = mrioc->ready_timeout * 10; |
1522 | do { | |
59bd9cfe SR |
1523 | ioc_state = mpi3mr_get_iocstate(mrioc); |
1524 | if (ioc_state == MRIOC_STATE_READY) { | |
1525 | ioc_info(mrioc, | |
5867b856 | 1526 | "successfully transitioned to %s state\n", |
59bd9cfe | 1527 | mpi3mr_iocstate_name(ioc_state)); |
824a1566 | 1528 | return 0; |
59bd9cfe | 1529 | } |
9634bb07 RK |
1530 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); |
1531 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || | |
1532 | (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { | |
1533 | mpi3mr_print_fault_info(mrioc); | |
1534 | goto out_failed; | |
1535 | } | |
f2a79d20 SR |
1536 | if (!pci_device_is_present(mrioc->pdev)) { |
1537 | mrioc->unrecoverable = 1; | |
1538 | ioc_err(mrioc, | |
1539 | "controller is not present at the bringup\n"); | |
1540 | retval = -1; | |
1541 | goto out_device_not_present; | |
1542 | } | |
824a1566 | 1543 | msleep(100); |
9634bb07 RK |
1544 | elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; |
1545 | } while (elapsed_time_sec < mrioc->ready_timeout); | |
824a1566 | 1546 | |
59bd9cfe | 1547 | out_failed: |
9634bb07 RK |
1548 | elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; |
1549 | if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) { | |
1550 | retry++; | |
1551 | ||
1552 | ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n" | |
1553 | " elapsed time =%llu\n", retry, elapsed_time_sec); | |
1554 | ||
1555 | goto retry_bring_ioc_ready; | |
1556 | } | |
59bd9cfe SR |
1557 | ioc_state = mpi3mr_get_iocstate(mrioc); |
1558 | ioc_err(mrioc, | |
1559 | "failed to bring to ready state, current state: %s\n", | |
1560 | mpi3mr_iocstate_name(ioc_state)); | |
f2a79d20 | 1561 | out_device_not_present: |
59bd9cfe | 1562 | return retval; |
824a1566 KD |
1563 | } |
1564 | ||
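The ready handling above decodes a timeout field that the controller reports in 2-second units and then polls the IOC state in 100 ms steps, so the loop budget becomes timeout * 10 iterations. A minimal user-space sketch of that arithmetic (the mask and shift below are illustrative stand-ins, not the real MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_* definitions):

#include <stdio.h>

/* Illustrative stand-ins; the real mask/shift come from the MPI3 headers. */
#define DEMO_TIMEOUT_MASK  0x000000FF00000000ULL
#define DEMO_TIMEOUT_SHIFT 32

int main(void)
{
	unsigned long long base_info = 0x0000003C00000000ULL;	/* example ioc_information value */
	unsigned int ready_timeout, poll_budget;

	/* The field is in 2-second units, so convert it to seconds first. */
	ready_timeout = ((base_info & DEMO_TIMEOUT_MASK) >> DEMO_TIMEOUT_SHIFT) * 2;

	/* The driver sleeps 100 ms per iteration, so the budget is timeout * 10. */
	poll_budget = ready_timeout * 10;

	printf("ready timeout: %u seconds, poll iterations: %u\n", ready_timeout, poll_budget);
	return 0;
}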
f061178e KD |
1565 | /** |
1566 | * mpi3mr_soft_reset_success - Check whether soft reset succeeded |
1567 | * @ioc_status: IOC status register value | |
1568 | * @ioc_config: IOC config register value | |
1569 | * | |
1570 | * Check whether the soft reset is successful or not based on | |
1571 | * IOC status and IOC config register values. | |
1572 | * | |
1573 | * Return: True when the soft reset is successful, false otherwise. |
1574 | */ | |
1575 | static inline bool | |
1576 | mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) | |
1577 | { | |
1578 | if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || | |
f061178e KD |
1579 | (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) |
1580 | return true; | |
1581 | return false; | |
1582 | } | |
1583 | ||
1584 | /** | |
1585 | * mpi3mr_diagfault_success - Check whether a diag fault occurred |
1586 | * @mrioc: Adapter reference | |
1587 | * @ioc_status: IOC status register value | |
1588 | * | |
1589 | * Check whether the controller hit diag reset fault code. | |
1590 | * | |
1591 | * Return: True when there is a diag fault, false otherwise. |
1592 | */ | |
1593 | static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, | |
1594 | u32 ioc_status) | |
1595 | { | |
1596 | u32 fault; | |
1597 | ||
1598 | if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) | |
1599 | return false; | |
1600 | fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; | |
b64845a7 SR |
1601 | if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { |
1602 | mpi3mr_print_fault_info(mrioc); | |
f061178e | 1603 | return true; |
b64845a7 | 1604 | } |
f061178e KD |
1605 | return false; |
1606 | } | |
1607 | ||
824a1566 KD |
1608 | /** |
1609 | * mpi3mr_set_diagsave - Set diag save bit for snapdump | |
1610 | * @mrioc: Adapter reference | |
1611 | * | |
1612 | * Set diag save bit in IOC configuration register to enable | |
1613 | * snapdump. | |
1614 | * | |
1615 | * Return: Nothing. | |
1616 | */ | |
1617 | static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) | |
1618 | { | |
1619 | u32 ioc_config; | |
1620 | ||
1621 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
1622 | ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; | |
1623 | writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); | |
1624 | } | |
1625 | ||
1626 | /** | |
1627 | * mpi3mr_issue_reset - Issue reset to the controller | |
1628 | * @mrioc: Adapter reference | |
1629 | * @reset_type: Reset type | |
1630 | * @reset_reason: Reset reason code | |
1631 | * | |
f061178e KD |
1632 | * Unlock the host diagnostic registers and write the specific |
1633 | * reset type to it, then wait for reset acknowledgment from the |
1634 | * controller. If the reset is not successful, retry for the |
1635 | * predefined number of times. |
824a1566 KD |
1636 | * |
1637 | * Return: 0 on success, non-zero on failure. | |
1638 | */ | |
1639 | static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, | |
0a2714b7 | 1640 | u16 reset_reason) |
824a1566 | 1641 | { |
f061178e | 1642 | int retval = -1; |
b64845a7 | 1643 | u8 unlock_retry_count = 0; |
0a2714b7 | 1644 | u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0; |
b64845a7 | 1645 | u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; |
f061178e | 1646 | |
f061178e KD |
1647 | if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && |
1648 | (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) | |
b64845a7 | 1649 | return retval; |
f061178e | 1650 | if (mrioc->unrecoverable) |
b64845a7 SR |
1651 | return retval; |
1652 | if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { | |
1653 | retval = 0; | |
1654 | return retval; | |
1655 | } | |
1656 | ||
1657 | ioc_info(mrioc, "%s reset due to %s(0x%x)\n", | |
1658 | mpi3mr_reset_type_name(reset_type), | |
1659 | mpi3mr_reset_rc_name(reset_reason), reset_reason); | |
1660 | ||
f061178e KD |
1661 | mpi3mr_clear_reset_history(mrioc); |
1662 | do { | |
1663 | ioc_info(mrioc, | |
1664 | "Write magic sequence to unlock host diag register (retry=%d)\n", | |
1665 | ++unlock_retry_count); | |
1666 | if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { | |
b64845a7 SR |
1667 | ioc_err(mrioc, |
1668 | "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", | |
1669 | mpi3mr_reset_type_name(reset_type), | |
1670 | host_diagnostic); | |
f061178e | 1671 | mrioc->unrecoverable = 1; |
b64845a7 | 1672 | return retval; |
f061178e KD |
1673 | } |
1674 | ||
1675 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, | |
1676 | &mrioc->sysif_regs->write_sequence); | |
1677 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, | |
1678 | &mrioc->sysif_regs->write_sequence); | |
1679 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, | |
1680 | &mrioc->sysif_regs->write_sequence); | |
1681 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, | |
1682 | &mrioc->sysif_regs->write_sequence); | |
1683 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, | |
1684 | &mrioc->sysif_regs->write_sequence); | |
1685 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, | |
1686 | &mrioc->sysif_regs->write_sequence); | |
1687 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, | |
1688 | &mrioc->sysif_regs->write_sequence); | |
1689 | usleep_range(1000, 1100); | |
1690 | host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); | |
1691 | ioc_info(mrioc, | |
1692 | "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", | |
1693 | unlock_retry_count, host_diagnostic); | |
1694 | } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); | |
1695 | ||
0a2714b7 RK |
1696 | scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << |
1697 | MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num << | |
1698 | MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); | |
f061178e | 1699 | writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]); /* write the encoded reason word computed above */ |
f061178e KD |
1700 | writel(host_diagnostic | reset_type, |
1701 | &mrioc->sysif_regs->host_diagnostic); | |
b64845a7 SR |
1702 | switch (reset_type) { |
1703 | case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: | |
f061178e KD |
1704 | do { |
1705 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
b64845a7 SR |
1706 | ioc_config = |
1707 | readl(&mrioc->sysif_regs->ioc_configuration); | |
1708 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) | |
1709 | && mpi3mr_soft_reset_success(ioc_status, ioc_config) | |
1710 | ) { | |
f061178e | 1711 | mpi3mr_clear_reset_history(mrioc); |
b64845a7 SR |
1712 | retval = 0; |
1713 | break; | |
f061178e KD |
1714 | } |
1715 | msleep(100); | |
1716 | } while (--timeout); | |
b64845a7 SR |
1717 | mpi3mr_print_fault_info(mrioc); |
1718 | break; | |
1719 | case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: | |
f061178e KD |
1720 | do { |
1721 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
1722 | if (mpi3mr_diagfault_success(mrioc, ioc_status)) { | |
1723 | retval = 0; | |
1724 | break; | |
1725 | } | |
1726 | msleep(100); | |
1727 | } while (--timeout); | |
b64845a7 SR |
1728 | break; |
1729 | default: | |
1730 | break; | |
f061178e KD |
1731 | } |
1732 | ||
b64845a7 SR |
1733 | writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, |
1734 | &mrioc->sysif_regs->write_sequence); | |
f061178e | 1735 | |
b64845a7 SR |
1736 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); |
1737 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
f061178e | 1738 | ioc_info(mrioc, |
339a7b32 | 1739 | "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n", |
b64845a7 | 1740 | (!retval)?"successful":"failed", ioc_status, |
f061178e | 1741 | ioc_config); |
b64845a7 SR |
1742 | if (retval) |
1743 | mrioc->unrecoverable = 1; | |
f061178e | 1744 | return retval; |
824a1566 KD |
1745 | } |
1746 | ||
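Before triggering the reset, mpi3mr_issue_reset() packs the OS type, the IOC number and the reason code into a single scratchpad word. A hedged sketch of that packing, using made-up shift values in place of the MPI3MR_RESET_REASON_* definitions:

#include <assert.h>
#include <stdint.h>

/* Hypothetical field layout; the real shifts come from the driver headers. */
#define DEMO_OSTYPE_SHIFT	28
#define DEMO_IOCNUM_SHIFT	20
#define DEMO_OSTYPE_LINUX	0x1

static uint32_t pack_reset_reason(uint8_t ioc_num, uint16_t reason)
{
	return ((uint32_t)DEMO_OSTYPE_LINUX << DEMO_OSTYPE_SHIFT) |
	       ((uint32_t)ioc_num << DEMO_IOCNUM_SHIFT) | reason;
}

int main(void)
{
	uint32_t word = pack_reset_reason(2, 0x23);

	/* Each field can be recovered from its own bit range. */
	assert(((word >> DEMO_OSTYPE_SHIFT) & 0xf) == DEMO_OSTYPE_LINUX);
	assert(((word >> DEMO_IOCNUM_SHIFT) & 0xff) == 2);
	assert((word & ((1u << DEMO_IOCNUM_SHIFT) - 1)) == 0x23);
	return 0;
}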
1747 | /** | |
1748 | * mpi3mr_admin_request_post - Post request to admin queue | |
1749 | * @mrioc: Adapter reference | |
1750 | * @admin_req: MPI3 request | |
1751 | * @admin_req_sz: Request size | |
1752 | * @ignore_reset: Ignore reset in process | |
1753 | * | |
1754 | * Post the MPI3 request into admin request queue and | |
1755 | * inform the controller, if the queue is full return | |
1756 | * appropriate error. | |
1757 | * | |
1758 | * Return: 0 on success, non-zero on failure. | |
1759 | */ | |
1760 | int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, | |
1761 | u16 admin_req_sz, u8 ignore_reset) | |
1762 | { | |
1763 | u16 areq_pi = 0, areq_ci = 0, max_entries = 0; | |
1764 | int retval = 0; | |
1765 | unsigned long flags; | |
1766 | u8 *areq_entry; | |
1767 | ||
1768 | if (mrioc->unrecoverable) { | |
1769 | ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); | |
1770 | return -EFAULT; | |
1771 | } | |
1772 | ||
1773 | spin_lock_irqsave(&mrioc->admin_req_lock, flags); | |
1774 | areq_pi = mrioc->admin_req_pi; | |
1775 | areq_ci = mrioc->admin_req_ci; | |
1776 | max_entries = mrioc->num_admin_req; | |
1777 | if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && | |
1778 | (areq_pi == (max_entries - 1)))) { | |
1779 | ioc_err(mrioc, "AdminReqQ full condition detected\n"); | |
1780 | retval = -EAGAIN; | |
1781 | goto out; | |
1782 | } | |
1783 | if (!ignore_reset && mrioc->reset_in_progress) { | |
1784 | ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); | |
1785 | retval = -EAGAIN; | |
1786 | goto out; | |
1787 | } | |
1c342b05 SS |
1788 | if (mrioc->pci_err_recovery) { |
1789 | ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n"); | |
1790 | retval = -EAGAIN; | |
1791 | goto out; | |
1792 | } | |
1793 | ||
824a1566 KD |
1794 | areq_entry = (u8 *)mrioc->admin_req_base + |
1795 | (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); | |
1796 | memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); | |
1797 | memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); | |
1798 | ||
1799 | if (++areq_pi == max_entries) | |
1800 | areq_pi = 0; | |
1801 | mrioc->admin_req_pi = areq_pi; | |
1802 | ||
1803 | writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); | |
1804 | ||
1805 | out: | |
1806 | spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); | |
1807 | ||
1808 | return retval; | |
1809 | } | |
1810 | ||
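The admin-queue full test above, like mpi3mr_check_req_qfull(), treats the ring as full when the producer index sits one slot behind the consumer index, deliberately leaving one entry unused so that full and empty states stay distinguishable. A standalone sketch of the same ring arithmetic:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ring_is_full(uint16_t pi, uint16_t ci, uint16_t max_entries)
{
	/* Full when the producer is one slot behind the consumer, including the wrap case. */
	return (ci == (uint16_t)(pi + 1)) || (ci == 0 && pi == max_entries - 1);
}

static uint16_t ring_advance(uint16_t pi, uint16_t max_entries)
{
	return (++pi == max_entries) ? 0 : pi;
}

int main(void)
{
	assert(ring_is_full(4, 5, 8));		/* consumer right behind the producer */
	assert(ring_is_full(7, 0, 8));		/* wrap-around full condition */
	assert(!ring_is_full(3, 6, 8));		/* still has room */
	assert(ring_advance(7, 8) == 0);	/* producer index wraps back to zero */
	return 0;
}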
c9566231 KD |
1811 | /** |
1812 | * mpi3mr_free_op_req_q_segments - free request memory segments | |
1813 | * @mrioc: Adapter instance reference | |
1814 | * @q_idx: operational request queue index | |
1815 | * | |
1816 | * Free memory segments allocated for operational request queue | |
1817 | * | |
1818 | * Return: Nothing. | |
1819 | */ | |
1820 | static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) | |
1821 | { | |
1822 | u16 j; | |
1823 | int size; | |
1824 | struct segments *segments; | |
1825 | ||
1826 | segments = mrioc->req_qinfo[q_idx].q_segments; | |
1827 | if (!segments) | |
1828 | return; | |
1829 | ||
1830 | if (mrioc->enable_segqueue) { | |
1831 | size = MPI3MR_OP_REQ_Q_SEG_SIZE; | |
1832 | if (mrioc->req_qinfo[q_idx].q_segment_list) { | |
1833 | dma_free_coherent(&mrioc->pdev->dev, | |
1834 | MPI3MR_MAX_SEG_LIST_SIZE, | |
1835 | mrioc->req_qinfo[q_idx].q_segment_list, | |
1836 | mrioc->req_qinfo[q_idx].q_segment_list_dma); | |
d44b5fef | 1837 | mrioc->req_qinfo[q_idx].q_segment_list = NULL; |
c9566231 KD |
1838 | } |
1839 | } else | |
243bcc8e | 1840 | size = mrioc->req_qinfo[q_idx].segment_qd * |
c9566231 KD |
1841 | mrioc->facts.op_req_sz; |
1842 | ||
1843 | for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { | |
1844 | if (!segments[j].segment) | |
1845 | continue; | |
1846 | dma_free_coherent(&mrioc->pdev->dev, | |
1847 | size, segments[j].segment, segments[j].segment_dma); | |
1848 | segments[j].segment = NULL; | |
1849 | } | |
1850 | kfree(mrioc->req_qinfo[q_idx].q_segments); | |
1851 | mrioc->req_qinfo[q_idx].q_segments = NULL; | |
1852 | mrioc->req_qinfo[q_idx].qid = 0; | |
1853 | } | |
1854 | ||
1855 | /** | |
1856 | * mpi3mr_free_op_reply_q_segments - free reply memory segments | |
1857 | * @mrioc: Adapter instance reference | |
1858 | * @q_idx: operational reply queue index | |
1859 | * | |
1860 | * Free memory segments allocated for operational reply queue | |
1861 | * | |
1862 | * Return: Nothing. | |
1863 | */ | |
1864 | static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) | |
1865 | { | |
1866 | u16 j; | |
1867 | int size; | |
1868 | struct segments *segments; | |
1869 | ||
1870 | segments = mrioc->op_reply_qinfo[q_idx].q_segments; | |
1871 | if (!segments) | |
1872 | return; | |
1873 | ||
1874 | if (mrioc->enable_segqueue) { | |
1875 | size = MPI3MR_OP_REP_Q_SEG_SIZE; | |
1876 | if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { | |
1877 | dma_free_coherent(&mrioc->pdev->dev, | |
1878 | MPI3MR_MAX_SEG_LIST_SIZE, | |
1879 | mrioc->op_reply_qinfo[q_idx].q_segment_list, | |
1880 | mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); | |
1881 | mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; | |
1882 | } | |
1883 | } else | |
1884 | size = mrioc->op_reply_qinfo[q_idx].segment_qd * | |
1885 | mrioc->op_reply_desc_sz; | |
1886 | ||
1887 | for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { | |
1888 | if (!segments[j].segment) | |
1889 | continue; | |
1890 | dma_free_coherent(&mrioc->pdev->dev, | |
1891 | size, segments[j].segment, segments[j].segment_dma); | |
1892 | segments[j].segment = NULL; | |
1893 | } | |
1894 | ||
1895 | kfree(mrioc->op_reply_qinfo[q_idx].q_segments); | |
1896 | mrioc->op_reply_qinfo[q_idx].q_segments = NULL; | |
1897 | mrioc->op_reply_qinfo[q_idx].qid = 0; | |
1898 | } | |
1899 | ||
1900 | /** | |
1901 | * mpi3mr_delete_op_reply_q - delete operational reply queue | |
1902 | * @mrioc: Adapter instance reference | |
1903 | * @qidx: operational reply queue index | |
1904 | * | |
1905 | * Delete operational reply queue by issuing MPI request |
1906 | * through admin queue. | |
1907 | * | |
1908 | * Return: 0 on success, non-zero on failure. | |
1909 | */ | |
1910 | static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) | |
1911 | { | |
1912 | struct mpi3_delete_reply_queue_request delq_req; | |
afd3a579 | 1913 | struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; |
c9566231 KD |
1914 | int retval = 0; |
1915 | u16 reply_qid = 0, midx; | |
1916 | ||
afd3a579 | 1917 | reply_qid = op_reply_q->qid; |
c9566231 KD |
1918 | |
1919 | midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); | |
1920 | ||
1921 | if (!reply_qid) { | |
1922 | retval = -1; | |
1923 | ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); | |
1924 | goto out; | |
1925 | } | |
1926 | ||
afd3a579 SR |
1927 | (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- : |
1928 | mrioc->active_poll_qcount--; | |
1929 | ||
c9566231 KD |
1930 | memset(&delq_req, 0, sizeof(delq_req)); |
1931 | mutex_lock(&mrioc->init_cmds.mutex); | |
1932 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
1933 | retval = -1; | |
1934 | ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); | |
1935 | mutex_unlock(&mrioc->init_cmds.mutex); | |
1936 | goto out; | |
1937 | } | |
1938 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
1939 | mrioc->init_cmds.is_waiting = 1; | |
1940 | mrioc->init_cmds.callback = NULL; | |
1941 | delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
1942 | delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; | |
1943 | delq_req.queue_id = cpu_to_le16(reply_qid); | |
1944 | ||
1945 | init_completion(&mrioc->init_cmds.done); | |
1946 | retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), | |
1947 | 1); | |
1948 | if (retval) { | |
1949 | ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); | |
1950 | goto out_unlock; | |
1951 | } | |
1952 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
1953 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
1954 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 SR |
1955 | ioc_err(mrioc, "delete reply queue timed out\n"); |
1956 | mpi3mr_check_rh_fault_ioc(mrioc, | |
c9566231 | 1957 | MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); |
c9566231 KD |
1958 | retval = -1; |
1959 | goto out_unlock; | |
1960 | } | |
1961 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
1962 | != MPI3_IOCSTATUS_SUCCESS) { | |
1963 | ioc_err(mrioc, | |
1964 | "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
1965 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
1966 | mrioc->init_cmds.ioc_loginfo); | |
1967 | retval = -1; | |
1968 | goto out_unlock; | |
1969 | } | |
1970 | mrioc->intr_info[midx].op_reply_q = NULL; | |
1971 | ||
1972 | mpi3mr_free_op_reply_q_segments(mrioc, qidx); | |
1973 | out_unlock: | |
1974 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
1975 | mutex_unlock(&mrioc->init_cmds.mutex); | |
1976 | out: | |
1977 | ||
1978 | return retval; | |
1979 | } | |
1980 | ||
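Every create/delete helper in this section follows the same init_cmds pattern: grab the mutex, mark the command pending, post the request, then block on a completion with a timeout and fault the controller if it expires. A rough user-space analogue of that wait-with-timeout step, built on pthreads (all names here are illustrative, not driver API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Minimal stand-in for the driver's init_cmds completion. */
struct demo_cmd {
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	bool complete;
};

static void *demo_firmware(void *arg)
{
	struct demo_cmd *cmd = arg;

	usleep(100 * 1000);			/* pretend the controller processes the request */
	pthread_mutex_lock(&cmd->lock);
	cmd->complete = true;			/* analogous to setting MPI3MR_CMD_COMPLETE */
	pthread_cond_signal(&cmd->done_cv);
	pthread_mutex_unlock(&cmd->lock);
	return NULL;
}

int main(void)
{
	struct demo_cmd cmd = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
		.complete = false,
	};
	struct timespec deadline;
	pthread_t fw;
	int rc = 0;

	pthread_create(&fw, NULL, demo_firmware, &cmd);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;			/* stand-in for the command timeout in seconds */

	pthread_mutex_lock(&cmd.lock);
	while (!cmd.complete && rc == 0)
		rc = pthread_cond_timedwait(&cmd.done_cv, &cmd.lock, &deadline);
	pthread_mutex_unlock(&cmd.lock);

	/* On timeout the driver faults the controller; here we just report it. */
	printf(cmd.complete ? "command completed\n" : "command timed out\n");
	pthread_join(fw, NULL);
	return 0;
}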
1981 | /** | |
1982 | * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool |
1983 | * @mrioc: Adapter instance reference |
1984 | * @qidx: operational reply queue index |
1985 | * | |
1986 | * Allocate segmented memory pools for operational reply | |
1987 | * queue. | |
1988 | * | |
1989 | * Return: 0 on success, non-zero on failure. | |
1990 | */ | |
1991 | static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) | |
1992 | { | |
1993 | struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; | |
1994 | int i, size; | |
1995 | u64 *q_segment_list_entry = NULL; | |
1996 | struct segments *segments; | |
1997 | ||
1998 | if (mrioc->enable_segqueue) { | |
1999 | op_reply_q->segment_qd = | |
2000 | MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; | |
2001 | ||
2002 | size = MPI3MR_OP_REP_Q_SEG_SIZE; | |
2003 | ||
2004 | op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, | |
2005 | MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, | |
2006 | GFP_KERNEL); | |
2007 | if (!op_reply_q->q_segment_list) | |
2008 | return -ENOMEM; | |
2009 | q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; | |
2010 | } else { | |
2011 | op_reply_q->segment_qd = op_reply_q->num_replies; | |
2012 | size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; | |
2013 | } | |
2014 | ||
2015 | op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, | |
2016 | op_reply_q->segment_qd); | |
2017 | ||
2018 | op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, | |
2019 | sizeof(struct segments), GFP_KERNEL); | |
2020 | if (!op_reply_q->q_segments) | |
2021 | return -ENOMEM; | |
2022 | ||
2023 | segments = op_reply_q->q_segments; | |
2024 | for (i = 0; i < op_reply_q->num_segments; i++) { | |
2025 | segments[i].segment = | |
2026 | dma_alloc_coherent(&mrioc->pdev->dev, | |
2027 | size, &segments[i].segment_dma, GFP_KERNEL); | |
2028 | if (!segments[i].segment) | |
2029 | return -ENOMEM; | |
2030 | if (mrioc->enable_segqueue) | |
2031 | q_segment_list_entry[i] = | |
2032 | (unsigned long)segments[i].segment_dma; | |
2033 | } | |
2034 | ||
2035 | return 0; | |
2036 | } | |
2037 | ||
2038 | /** | |
2039 | * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. | |
2040 | * @mrioc: Adapter instance reference | |
2041 | * @qidx: request queue index | |
2042 | * | |
2043 | * Allocate segmented memory pools for operational request | |
2044 | * queue. | |
2045 | * | |
2046 | * Return: 0 on success, non-zero on failure. | |
2047 | */ | |
2048 | static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) | |
2049 | { | |
2050 | struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; | |
2051 | int i, size; | |
2052 | u64 *q_segment_list_entry = NULL; | |
2053 | struct segments *segments; | |
2054 | ||
2055 | if (mrioc->enable_segqueue) { | |
2056 | op_req_q->segment_qd = | |
2057 | MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; | |
2058 | ||
2059 | size = MPI3MR_OP_REQ_Q_SEG_SIZE; | |
2060 | ||
2061 | op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, | |
2062 | MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, | |
2063 | GFP_KERNEL); | |
2064 | if (!op_req_q->q_segment_list) | |
2065 | return -ENOMEM; | |
2066 | q_segment_list_entry = (u64 *)op_req_q->q_segment_list; | |
2067 | ||
2068 | } else { | |
2069 | op_req_q->segment_qd = op_req_q->num_requests; | |
2070 | size = op_req_q->num_requests * mrioc->facts.op_req_sz; | |
2071 | } | |
2072 | ||
2073 | op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, | |
2074 | op_req_q->segment_qd); | |
2075 | ||
2076 | op_req_q->q_segments = kcalloc(op_req_q->num_segments, | |
2077 | sizeof(struct segments), GFP_KERNEL); | |
2078 | if (!op_req_q->q_segments) | |
2079 | return -ENOMEM; | |
2080 | ||
2081 | segments = op_req_q->q_segments; | |
2082 | for (i = 0; i < op_req_q->num_segments; i++) { | |
2083 | segments[i].segment = | |
2084 | dma_alloc_coherent(&mrioc->pdev->dev, | |
2085 | size, &segments[i].segment_dma, GFP_KERNEL); | |
2086 | if (!segments[i].segment) | |
2087 | return -ENOMEM; | |
2088 | if (mrioc->enable_segqueue) | |
2089 | q_segment_list_entry[i] = | |
2090 | (unsigned long)segments[i].segment_dma; | |
2091 | } | |
2092 | ||
2093 | return 0; | |
2094 | } | |
2095 | ||
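Both segment allocators above size their pools the same way: a segment holds segment-size / entry-size entries, and the queue needs DIV_ROUND_UP(queue depth, entries per segment) segments. A quick sketch of that sizing with made-up sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int seg_size = 4096;	/* illustrative: one page per segment */
	unsigned int entry_sz = 128;	/* illustrative request/reply frame size */
	unsigned int depth = 300;	/* queue depth in entries */

	unsigned int segment_qd = seg_size / entry_sz;			/* entries per segment */
	unsigned int num_segments = DIV_ROUND_UP(depth, segment_qd);	/* segments to allocate */

	printf("%u entries per segment, %u segments for a depth of %u\n",
	       segment_qd, num_segments, depth);
	return 0;
}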
2096 | /** | |
2097 | * mpi3mr_create_op_reply_q - create operational reply queue | |
2098 | * @mrioc: Adapter instance reference | |
2099 | * @qidx: operational reply queue index | |
2100 | * | |
2101 | * Create operational reply queue by issuing MPI request |
2102 | * through admin queue. | |
2103 | * | |
2104 | * Return: 0 on success, non-zero on failure. | |
2105 | */ | |
2106 | static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) | |
2107 | { | |
2108 | struct mpi3_create_reply_queue_request create_req; | |
2109 | struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; | |
2110 | int retval = 0; | |
2111 | u16 reply_qid = 0, midx; | |
2112 | ||
2113 | reply_qid = op_reply_q->qid; | |
2114 | ||
2115 | midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); | |
2116 | ||
2117 | if (reply_qid) { | |
2118 | retval = -1; | |
2119 | ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", | |
2120 | reply_qid); | |
2121 | ||
2122 | return retval; | |
2123 | } | |
2124 | ||
2125 | reply_qid = qidx + 1; | |
f08b24d8 RK |
2126 | |
2127 | if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) { | |
2128 | if (mrioc->pdev->revision) | |
2129 | op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; | |
2130 | else | |
2131 | op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; | |
2132 | } else | |
2133 | op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K; | |
2134 | ||
c9566231 KD |
2135 | op_reply_q->ci = 0; |
2136 | op_reply_q->ephase = 1; | |
463429f8 KD |
2137 | atomic_set(&op_reply_q->pend_ios, 0); |
2138 | atomic_set(&op_reply_q->in_use, 0); | |
2139 | op_reply_q->enable_irq_poll = false; | |
f08b24d8 RK |
2140 | op_reply_q->qfull_watermark = |
2141 | op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2); | |
c9566231 KD |
2142 | |
2143 | if (!op_reply_q->q_segments) { | |
2144 | retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); | |
2145 | if (retval) { | |
2146 | mpi3mr_free_op_reply_q_segments(mrioc, qidx); | |
2147 | goto out; | |
2148 | } | |
2149 | } | |
2150 | ||
2151 | memset(&create_req, 0, sizeof(create_req)); | |
2152 | mutex_lock(&mrioc->init_cmds.mutex); | |
2153 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
2154 | retval = -1; | |
2155 | ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); | |
f9dc034d | 2156 | goto out_unlock; |
c9566231 KD |
2157 | } |
2158 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
2159 | mrioc->init_cmds.is_waiting = 1; | |
2160 | mrioc->init_cmds.callback = NULL; | |
2161 | create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
2162 | create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; | |
2163 | create_req.queue_id = cpu_to_le16(reply_qid); | |
afd3a579 SR |
2164 | |
2165 | if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount)) | |
2166 | op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE; | |
2167 | else | |
2168 | op_reply_q->qtype = MPI3MR_POLL_QUEUE; | |
2169 | ||
2170 | if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) { | |
2171 | create_req.flags = | |
2172 | MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; | |
2173 | create_req.msix_index = | |
2174 | cpu_to_le16(mrioc->intr_info[midx].msix_index); | |
2175 | } else { | |
2176 | create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1); | |
2177 | ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n", | |
2178 | reply_qid, midx); | |
2179 | if (!mrioc->active_poll_qcount) | |
2180 | disable_irq_nosync(pci_irq_vector(mrioc->pdev, | |
2181 | mrioc->intr_info_count - 1)); | |
2182 | } | |
2183 | ||
c9566231 KD |
2184 | if (mrioc->enable_segqueue) { |
2185 | create_req.flags |= | |
2186 | MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; | |
2187 | create_req.base_address = cpu_to_le64( | |
2188 | op_reply_q->q_segment_list_dma); | |
2189 | } else | |
2190 | create_req.base_address = cpu_to_le64( | |
2191 | op_reply_q->q_segments[0].segment_dma); | |
2192 | ||
2193 | create_req.size = cpu_to_le16(op_reply_q->num_replies); | |
2194 | ||
2195 | init_completion(&mrioc->init_cmds.done); | |
2196 | retval = mpi3mr_admin_request_post(mrioc, &create_req, | |
2197 | sizeof(create_req), 1); | |
2198 | if (retval) { | |
2199 | ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); | |
2200 | goto out_unlock; | |
2201 | } | |
2202 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
2203 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
2204 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 SR |
2205 | ioc_err(mrioc, "create reply queue timed out\n"); |
2206 | mpi3mr_check_rh_fault_ioc(mrioc, | |
c9566231 | 2207 | MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); |
c9566231 KD |
2208 | retval = -1; |
2209 | goto out_unlock; | |
2210 | } | |
2211 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
2212 | != MPI3_IOCSTATUS_SUCCESS) { | |
2213 | ioc_err(mrioc, | |
2214 | "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
2215 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
2216 | mrioc->init_cmds.ioc_loginfo); | |
2217 | retval = -1; | |
2218 | goto out_unlock; | |
2219 | } | |
2220 | op_reply_q->qid = reply_qid; | |
fe6db615 SR |
2221 | if (midx < mrioc->intr_info_count) |
2222 | mrioc->intr_info[midx].op_reply_q = op_reply_q; | |
c9566231 | 2223 | |
afd3a579 SR |
2224 | (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ : |
2225 | mrioc->active_poll_qcount++; | |
2226 | ||
c9566231 KD |
2227 | out_unlock: |
2228 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
2229 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2230 | out: | |
2231 | ||
2232 | return retval; | |
2233 | } | |
2234 | ||
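The queue-type decision in mpi3mr_create_op_reply_q() splits the reply queues so that MSI-X indices below (total vectors - requested poll queues) become interrupt-driven default queues, while the remainder become io_uring poll queues that share the last vector. A small sketch of that classification (names are illustrative):

#include <stdio.h>

enum demo_qtype { DEMO_DEFAULT_QUEUE, DEMO_POLL_QUEUE };

/* Illustrative stand-in for the queue classification done at create time. */
static enum demo_qtype classify_queue(unsigned int midx,
				      unsigned int intr_info_count,
				      unsigned int requested_poll_qcount)
{
	if (midx < (intr_info_count - requested_poll_qcount))
		return DEMO_DEFAULT_QUEUE;
	return DEMO_POLL_QUEUE;
}

int main(void)
{
	unsigned int vectors = 8, poll_queues = 2, midx;

	for (midx = 0; midx < vectors; midx++)
		printf("msix %u -> %s\n", midx,
		       classify_queue(midx, vectors, poll_queues) == DEMO_DEFAULT_QUEUE ?
		       "default (interrupt driven)" : "poll");
	return 0;
}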
2235 | /** | |
2236 | * mpi3mr_create_op_req_q - create operational request queue | |
2237 | * @mrioc: Adapter instance reference | |
2238 | * @idx: operational request queue index | |
2239 | * @reply_qid: Reply queue ID | |
2240 | * | |
2241 | * Create operational request queue by issuing MPI request |
2242 | * through admin queue. | |
2243 | * | |
2244 | * Return: 0 on success, non-zero on failure. | |
2245 | */ | |
2246 | static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, | |
2247 | u16 reply_qid) | |
2248 | { | |
2249 | struct mpi3_create_request_queue_request create_req; | |
2250 | struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; | |
2251 | int retval = 0; | |
2252 | u16 req_qid = 0; | |
2253 | ||
2254 | req_qid = op_req_q->qid; | |
2255 | ||
2256 | if (req_qid) { | |
2257 | retval = -1; | |
2258 | ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", | |
2259 | req_qid); | |
2260 | ||
2261 | return retval; | |
2262 | } | |
2263 | req_qid = idx + 1; | |
2264 | ||
2265 | op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; | |
2266 | op_req_q->ci = 0; | |
2267 | op_req_q->pi = 0; | |
2268 | op_req_q->reply_qid = reply_qid; | |
2269 | spin_lock_init(&op_req_q->q_lock); | |
2270 | ||
2271 | if (!op_req_q->q_segments) { | |
2272 | retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); | |
2273 | if (retval) { | |
2274 | mpi3mr_free_op_req_q_segments(mrioc, idx); | |
2275 | goto out; | |
2276 | } | |
2277 | } | |
2278 | ||
2279 | memset(&create_req, 0, sizeof(create_req)); | |
2280 | mutex_lock(&mrioc->init_cmds.mutex); | |
2281 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
2282 | retval = -1; | |
2283 | ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); | |
f9dc034d | 2284 | goto out_unlock; |
c9566231 KD |
2285 | } |
2286 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
2287 | mrioc->init_cmds.is_waiting = 1; | |
2288 | mrioc->init_cmds.callback = NULL; | |
2289 | create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
2290 | create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; | |
2291 | create_req.queue_id = cpu_to_le16(req_qid); | |
2292 | if (mrioc->enable_segqueue) { | |
2293 | create_req.flags = | |
2294 | MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; | |
2295 | create_req.base_address = cpu_to_le64( | |
2296 | op_req_q->q_segment_list_dma); | |
2297 | } else | |
2298 | create_req.base_address = cpu_to_le64( | |
2299 | op_req_q->q_segments[0].segment_dma); | |
2300 | create_req.reply_queue_id = cpu_to_le16(reply_qid); | |
2301 | create_req.size = cpu_to_le16(op_req_q->num_requests); | |
2302 | ||
2303 | init_completion(&mrioc->init_cmds.done); | |
2304 | retval = mpi3mr_admin_request_post(mrioc, &create_req, | |
2305 | sizeof(create_req), 1); | |
2306 | if (retval) { | |
2307 | ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); | |
2308 | goto out_unlock; | |
2309 | } | |
2310 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
2311 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
2312 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 SR |
2313 | ioc_err(mrioc, "create request queue timed out\n"); |
2314 | mpi3mr_check_rh_fault_ioc(mrioc, | |
2315 | MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); | |
c9566231 KD |
2316 | retval = -1; |
2317 | goto out_unlock; | |
2318 | } | |
2319 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
2320 | != MPI3_IOCSTATUS_SUCCESS) { | |
2321 | ioc_err(mrioc, | |
2322 | "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
2323 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
2324 | mrioc->init_cmds.ioc_loginfo); | |
2325 | retval = -1; | |
2326 | goto out_unlock; | |
2327 | } | |
2328 | op_req_q->qid = req_qid; | |
2329 | ||
2330 | out_unlock: | |
2331 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
2332 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2333 | out: | |
2334 | ||
2335 | return retval; | |
2336 | } | |
2337 | ||
2338 | /** | |
2339 | * mpi3mr_create_op_queues - create operational queue pairs | |
2340 | * @mrioc: Adapter instance reference | |
2341 | * | |
2342 | * Allocate memory for operational queue meta data and call | |
2343 | * create request and reply queue functions. | |
2344 | * | |
2345 | * Return: 0 on success, non-zero on failures. | |
2346 | */ | |
2347 | static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) | |
2348 | { | |
2349 | int retval = 0; | |
2350 | u16 num_queues = 0, i = 0, msix_count_op_q = 1; | |
2351 | ||
2352 | num_queues = min_t(int, mrioc->facts.max_op_reply_q, | |
2353 | mrioc->facts.max_op_req_q); | |
2354 | ||
2355 | msix_count_op_q = | |
2356 | mrioc->intr_info_count - mrioc->op_reply_q_offset; | |
2357 | if (!mrioc->num_queues) | |
2358 | mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); | |
c5758fc7 SR |
2359 | /* |
2360 | * During reset set the num_queues to the number of queues | |
2361 | * that was set before the reset. | |
2362 | */ | |
2363 | num_queues = mrioc->num_op_reply_q ? | |
2364 | mrioc->num_op_reply_q : mrioc->num_queues; | |
2365 | ioc_info(mrioc, "trying to create %d operational queue pairs\n", | |
c9566231 KD |
2366 | num_queues); |
2367 | ||
2368 | if (!mrioc->req_qinfo) { | |
2369 | mrioc->req_qinfo = kcalloc(num_queues, | |
2370 | sizeof(struct op_req_qinfo), GFP_KERNEL); | |
2371 | if (!mrioc->req_qinfo) { | |
2372 | retval = -1; | |
2373 | goto out_failed; | |
2374 | } | |
2375 | ||
2376 | mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * | |
2377 | num_queues, GFP_KERNEL); | |
2378 | if (!mrioc->op_reply_qinfo) { | |
2379 | retval = -1; | |
2380 | goto out_failed; | |
2381 | } | |
2382 | } | |
2383 | ||
2384 | if (mrioc->enable_segqueue) | |
2385 | ioc_info(mrioc, | |
2386 | "allocating operational queues through segmented queues\n"); | |
2387 | ||
2388 | for (i = 0; i < num_queues; i++) { | |
2389 | if (mpi3mr_create_op_reply_q(mrioc, i)) { | |
2390 | ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); | |
2391 | break; | |
2392 | } | |
2393 | if (mpi3mr_create_op_req_q(mrioc, i, | |
2394 | mrioc->op_reply_qinfo[i].qid)) { | |
2395 | ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); | |
2396 | mpi3mr_delete_op_reply_q(mrioc, i); | |
2397 | break; | |
2398 | } | |
2399 | } | |
2400 | ||
2401 | if (i == 0) { | |
2402 | /* Not even one queue is created successfully*/ | |
2403 | retval = -1; | |
2404 | goto out_failed; | |
2405 | } | |
2406 | mrioc->num_op_reply_q = mrioc->num_op_req_q = i; | |
afd3a579 SR |
2407 | ioc_info(mrioc, |
2408 | "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", | |
2409 | mrioc->num_op_reply_q, mrioc->default_qcount, | |
2410 | mrioc->active_poll_qcount); | |
c9566231 KD |
2411 | |
2412 | return retval; | |
2413 | out_failed: | |
2414 | kfree(mrioc->req_qinfo); | |
2415 | mrioc->req_qinfo = NULL; | |
2416 | ||
2417 | kfree(mrioc->op_reply_qinfo); | |
2418 | mrioc->op_reply_qinfo = NULL; | |
2419 | ||
2420 | return retval; | |
2421 | } | |
2422 | ||
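mpi3mr_create_op_queues() caps the number of queue pairs by both the controller limits reported in IOC facts and the MSI-X vectors left after the reply-queue offset. A compact sketch of that sizing with illustrative numbers:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int max_op_reply_q = 96, max_op_req_q = 96;	/* illustrative IOC facts values */
	unsigned int intr_info_count = 17, op_reply_q_offset = 1;

	unsigned int fw_limit = MIN(max_op_reply_q, max_op_req_q);
	unsigned int msix_count_op_q = intr_info_count - op_reply_q_offset;
	unsigned int num_queues = MIN(fw_limit, msix_count_op_q);

	printf("creating %u operational queue pairs\n", num_queues);
	return 0;
}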
023ab2a9 KD |
2423 | /** |
2424 | * mpi3mr_op_request_post - Post request to operational queue | |
2425 | * @mrioc: Adapter reference | |
2426 | * @op_req_q: Operational request queue info | |
2427 | * @req: MPI3 request | |
2428 | * | |
2429 | * Post the MPI3 request into operational request queue and | |
2430 | * inform the controller, if the queue is full return | |
2431 | * appropriate error. | |
2432 | * | |
2433 | * Return: 0 on success, non-zero on failure. | |
2434 | */ | |
2435 | int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, | |
2436 | struct op_req_qinfo *op_req_q, u8 *req) | |
2437 | { | |
2438 | u16 pi = 0, max_entries, reply_qidx = 0, midx; | |
2439 | int retval = 0; | |
2440 | unsigned long flags; | |
2441 | u8 *req_entry; | |
2442 | void *segment_base_addr; | |
2443 | u16 req_sz = mrioc->facts.op_req_sz; | |
2444 | struct segments *segments = op_req_q->q_segments; | |
f08b24d8 | 2445 | struct op_reply_qinfo *op_reply_q = NULL; |
023ab2a9 KD |
2446 | |
2447 | reply_qidx = op_req_q->reply_qid - 1; | |
f08b24d8 | 2448 | op_reply_q = mrioc->op_reply_qinfo + reply_qidx; |
023ab2a9 KD |
2449 | |
2450 | if (mrioc->unrecoverable) | |
2451 | return -EFAULT; | |
2452 | ||
2453 | spin_lock_irqsave(&op_req_q->q_lock, flags); | |
2454 | pi = op_req_q->pi; | |
2455 | max_entries = op_req_q->num_requests; | |
2456 | ||
2457 | if (mpi3mr_check_req_qfull(op_req_q)) { | |
2458 | midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( | |
2459 | reply_qidx, mrioc->op_reply_q_offset); | |
afd3a579 | 2460 | mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); |
023ab2a9 KD |
2461 | |
2462 | if (mpi3mr_check_req_qfull(op_req_q)) { | |
2463 | retval = -EAGAIN; | |
2464 | goto out; | |
2465 | } | |
2466 | } | |
2467 | ||
2468 | if (mrioc->reset_in_progress) { | |
2469 | ioc_err(mrioc, "OpReqQ submit reset in progress\n"); | |
2470 | retval = -EAGAIN; | |
2471 | goto out; | |
2472 | } | |
1c342b05 SS |
2473 | if (mrioc->pci_err_recovery) { |
2474 | ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n"); | |
2475 | retval = -EAGAIN; | |
2476 | goto out; | |
2477 | } | |
023ab2a9 | 2478 | |
f08b24d8 RK |
2479 | /* Reply queue is nearly full, push back IOs to SML */ |
2480 | if ((mrioc->prevent_reply_qfull == true) && | |
2481 | (atomic_read(&op_reply_q->pend_ios) > | |
2482 | (op_reply_q->qfull_watermark))) { | |
2483 | atomic_inc(&mrioc->reply_qfull_count); | |
2484 | retval = -EAGAIN; | |
2485 | goto out; | |
2486 | } | |
2487 | ||
023ab2a9 KD |
2488 | segment_base_addr = segments[pi / op_req_q->segment_qd].segment; |
2489 | req_entry = (u8 *)segment_base_addr + | |
2490 | ((pi % op_req_q->segment_qd) * req_sz); | |
2491 | ||
2492 | memset(req_entry, 0, req_sz); | |
2493 | memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); | |
2494 | ||
2495 | if (++pi == max_entries) | |
2496 | pi = 0; | |
2497 | op_req_q->pi = pi; | |
2498 | ||
7f9f953d | 2499 | #ifndef CONFIG_PREEMPT_RT |
463429f8 KD |
2500 | if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) |
2501 | > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) | |
2502 | mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; | |
7f9f953d SR |
2503 | #else |
2504 | atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios); | |
2505 | #endif | |
463429f8 | 2506 | |
023ab2a9 KD |
2507 | writel(op_req_q->pi, |
2508 | &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); | |
2509 | ||
2510 | out: | |
2511 | spin_unlock_irqrestore(&op_req_q->q_lock, flags); | |
2512 | return retval; | |
2513 | } | |
2514 | ||
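Posting into a segmented request queue maps the producer index to a (segment, offset) pair: pi / segment_qd picks the segment and (pi % segment_qd) * frame size picks the slot inside it, after which the frame is zeroed and the request copied in. A self-contained sketch of that addressing with illustrative sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { SEGMENT_QD = 32, REQ_SZ = 128, NUM_SEGMENTS = 4 };	/* illustrative sizes */
	static uint8_t segments[NUM_SEGMENTS][SEGMENT_QD * REQ_SZ];
	unsigned int pi = 70;	/* example producer index */

	/* Same addressing as the driver: pick the segment, then the byte offset within it. */
	uint8_t *slot = segments[pi / SEGMENT_QD] + (pi % SEGMENT_QD) * REQ_SZ;

	memset(slot, 0, REQ_SZ);	/* clear the frame before the request is copied in */
	printf("pi %u -> segment %u, byte offset %u\n",
	       pi, pi / SEGMENT_QD, (pi % SEGMENT_QD) * REQ_SZ);
	return 0;
}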
a6856cc4 SR |
2515 | /** |
2516 | * mpi3mr_check_rh_fault_ioc - check reset history and fault | |
2517 | * controller | |
2518 | * @mrioc: Adapter instance reference | |
3bb3c24e | 2519 | * @reason_code: reason code for the fault. |
a6856cc4 SR |
2520 | * |
2521 | * This routine will save snapdump and fault the controller with | |
2522 | * the given reason code if it is not already in the fault state |
2523 | * or has not been asynchronously reset. This is used to handle |
2524 | * initialization-time faults/resets/timeouts, as in those cases |
2525 | * immediate soft reset invocation is not required. |
2526 | * | |
2527 | * Return: None. | |
2528 | */ | |
2529 | void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) | |
2530 | { | |
2531 | u32 ioc_status, host_diagnostic, timeout; | |
d8d08d16 | 2532 | union mpi3mr_trigger_data trigger_data; |
a6856cc4 | 2533 | |
f2a79d20 SR |
2534 | if (mrioc->unrecoverable) { |
2535 | ioc_err(mrioc, "controller is unrecoverable\n"); | |
2536 | return; | |
2537 | } | |
2538 | ||
2539 | if (!pci_device_is_present(mrioc->pdev)) { | |
2540 | mrioc->unrecoverable = 1; | |
2541 | ioc_err(mrioc, "controller is not present\n"); | |
2542 | return; | |
2543 | } | |
d8d08d16 | 2544 | memset(&trigger_data, 0, sizeof(trigger_data)); |
a6856cc4 | 2545 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); |
d8d08d16 RK |
2546 | |
2547 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { | |
2548 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, | |
2549 | MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); | |
2550 | return; | |
2551 | } else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { | |
2552 | trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & | |
2553 | MPI3_SYSIF_FAULT_CODE_MASK); | |
2554 | ||
2555 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, | |
2556 | MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); | |
a6856cc4 SR |
2557 | mpi3mr_print_fault_info(mrioc); |
2558 | return; | |
2559 | } | |
d8d08d16 | 2560 | |
a6856cc4 SR |
2561 | mpi3mr_set_diagsave(mrioc); |
2562 | mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, | |
2563 | reason_code); | |
d8d08d16 RK |
2564 | trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & |
2565 | MPI3_SYSIF_FAULT_CODE_MASK); | |
2566 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT, | |
2567 | &trigger_data, 0); | |
a6856cc4 SR |
2568 | timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; |
2569 | do { | |
2570 | host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); | |
2571 | if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) | |
2572 | break; | |
2573 | msleep(100); | |
2574 | } while (--timeout); | |
2575 | } | |
2576 | ||
54dfcffb KD |
2577 | /** |
2578 | * mpi3mr_sync_timestamp - Issue time stamp sync request | |
2579 | * @mrioc: Adapter reference | |
2580 | * | |
2581 | * Issue IO unit control MPI request to synchronize the firmware |
2582 | * timestamp with host time. | |
2583 | * | |
2584 | * Return: 0 on success, non-zero on failure. | |
2585 | */ | |
2586 | static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc) | |
2587 | { | |
2588 | ktime_t current_time; | |
2589 | struct mpi3_iounit_control_request iou_ctrl; | |
2590 | int retval = 0; | |
2591 | ||
2592 | memset(&iou_ctrl, 0, sizeof(iou_ctrl)); | |
2593 | mutex_lock(&mrioc->init_cmds.mutex); | |
2594 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
2595 | retval = -1; | |
2596 | ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n"); | |
2597 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2598 | goto out; | |
2599 | } | |
2600 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
2601 | mrioc->init_cmds.is_waiting = 1; | |
2602 | mrioc->init_cmds.callback = NULL; | |
2603 | iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
2604 | iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; | |
2605 | iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP; | |
2606 | current_time = ktime_get_real(); | |
2607 | iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time)); | |
2608 | ||
2609 | init_completion(&mrioc->init_cmds.done); | |
2610 | retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, | |
2611 | sizeof(iou_ctrl), 0); | |
2612 | if (retval) { | |
2613 | ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n"); | |
2614 | goto out_unlock; | |
2615 | } | |
2616 | ||
2617 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
2618 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
2619 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
2620 | ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n"); | |
2621 | mrioc->init_cmds.is_waiting = 0; | |
fbaa9aa4 | 2622 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) |
9134211f RK |
2623 | mpi3mr_check_rh_fault_ioc(mrioc, |
2624 | MPI3MR_RESET_FROM_TSU_TIMEOUT); | |
54dfcffb KD |
2625 | retval = -1; |
2626 | goto out_unlock; | |
2627 | } | |
2628 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
2629 | != MPI3_IOCSTATUS_SUCCESS) { | |
2630 | ioc_err(mrioc, | |
2631 | "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
2632 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
2633 | mrioc->init_cmds.ioc_loginfo); | |
2634 | retval = -1; | |
2635 | goto out_unlock; | |
2636 | } | |
2637 | ||
2638 | out_unlock: | |
2639 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
2640 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2641 | ||
2642 | out: | |
2643 | return retval; | |
2644 | } | |
2645 | ||
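The timestamp sync above hands the firmware the host wall-clock time in milliseconds (ktime_to_ms(ktime_get_real())). A user-space equivalent of producing that payload:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	uint64_t now_ms;

	/* CLOCK_REALTIME mirrors ktime_get_real(): wall-clock time since the Unix epoch. */
	clock_gettime(CLOCK_REALTIME, &ts);
	now_ms = (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;

	printf("timestamp payload: %llu ms\n", (unsigned long long)now_ms);
	return 0;
}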
2ac794ba SR |
2646 | /** |
2647 | * mpi3mr_print_pkg_ver - display controller fw package version | |
2648 | * @mrioc: Adapter reference | |
2649 | * | |
2650 | * Retrieve firmware package version from the component image | |
2651 | * header of the controller flash and display it. | |
2652 | * | |
2653 | * Return: 0 on success and non-zero on failure. | |
2654 | */ | |
2655 | static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc) | |
2656 | { | |
2657 | struct mpi3_ci_upload_request ci_upload; | |
2658 | int retval = -1; | |
2659 | void *data = NULL; | |
2660 | dma_addr_t data_dma; | |
2661 | struct mpi3_ci_manifest_mpi *manifest; | |
2662 | u32 data_len = sizeof(struct mpi3_ci_manifest_mpi); | |
2663 | u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; | |
2664 | ||
2665 | data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, | |
2666 | GFP_KERNEL); | |
2667 | if (!data) | |
2668 | return -ENOMEM; | |
2669 | ||
2670 | memset(&ci_upload, 0, sizeof(ci_upload)); | |
2671 | mutex_lock(&mrioc->init_cmds.mutex); | |
2672 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
2673 | ioc_err(mrioc, "sending get package version failed due to command in use\n"); | |
2674 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2675 | goto out; | |
2676 | } | |
2677 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
2678 | mrioc->init_cmds.is_waiting = 1; | |
2679 | mrioc->init_cmds.callback = NULL; | |
2680 | ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
2681 | ci_upload.function = MPI3_FUNCTION_CI_UPLOAD; | |
2682 | ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY; | |
2683 | ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST); | |
2684 | ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE); | |
2685 | ci_upload.segment_size = cpu_to_le32(data_len); | |
2686 | ||
2687 | mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len, | |
2688 | data_dma); | |
2689 | init_completion(&mrioc->init_cmds.done); | |
2690 | retval = mpi3mr_admin_request_post(mrioc, &ci_upload, | |
2691 | sizeof(ci_upload), 1); | |
2692 | if (retval) { | |
2693 | ioc_err(mrioc, "posting get package version failed\n"); | |
2694 | goto out_unlock; | |
2695 | } | |
2696 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
2697 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
2698 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
2699 | ioc_err(mrioc, "get package version timed out\n"); | |
a6856cc4 SR |
2700 | mpi3mr_check_rh_fault_ioc(mrioc, |
2701 | MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT); | |
2ac794ba SR |
2702 | retval = -1; |
2703 | goto out_unlock; | |
2704 | } | |
2705 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
2706 | == MPI3_IOCSTATUS_SUCCESS) { | |
2707 | manifest = (struct mpi3_ci_manifest_mpi *) data; | |
2708 | if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) { | |
2709 | ioc_info(mrioc, | |
2710 | "firmware package version(%d.%d.%d.%d.%05d-%05d)\n", | |
2711 | manifest->package_version.gen_major, | |
2712 | manifest->package_version.gen_minor, | |
2713 | manifest->package_version.phase_major, | |
2714 | manifest->package_version.phase_minor, | |
2715 | manifest->package_version.customer_id, | |
2716 | manifest->package_version.build_num); | |
2717 | } | |
2718 | } | |
2719 | retval = 0; | |
2720 | out_unlock: | |
2721 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
2722 | mutex_unlock(&mrioc->init_cmds.mutex); | |
2723 | ||
2724 | out: | |
2725 | if (data) | |
2726 | dma_free_coherent(&mrioc->pdev->dev, data_len, data, | |
2727 | data_dma); | |
2728 | return retval; | |
2729 | } | |
2730 | ||
672ae26c KD |
2731 | /** |
2732 | * mpi3mr_watchdog_work - watchdog thread to monitor faults | |
2733 | * @work: work struct | |
2734 | * | |
2735 | * Watchdog work executed periodically (1 second interval) to |
2736 | * monitor firmware faults and to issue periodic timer sync to |
2737 | * the firmware. | |
2738 | * | |
2739 | * Return: Nothing. | |
2740 | */ | |
2741 | static void mpi3mr_watchdog_work(struct work_struct *work) | |
2742 | { | |
2743 | struct mpi3mr_ioc *mrioc = | |
2744 | container_of(work, struct mpi3mr_ioc, watchdog_work.work); | |
2745 | unsigned long flags; | |
2746 | enum mpi3mr_iocstate ioc_state; | |
d8d08d16 RK |
2747 | u32 host_diagnostic, ioc_status; |
2748 | union mpi3mr_trigger_data trigger_data; | |
0a2714b7 | 2749 | u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; |
672ae26c | 2750 | |
1c342b05 | 2751 | if (mrioc->reset_in_progress || mrioc->pci_err_recovery) |
f2a79d20 SR |
2752 | return; |
2753 | ||
2754 | if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) { | |
2755 | ioc_err(mrioc, "watchdog could not detect the controller\n"); | |
2756 | mrioc->unrecoverable = 1; | |
2757 | } | |
2758 | ||
2759 | if (mrioc->unrecoverable) { | |
2760 | ioc_err(mrioc, | |
2761 | "flush pending commands for unrecoverable controller\n"); | |
2762 | mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); | |
b64845a7 | 2763 | return; |
f2a79d20 | 2764 | } |
b64845a7 | 2765 | |
ca41929b RK |
2766 | if (atomic_read(&mrioc->admin_pend_isr)) { |
2767 | ioc_err(mrioc, "Unprocessed admin ISR instance found\n" | |
2768 | "flush admin replies\n"); | |
2769 | mpi3mr_process_admin_reply_q(mrioc); | |
2770 | } | |
2771 | ||
83a9d30d RK |
2772 | if (!(mrioc->facts.ioc_capabilities & |
2773 | MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) && | |
2774 | (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) { | |
2775 | ||
54dfcffb KD |
2776 | mrioc->ts_update_counter = 0; |
2777 | mpi3mr_sync_timestamp(mrioc); | |
2778 | } | |
2779 | ||
78b76a07 SR |
2780 | if ((mrioc->prepare_for_reset) && |
2781 | ((mrioc->prepare_for_reset_timeout_counter++) >= | |
2782 | MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { | |
2783 | mpi3mr_soft_reset_handler(mrioc, | |
2784 | MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); | |
2785 | return; | |
2786 | } | |
2787 | ||
d8d08d16 | 2788 | memset(&trigger_data, 0, sizeof(trigger_data)); |
78b76a07 SR |
2789 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); |
2790 | if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { | |
d8d08d16 RK |
2791 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, |
2792 | MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); | |
78b76a07 SR |
2793 | mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); |
2794 | return; | |
2795 | } | |
2796 | ||
672ae26c KD |
2797 | /* Check for fault state every second and issue soft reset */ |
2798 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
78b76a07 SR |
2799 | if (ioc_state != MRIOC_STATE_FAULT) |
2800 | goto schedule_work; | |
672ae26c | 2801 | |
d8d08d16 RK |
2802 | trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; |
2803 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, | |
2804 | MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); | |
78b76a07 SR |
2805 | host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); |
2806 | if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { | |
2807 | if (!mrioc->diagsave_timeout) { | |
2808 | mpi3mr_print_fault_info(mrioc); | |
2809 | ioc_warn(mrioc, "diag save in progress\n"); | |
672ae26c | 2810 | } |
78b76a07 SR |
2811 | if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) |
2812 | goto schedule_work; | |
2813 | } | |
672ae26c | 2814 | |
78b76a07 SR |
2815 | mpi3mr_print_fault_info(mrioc); |
2816 | mrioc->diagsave_timeout = 0; | |
2817 | ||
fb6eb98f RK |
2818 | if (!mpi3mr_is_fault_recoverable(mrioc)) { |
2819 | mrioc->unrecoverable = 1; | |
2820 | goto schedule_work; | |
2821 | } | |
2822 | ||
d8d08d16 | 2823 | switch (trigger_data.fault) { |
bad2f28d | 2824 | case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: |
78b76a07 | 2825 | case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: |
bad2f28d | 2826 | ioc_warn(mrioc, |
78b76a07 SR |
2827 | "controller requires system power cycle, marking controller as unrecoverable\n"); |
2828 | mrioc->unrecoverable = 1; | |
f2a79d20 | 2829 | goto schedule_work; |
78b76a07 | 2830 | case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: |
a3d27dfd | 2831 | goto schedule_work; |
78b76a07 SR |
2832 | case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: |
2833 | reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; | |
2834 | break; | |
2835 | default: | |
2836 | break; | |
672ae26c | 2837 | } |
78b76a07 SR |
2838 | mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); |
2839 | return; | |
672ae26c KD |
2840 | |
2841 | schedule_work: | |
2842 | spin_lock_irqsave(&mrioc->watchdog_lock, flags); | |
2843 | if (mrioc->watchdog_work_q) | |
2844 | queue_delayed_work(mrioc->watchdog_work_q, | |
2845 | &mrioc->watchdog_work, | |
2846 | msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); | |
2847 | spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); | |
672ae26c KD |
2848 | return; |
2849 | } | |
2850 | ||
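The watchdog work requeues itself at the end of each pass (the schedule_work label), giving a one-second polling loop that stops when a fault hands control to the reset handler or the work queue is torn down. A minimal user-space analogue of that periodic fault poll (purely illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Pretend fault source: reports a fault on the third pass. */
static bool read_fault_state(int tick)
{
	return tick == 3;
}

int main(void)
{
	int tick = 0;

	for (;;) {
		if (read_fault_state(tick)) {
			printf("fault detected, handing off to the reset handler\n");
			break;
		}
		tick++;
		sleep(1);	/* analogous to the 1-second MPI3MR_WATCHDOG_INTERVAL */
	}
	return 0;
}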
2851 | /** | |
2852 | * mpi3mr_start_watchdog - Start watchdog | |
2853 | * @mrioc: Adapter instance reference | |
2854 | * | |
2855 | * Create and start the watchdog thread to monitor controller | |
2856 | * faults. | |
2857 | * | |
2858 | * Return: Nothing. | |
2859 | */ | |
2860 | void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) | |
2861 | { | |
2862 | if (mrioc->watchdog_work_q) | |
2863 | return; | |
2864 | ||
2865 | INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); | |
2866 | snprintf(mrioc->watchdog_work_q_name, | |
2867 | sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, | |
2868 | mrioc->id); | |
b97c0741 BVA |
2869 | mrioc->watchdog_work_q = alloc_ordered_workqueue( |
2870 | "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name); | |
672ae26c KD |
2871 | if (!mrioc->watchdog_work_q) { |
2872 | ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); | |
2873 | return; | |
2874 | } | |
2875 | ||
2876 | if (mrioc->watchdog_work_q) | |
2877 | queue_delayed_work(mrioc->watchdog_work_q, | |
2878 | &mrioc->watchdog_work, | |
2879 | msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); | |
2880 | } | |
2881 | ||
2882 | /** | |
2883 | * mpi3mr_stop_watchdog - Stop watchdog | |
2884 | * @mrioc: Adapter instance reference | |
2885 | * | |
2886 | * Stop the watchdog work queue created to monitor controller |
2887 | * faults. | |
2888 | * | |
2889 | * Return: Nothing. | |
2890 | */ | |
2891 | void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) | |
2892 | { | |
2893 | unsigned long flags; | |
2894 | struct workqueue_struct *wq; | |
2895 | ||
2896 | spin_lock_irqsave(&mrioc->watchdog_lock, flags); | |
2897 | wq = mrioc->watchdog_work_q; | |
2898 | mrioc->watchdog_work_q = NULL; | |
2899 | spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); | |
2900 | if (wq) { | |
2901 | if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) | |
2902 | flush_workqueue(wq); | |
2903 | destroy_workqueue(wq); | |
2904 | } | |
2905 | } | |
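/*
 * Illustrative sketch, not part of the driver: the start/stop pair above
 * follows a common pattern - publish the workqueue pointer under a lock,
 * and on stop clear that pointer first (so the work handler stops
 * rearming itself), then cancel and destroy.  A minimal, hypothetical
 * module-level version of the same lifecycle (all names below are
 * invented for illustration):
 */
#if 0
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;
static DEFINE_SPINLOCK(demo_lock);

static void demo_handler(struct work_struct *work)
{
	unsigned long flags;

	/* do the periodic monitoring here, then rearm if still running */
	spin_lock_irqsave(&demo_lock, flags);
	if (demo_wq)
		queue_delayed_work(demo_wq, &demo_work, msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&demo_lock, flags);
}

static int demo_start(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_handler);
	demo_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "demo_wdog");
	if (!demo_wq)
		return -ENOMEM;
	queue_delayed_work(demo_wq, &demo_work, msecs_to_jiffies(1000));
	return 0;
}

static void demo_stop(void)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&demo_lock, flags);
	wq = demo_wq;
	demo_wq = NULL;		/* stop the handler from rearming itself */
	spin_unlock_irqrestore(&demo_lock, flags);
	if (wq) {
		cancel_delayed_work_sync(&demo_work);
		destroy_workqueue(wq);
	}
}
#endif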
2906 | ||
824a1566 KD |
2907 | /** |
2908 | * mpi3mr_setup_admin_qpair - Setup admin queue pair | |
2909 | * @mrioc: Adapter instance reference | |
2910 | * | |
2911 | * Allocate memory for admin queue pair if required and register | |
2912 | * the admin queue with the controller. | |
2913 | * | |
2914 | * Return: 0 on success, non-zero on failures. | |
2915 | */ | |
2916 | static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) | |
2917 | { | |
2918 | int retval = 0; | |
2919 | u32 num_admin_entries = 0; | |
2920 | ||
2921 | mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE; | |
2922 | mrioc->num_admin_req = mrioc->admin_req_q_sz / | |
2923 | MPI3MR_ADMIN_REQ_FRAME_SZ; | |
2924 | mrioc->admin_req_ci = mrioc->admin_req_pi = 0; | |
824a1566 KD |
2925 | |
2926 | mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE; | |
2927 | mrioc->num_admin_replies = mrioc->admin_reply_q_sz / | |
2928 | MPI3MR_ADMIN_REPLY_FRAME_SZ; | |
2929 | mrioc->admin_reply_ci = 0; | |
2930 | mrioc->admin_reply_ephase = 1; | |
02ca7da2 | 2931 | atomic_set(&mrioc->admin_reply_q_in_use, 0); |
3b5091fe | 2932 | atomic_set(&mrioc->admin_pend_isr, 0); |
824a1566 KD |
2933 | |
2934 | if (!mrioc->admin_req_base) { | |
2935 | mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, | |
2936 | mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL); | |
2937 | ||
2938 | if (!mrioc->admin_req_base) { | |
2939 | retval = -1; | |
2940 | goto out_failed; | |
2941 | } | |
2942 | ||
2943 | mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev, | |
2944 | mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma, | |
2945 | GFP_KERNEL); | |
2946 | ||
2947 | if (!mrioc->admin_reply_base) { | |
2948 | retval = -1; | |
2949 | goto out_failed; | |
2950 | } | |
2951 | } | |
2952 | ||
2953 | num_admin_entries = (mrioc->num_admin_replies << 16) | | |
2954 | (mrioc->num_admin_req); | |
2955 | writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); | |
2956 | mpi3mr_writeq(mrioc->admin_req_dma, | |
2957 | &mrioc->sysif_regs->admin_request_queue_address); | |
2958 | mpi3mr_writeq(mrioc->admin_reply_dma, | |
2959 | &mrioc->sysif_regs->admin_reply_queue_address); | |
2960 | writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); | |
2961 | writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); | |
2962 | return retval; | |
2963 | ||
2964 | out_failed: | |
2965 | ||
2966 | if (mrioc->admin_reply_base) { | |
2967 | dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, | |
2968 | mrioc->admin_reply_base, mrioc->admin_reply_dma); | |
2969 | mrioc->admin_reply_base = NULL; | |
2970 | } | |
2971 | if (mrioc->admin_req_base) { | |
2972 | dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, | |
2973 | mrioc->admin_req_base, mrioc->admin_req_dma); | |
2974 | mrioc->admin_req_base = NULL; | |
2975 | } | |
2976 | return retval; | |
2977 | } | |
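/*
 * Illustrative sketch, not part of the driver: the admin queue depths are
 * programmed with a single 32-bit register write above, reply depth in the
 * upper 16 bits and request depth in the lower 16 bits.  A stand-alone
 * model of that packing (the depth values are made up):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t num_admin_req = 256, num_admin_replies = 128;
	uint32_t num_admin_entries =
		((uint32_t)num_admin_replies << 16) | num_admin_req;

	printf("register=0x%08x requests=%u replies=%u\n", num_admin_entries,
	       num_admin_entries & 0xffff, num_admin_entries >> 16);
	return 0;
}
#endif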
2978 | ||
2979 | /** | |
2980 | * mpi3mr_issue_iocfacts - Send IOC Facts | |
2981 | * @mrioc: Adapter instance reference | |
2982 | * @facts_data: Cached IOC facts data | |
2983 | * | |
2984 | * Issue IOC Facts MPI request through admin queue and wait for | |
2985 | * the completion of it or time out. | |
2986 | * | |
2987 | * Return: 0 on success, non-zero on failures. | |
2988 | */ | |
2989 | static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, | |
2990 | struct mpi3_ioc_facts_data *facts_data) | |
2991 | { | |
2992 | struct mpi3_ioc_facts_request iocfacts_req; | |
2993 | void *data = NULL; | |
2994 | dma_addr_t data_dma; | |
2995 | u32 data_len = sizeof(*facts_data); | |
2996 | int retval = 0; | |
2997 | u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; | |
2998 | ||
2999 | data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, | |
3000 | GFP_KERNEL); | |
3001 | ||
3002 | if (!data) { | |
3003 | retval = -1; | |
3004 | goto out; | |
3005 | } | |
3006 | ||
3007 | memset(&iocfacts_req, 0, sizeof(iocfacts_req)); | |
3008 | mutex_lock(&mrioc->init_cmds.mutex); | |
3009 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
3010 | retval = -1; | |
3011 | ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n"); | |
3012 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3013 | goto out; | |
3014 | } | |
3015 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
3016 | mrioc->init_cmds.is_waiting = 1; | |
3017 | mrioc->init_cmds.callback = NULL; | |
3018 | iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
3019 | iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS; | |
3020 | ||
3021 | mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len, | |
3022 | data_dma); | |
3023 | ||
3024 | init_completion(&mrioc->init_cmds.done); | |
3025 | retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req, | |
3026 | sizeof(iocfacts_req), 1); | |
3027 | if (retval) { | |
3028 | ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n"); | |
3029 | goto out_unlock; | |
3030 | } | |
3031 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
3032 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
3033 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 SR |
3034 | ioc_err(mrioc, "ioc_facts timed out\n"); |
3035 | mpi3mr_check_rh_fault_ioc(mrioc, | |
824a1566 | 3036 | MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT); |
824a1566 KD |
3037 | retval = -1; |
3038 | goto out_unlock; | |
3039 | } | |
3040 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
3041 | != MPI3_IOCSTATUS_SUCCESS) { | |
3042 | ioc_err(mrioc, | |
3043 | "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
3044 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
3045 | mrioc->init_cmds.ioc_loginfo); | |
3046 | retval = -1; | |
3047 | goto out_unlock; | |
3048 | } | |
3049 | memcpy(facts_data, (u8 *)data, data_len); | |
c5758fc7 | 3050 | mpi3mr_process_factsdata(mrioc, facts_data); |
824a1566 KD |
3051 | out_unlock: |
3052 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
3053 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3054 | ||
3055 | out: | |
3056 | if (data) | |
3057 | dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma); | |
3058 | ||
3059 | return retval; | |
3060 | } | |
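/*
 * Illustrative sketch, not part of the driver: every internal admin command
 * in this file follows the same shape - take the init_cmds mutex, mark the
 * tracker pending, post the request, sleep on a completion with a timeout
 * and then inspect the tracker state.  A stripped-down, hypothetical version
 * of just the wait step (struct demo_cmd and its fields are invented):
 */
#if 0
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_cmd {
	struct completion done;
	bool completed;			/* set by the reply handler */
};

static int demo_wait_for_reply(struct demo_cmd *cmd, unsigned int timeout_s)
{
	init_completion(&cmd->done);

	/* ... post the request to the admin queue here ... */

	wait_for_completion_timeout(&cmd->done, timeout_s * HZ);
	if (!cmd->completed)
		return -ETIME;	/* caller escalates: fault check or reset */
	return 0;
}
#endif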
3061 | ||
3062 | /** | |
3063 | * mpi3mr_check_reset_dma_mask - Check and set new DMA mask |
3064 | * @mrioc: Adapter instance reference | |
3065 | * | |
3066 | * Check whether the new DMA mask requested through IOCFacts by | |
3067 | * firmware needs to be set, and if so, set it. |
3068 | * | |
3069 | * Return: 0 on success, non-zero on failure. | |
3070 | */ | |
3071 | static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc) | |
3072 | { | |
3073 | struct pci_dev *pdev = mrioc->pdev; | |
3074 | int r; | |
3075 | u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask); | |
3076 | ||
3077 | if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask)) | |
3078 | return 0; | |
3079 | ||
3080 | ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n", | |
3081 | mrioc->dma_mask, facts_dma_mask); | |
3082 | ||
3083 | r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask); | |
3084 | if (r) { | |
3085 | ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n", | |
3086 | facts_dma_mask, r); | |
3087 | return r; | |
3088 | } | |
3089 | mrioc->dma_mask = facts_dma_mask; | |
3090 | return r; | |
3091 | } | |
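/*
 * Illustrative sketch, not part of the driver: DMA_BIT_MASK(n) expands to an
 * n-bit all-ones value, so a smaller address width always compares as a
 * numerically smaller mask and the routine above only ever narrows the mask.
 * A stand-alone model of that comparison (the widths are made up):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned int facts_width = 63;		/* width reported via IOC facts */
	uint64_t cur_mask = DEMO_DMA_BIT_MASK(64);
	uint64_t facts_mask = facts_width ? DEMO_DMA_BIT_MASK(facts_width) : 0;

	if (facts_width && cur_mask > facts_mask)
		printf("narrow DMA mask to 0x%016llx\n",
		       (unsigned long long)facts_mask);
	return 0;
}
#endif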
3092 | ||
3093 | /** | |
3094 | * mpi3mr_process_factsdata - Process IOC facts data | |
3095 | * @mrioc: Adapter instance reference | |
3096 | * @facts_data: Cached IOC facts data | |
3097 | * | |
3098 | * Convert IOC facts data into CPU endianness and cache it in |
3099 | * the driver. |
3100 | * | |
3101 | * Return: Nothing. | |
3102 | */ | |
3103 | static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, | |
3104 | struct mpi3_ioc_facts_data *facts_data) | |
3105 | { | |
3106 | u32 ioc_config, req_sz, facts_flags; | |
3107 | ||
3108 | if ((le16_to_cpu(facts_data->ioc_facts_data_length)) != | |
3109 | (sizeof(*facts_data) / 4)) { | |
3110 | ioc_warn(mrioc, | |
3111 | "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n", | |
3112 | sizeof(*facts_data), | |
3113 | le16_to_cpu(facts_data->ioc_facts_data_length) * 4); | |
3114 | } | |
3115 | ||
3116 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
3117 | req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >> | |
3118 | MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT); | |
3119 | if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) { | |
3120 | ioc_err(mrioc, | |
3121 | "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n", | |
3122 | req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size)); | |
3123 | } | |
3124 | ||
3125 | memset(&mrioc->facts, 0, sizeof(mrioc->facts)); | |
3126 | ||
3127 | facts_flags = le32_to_cpu(facts_data->flags); | |
3128 | mrioc->facts.op_req_sz = req_sz; | |
3129 | mrioc->op_reply_desc_sz = 1 << ((ioc_config & | |
3130 | MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >> | |
3131 | MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT); | |
3132 | ||
3133 | mrioc->facts.ioc_num = facts_data->ioc_number; | |
3134 | mrioc->facts.who_init = facts_data->who_init; | |
3135 | mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors); | |
3136 | mrioc->facts.personality = (facts_flags & | |
3137 | MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK); | |
3138 | mrioc->facts.dma_mask = (facts_flags & | |
3139 | MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> | |
3140 | MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; | |
824a1566 KD |
3144 | mrioc->facts.protocol_flags = facts_data->protocol_flags; |
3145 | mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); | |
04b27e53 | 3146 | mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests); |
824a1566 KD |
3147 | mrioc->facts.product_id = le16_to_cpu(facts_data->product_id); |
3148 | mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4; | |
3149 | mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions); | |
3150 | mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id); | |
824a1566 KD |
3151 | mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds); |
3152 | mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds); | |
ec5ebd2c SR |
3153 | mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds); |
3154 | mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds); | |
824a1566 KD |
3155 | mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme); |
3156 | mrioc->facts.max_pcie_switches = | |
ec5ebd2c | 3157 | le16_to_cpu(facts_data->max_pcie_switches); |
824a1566 KD |
3158 | mrioc->facts.max_sasexpanders = |
3159 | le16_to_cpu(facts_data->max_sas_expanders); | |
d9adb81e | 3160 | mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length); |
824a1566 KD |
3161 | mrioc->facts.max_sasinitiators = |
3162 | le16_to_cpu(facts_data->max_sas_initiators); | |
3163 | mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures); | |
3164 | mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle); | |
3165 | mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle); | |
3166 | mrioc->facts.max_op_req_q = | |
3167 | le16_to_cpu(facts_data->max_operational_request_queues); | |
3168 | mrioc->facts.max_op_reply_q = | |
3169 | le16_to_cpu(facts_data->max_operational_reply_queues); | |
3170 | mrioc->facts.ioc_capabilities = | |
3171 | le32_to_cpu(facts_data->ioc_capabilities); | |
3172 | mrioc->facts.fw_ver.build_num = | |
3173 | le16_to_cpu(facts_data->fw_version.build_num); | |
3174 | mrioc->facts.fw_ver.cust_id = | |
3175 | le16_to_cpu(facts_data->fw_version.customer_id); | |
3176 | mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor; | |
3177 | mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major; | |
3178 | mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor; | |
3179 | mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major; | |
3180 | mrioc->msix_count = min_t(int, mrioc->msix_count, | |
3181 | mrioc->facts.max_msix_vectors); | |
3182 | mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask; | |
3183 | mrioc->facts.sge_mod_value = facts_data->sge_modifier_value; | |
3184 | mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; | |
3185 | mrioc->facts.shutdown_timeout = | |
3186 | le16_to_cpu(facts_data->shutdown_timeout); | |
fc444494 RK |
3187 | mrioc->facts.diag_trace_sz = |
3188 | le32_to_cpu(facts_data->diag_trace_size); | |
3189 | mrioc->facts.diag_fw_sz = | |
3190 | le32_to_cpu(facts_data->diag_fw_size); | |
3191 | mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size); | |
f10af057 SR |
3192 | mrioc->facts.max_dev_per_tg = |
3193 | facts_data->max_devices_per_throttle_group; | |
3194 | mrioc->facts.io_throttle_data_length = | |
3195 | le16_to_cpu(facts_data->io_throttle_data_length); | |
3196 | mrioc->facts.max_io_throttle_group = | |
3197 | le16_to_cpu(facts_data->max_io_throttle_group); | |
3198 | mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low); | |
3199 | mrioc->facts.io_throttle_high = | |
3200 | le16_to_cpu(facts_data->io_throttle_high); | |
3201 | ||
d9adb81e RK |
3202 | if (mrioc->facts.max_data_length == |
3203 | MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED) | |
3204 | mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE; | |
3205 | else | |
3206 | mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K; | |
f10af057 SR |
3207 | /* Store in 512b block count */ |
3208 | if (mrioc->facts.io_throttle_data_length) | |
3209 | mrioc->io_throttle_data_length = | |
3210 | (mrioc->facts.io_throttle_data_length * 2 * 4); | |
3211 | else | |
3212 | /* set the length to max_data_length + 1K to disable throttle */ |
d9adb81e | 3213 | mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2; |
f10af057 SR |
3214 | |
3215 | mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024); | |
3216 | mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024); | |
3217 | ||
824a1566 KD |
3218 | ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),", |
3219 | mrioc->facts.ioc_num, mrioc->facts.max_op_req_q, | |
3220 | mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle); | |
3221 | ioc_info(mrioc, | |
ec5ebd2c | 3222 | "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n", |
824a1566 | 3223 | mrioc->facts.max_reqs, mrioc->facts.min_devhandle, |
ec5ebd2c | 3224 | mrioc->facts.max_msix_vectors, mrioc->facts.max_perids); |
824a1566 KD |
3225 | ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ", |
3226 | mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value, | |
3227 | mrioc->facts.sge_mod_shift); | |
d9adb81e | 3228 | ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n", |
824a1566 | 3229 | mrioc->facts.dma_mask, (facts_flags & |
d9adb81e | 3230 | MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length); |
f10af057 SR |
3231 | ioc_info(mrioc, |
3232 | "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n", | |
3233 | mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group); | |
3234 | ioc_info(mrioc, | |
3235 | "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n", | |
3236 | mrioc->facts.io_throttle_data_length * 4, | |
3237 | mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low); | |
824a1566 KD |
3238 | } |
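/*
 * Illustrative sketch, not part of the driver: io_throttle_data_length from
 * IOC facts is reported in 4 KiB units (the ioc_info print above multiplies
 * it by 4 to show KiB), while throttling is tracked in 512-byte blocks,
 * hence the "* 2 * 4" conversion (4 KiB == 8 blocks of 512 bytes).  A worked
 * example with a made-up facts value:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int facts_4k_units = 256;		/* 256 * 4 KiB == 1 MiB */
	unsigned int blocks_512 = facts_4k_units * 2 * 4;

	/* prints "1024 KiB -> 2048 512-byte blocks" */
	printf("%u KiB -> %u 512-byte blocks\n",
	       facts_4k_units * 4, blocks_512);
	return 0;
}
#endif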
3239 | ||
3240 | /** | |
3241 | * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers |
3242 | * @mrioc: Adapter instance reference | |
3243 | * | |
3244 | * Allocate and initialize the reply free buffers, sense | |
3245 | * buffers, reply free queue and sense buffer queue. | |
3246 | * | |
3247 | * Return: 0 on success, non-zero on failures. | |
3248 | */ | |
3249 | static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) | |
3250 | { | |
3251 | int retval = 0; | |
3252 | u32 sz, i; | |
824a1566 KD |
3253 | |
3254 | if (mrioc->init_cmds.reply) | |
e3605f65 | 3255 | return retval; |
824a1566 | 3256 | |
c5758fc7 | 3257 | mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); |
824a1566 KD |
3258 | if (!mrioc->init_cmds.reply) |
3259 | goto out_failed; | |
3260 | ||
f5e6d5a3 SS |
3261 | mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); |
3262 | if (!mrioc->bsg_cmds.reply) | |
3263 | goto out_failed; | |
3264 | ||
2bd37e28 SR |
3265 | mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); |
3266 | if (!mrioc->transport_cmds.reply) | |
3267 | goto out_failed; | |
3268 | ||
13ef29ea | 3269 | for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { |
c5758fc7 | 3270 | mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz, |
13ef29ea KD |
3271 | GFP_KERNEL); |
3272 | if (!mrioc->dev_rmhs_cmds[i].reply) | |
3273 | goto out_failed; | |
3274 | } | |
3275 | ||
c1af985d SR |
3276 | for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { |
3277 | mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz, | |
3278 | GFP_KERNEL); | |
3279 | if (!mrioc->evtack_cmds[i].reply) | |
3280 | goto out_failed; | |
3281 | } | |
3282 | ||
c5758fc7 | 3283 | mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); |
e844adb1 KD |
3284 | if (!mrioc->host_tm_cmds.reply) |
3285 | goto out_failed; | |
3286 | ||
43ca1100 SS |
3287 | mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); |
3288 | if (!mrioc->pel_cmds.reply) | |
3289 | goto out_failed; | |
3290 | ||
3291 | mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); | |
3292 | if (!mrioc->pel_abort_cmd.reply) | |
3293 | goto out_failed; | |
3294 | ||
339e6156 SK |
3295 | mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; |
3296 | mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits, | |
3297 | GFP_KERNEL); | |
e844adb1 KD |
3298 | if (!mrioc->removepend_bitmap) |
3299 | goto out_failed; | |
3300 | ||
339e6156 | 3301 | mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL); |
e844adb1 KD |
3302 | if (!mrioc->devrem_bitmap) |
3303 | goto out_failed; | |
3304 | ||
339e6156 SK |
3305 | mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD, |
3306 | GFP_KERNEL); | |
c1af985d SR |
3307 | if (!mrioc->evtack_cmds_bitmap) |
3308 | goto out_failed; | |
3309 | ||
824a1566 KD |
3310 | mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; |
3311 | mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1; | |
3312 | mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; | |
3313 | mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; | |
3314 | ||
3315 | /* reply buffer pool, 16 byte align */ | |
c5758fc7 | 3316 | sz = mrioc->num_reply_bufs * mrioc->reply_sz; |
824a1566 KD |
3317 | mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", |
3318 | &mrioc->pdev->dev, sz, 16, 0); | |
3319 | if (!mrioc->reply_buf_pool) { | |
3320 | ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n"); | |
3321 | goto out_failed; | |
3322 | } | |
3323 | ||
3324 | mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL, | |
3325 | &mrioc->reply_buf_dma); | |
3326 | if (!mrioc->reply_buf) | |
3327 | goto out_failed; | |
3328 | ||
3329 | mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz; | |
3330 | ||
3331 | /* reply free queue, 8 byte align */ | |
3332 | sz = mrioc->reply_free_qsz * 8; | |
3333 | mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool", | |
3334 | &mrioc->pdev->dev, sz, 8, 0); | |
3335 | if (!mrioc->reply_free_q_pool) { | |
3336 | ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n"); | |
3337 | goto out_failed; | |
3338 | } | |
3339 | mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool, | |
3340 | GFP_KERNEL, &mrioc->reply_free_q_dma); | |
3341 | if (!mrioc->reply_free_q) | |
3342 | goto out_failed; | |
3343 | ||
3344 | /* sense buffer pool, 4 byte align */ | |
ec5ebd2c | 3345 | sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; |
824a1566 KD |
3346 | mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", |
3347 | &mrioc->pdev->dev, sz, 4, 0); | |
3348 | if (!mrioc->sense_buf_pool) { | |
3349 | ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n"); | |
3350 | goto out_failed; | |
3351 | } | |
3352 | mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL, | |
3353 | &mrioc->sense_buf_dma); | |
3354 | if (!mrioc->sense_buf) | |
3355 | goto out_failed; | |
3356 | ||
3357 | /* sense buffer queue, 8 byte align */ | |
3358 | sz = mrioc->sense_buf_q_sz * 8; | |
3359 | mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool", | |
3360 | &mrioc->pdev->dev, sz, 8, 0); | |
3361 | if (!mrioc->sense_buf_q_pool) { | |
3362 | ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n"); | |
3363 | goto out_failed; | |
3364 | } | |
3365 | mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool, | |
3366 | GFP_KERNEL, &mrioc->sense_buf_q_dma); | |
3367 | if (!mrioc->sense_buf_q) | |
3368 | goto out_failed; | |
3369 | ||
e3605f65 SR |
3370 | return retval; |
3371 | ||
3372 | out_failed: | |
3373 | retval = -1; | |
3374 | return retval; | |
3375 | } | |
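/*
 * Illustrative sketch, not part of the driver: the reply, reply free queue
 * and sense buffer allocations above use the DMA pool API so that many
 * identically sized buffers come from coherent memory with a fixed
 * alignment.  A minimal, hypothetical use of the same API (device pointer,
 * sizes and alignment are made up):
 */
#if 0
#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_alloc_frame(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t frame_dma;
	void *frame;

	/* pool of 128-byte frames, 16-byte aligned, no boundary restriction */
	pool = dma_pool_create("demo_frames", dev, 128, 16, 0);
	if (!pool)
		return -ENOMEM;

	frame = dma_pool_zalloc(pool, GFP_KERNEL, &frame_dma);
	if (!frame) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand frame_dma to the hardware, access frame from the CPU ... */

	dma_pool_free(pool, frame, frame_dma);
	dma_pool_destroy(pool);
	return 0;
}
#endif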
3376 | ||
3377 | /** | |
3378 | * mpimr_initialize_reply_sbuf_queues - initialize reply sense | |
3379 | * buffers | |
3380 | * @mrioc: Adapter instance reference | |
3381 | * | |
3382 | * Helper function to initialize reply and sense buffers along | |
3383 | * with some debug prints. | |
3384 | * | |
3385 | * Return: None. | |
3386 | */ | |
3387 | static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc) | |
3388 | { | |
3389 | u32 sz, i; | |
3390 | dma_addr_t phy_addr; | |
3391 | ||
c5758fc7 | 3392 | sz = mrioc->num_reply_bufs * mrioc->reply_sz; |
824a1566 KD |
3393 | ioc_info(mrioc, |
3394 | "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", | |
c5758fc7 | 3395 | mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz, |
824a1566 KD |
3396 | (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); |
3397 | sz = mrioc->reply_free_qsz * 8; | |
3398 | ioc_info(mrioc, | |
3399 | "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", | |
3400 | mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), | |
3401 | (unsigned long long)mrioc->reply_free_q_dma); | |
ec5ebd2c | 3402 | sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; |
824a1566 KD |
3403 | ioc_info(mrioc, |
3404 | "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", | |
ec5ebd2c | 3405 | mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ, |
824a1566 KD |
3406 | (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); |
3407 | sz = mrioc->sense_buf_q_sz * 8; | |
3408 | ioc_info(mrioc, | |
3409 | "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", | |
3410 | mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024), | |
3411 | (unsigned long long)mrioc->sense_buf_q_dma); | |
3412 | ||
3413 | /* initialize Reply buffer Queue */ | |
3414 | for (i = 0, phy_addr = mrioc->reply_buf_dma; | |
c5758fc7 | 3415 | i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz) |
824a1566 KD |
3416 | mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); |
3417 | mrioc->reply_free_q[i] = cpu_to_le64(0); | |
3418 | ||
3419 | /* initialize Sense Buffer Queue */ | |
3420 | for (i = 0, phy_addr = mrioc->sense_buf_dma; | |
ec5ebd2c | 3421 | i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ) |
824a1566 KD |
3422 | mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); |
3423 | mrioc->sense_buf_q[i] = cpu_to_le64(0); | |
824a1566 KD |
3424 | } |
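/*
 * Illustrative sketch, not part of the driver: the loops above carve one
 * large coherent allocation into fixed-size frames and post each frame's DMA
 * address into the matching free queue, leaving the final (unused) slot
 * zeroed.  A stand-alone model of that address arithmetic with made-up
 * values:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x100000;	/* pretend DMA address of the buffer pool */
	unsigned int frame_sz = 128, num_bufs = 4;
	uint64_t free_q[5];		/* num_bufs + 1 entries */
	unsigned int i;

	for (i = 0; i < num_bufs; i++)
		free_q[i] = base + (uint64_t)i * frame_sz;
	free_q[i] = 0;			/* terminating, unused slot */

	for (i = 0; i <= num_bufs; i++)
		printf("free_q[%u] = 0x%llx\n", i,
		       (unsigned long long)free_q[i]);
	return 0;
}
#endif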
3425 | ||
3426 | /** | |
3427 | * mpi3mr_issue_iocinit - Send IOC Init | |
3428 | * @mrioc: Adapter instance reference | |
3429 | * | |
3430 | * Issue IOC Init MPI request through admin queue and wait for | |
3431 | * the completion of it or time out. | |
3432 | * | |
3433 | * Return: 0 on success, non-zero on failures. | |
3434 | */ | |
3435 | static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) | |
3436 | { | |
3437 | struct mpi3_ioc_init_request iocinit_req; | |
3438 | struct mpi3_driver_info_layout *drv_info; | |
3439 | dma_addr_t data_dma; | |
3440 | u32 data_len = sizeof(*drv_info); | |
3441 | int retval = 0; | |
3442 | ktime_t current_time; | |
3443 | ||
3444 | drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, | |
3445 | GFP_KERNEL); | |
3446 | if (!drv_info) { | |
3447 | retval = -1; | |
3448 | goto out; | |
3449 | } | |
e3605f65 SR |
3450 | mpimr_initialize_reply_sbuf_queues(mrioc); |
3451 | ||
824a1566 | 3452 | drv_info->information_length = cpu_to_le32(data_len); |
aa0dc6a7 SR |
3453 | strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); |
3454 | strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); | |
3455 | strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); | |
3456 | strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); | |
3457 | strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); | |
3458 | strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, | |
3459 | sizeof(drv_info->driver_release_date)); | |
824a1566 KD |
3460 | drv_info->driver_capabilities = 0; |
3461 | memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, | |
3462 | sizeof(mrioc->driver_info)); | |
3463 | ||
3464 | memset(&iocinit_req, 0, sizeof(iocinit_req)); | |
3465 | mutex_lock(&mrioc->init_cmds.mutex); | |
3466 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
3467 | retval = -1; | |
3468 | ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); | |
3469 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3470 | goto out; | |
3471 | } | |
3472 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
3473 | mrioc->init_cmds.is_waiting = 1; | |
3474 | mrioc->init_cmds.callback = NULL; | |
3475 | iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
3476 | iocinit_req.function = MPI3_FUNCTION_IOC_INIT; | |
3477 | iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; | |
3478 | iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; | |
3479 | iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; | |
3480 | iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; | |
3481 | iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; | |
3482 | iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); | |
3483 | iocinit_req.reply_free_queue_address = | |
3484 | cpu_to_le64(mrioc->reply_free_q_dma); | |
ec5ebd2c | 3485 | iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ); |
824a1566 KD |
3486 | iocinit_req.sense_buffer_free_queue_depth = |
3487 | cpu_to_le16(mrioc->sense_buf_q_sz); | |
3488 | iocinit_req.sense_buffer_free_queue_address = | |
3489 | cpu_to_le64(mrioc->sense_buf_q_dma); | |
3490 | iocinit_req.driver_information_address = cpu_to_le64(data_dma); | |
3491 | ||
3492 | current_time = ktime_get_real(); | |
3493 | iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); | |
3494 | ||
1193a89d SS |
3495 | iocinit_req.msg_flags |= |
3496 | MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED; | |
e8a5a3c3 RK |
3497 | iocinit_req.msg_flags |= |
3498 | MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED; | |
1193a89d | 3499 | |
824a1566 KD |
3500 | init_completion(&mrioc->init_cmds.done); |
3501 | retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, | |
3502 | sizeof(iocinit_req), 1); | |
3503 | if (retval) { | |
3504 | ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); | |
3505 | goto out_unlock; | |
3506 | } | |
3507 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
3508 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
3509 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 | 3510 | mpi3mr_check_rh_fault_ioc(mrioc, |
824a1566 | 3511 | MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); |
a6856cc4 | 3512 | ioc_err(mrioc, "ioc_init timed out\n"); |
824a1566 KD |
3513 | retval = -1; |
3514 | goto out_unlock; | |
3515 | } | |
3516 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
3517 | != MPI3_IOCSTATUS_SUCCESS) { | |
3518 | ioc_err(mrioc, | |
3519 | "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
3520 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
3521 | mrioc->init_cmds.ioc_loginfo); | |
3522 | retval = -1; | |
3523 | goto out_unlock; | |
3524 | } | |
3525 | ||
e3605f65 SR |
3526 | mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; |
3527 | writel(mrioc->reply_free_queue_host_index, | |
3528 | &mrioc->sysif_regs->reply_free_host_index); | |
3529 | ||
3530 | mrioc->sbq_host_index = mrioc->num_sense_bufs; | |
3531 | writel(mrioc->sbq_host_index, | |
3532 | &mrioc->sysif_regs->sense_buffer_free_host_index); | |
824a1566 KD |
3533 | out_unlock: |
3534 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
3535 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3536 | ||
3537 | out: | |
3538 | if (drv_info) | |
3539 | dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, | |
3540 | data_dma); | |
3541 | ||
3542 | return retval; | |
3543 | } | |
3544 | ||
13ef29ea KD |
3545 | /** |
3546 | * mpi3mr_unmask_events - Unmask events in event mask bitmap | |
3547 | * @mrioc: Adapter instance reference | |
3548 | * @event: MPI event ID | |
3549 | * | |
3550 | * Unmask the specific event by clearing the corresponding bit in |
3551 | * the event_mask bitmap. |
3552 | * |
3553 | * Return: Nothing. |
3554 | */ | |
3555 | static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event) | |
3556 | { | |
3557 | u32 desired_event; | |
3558 | u8 word; | |
3559 | ||
3560 | if (event >= 128) | |
3561 | return; | |
3562 | ||
3563 | desired_event = (1 << (event % 32)); | |
3564 | word = event / 32; | |
3565 | ||
3566 | mrioc->event_masks[word] &= ~desired_event; | |
3567 | } | |
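/*
 * Illustrative sketch, not part of the driver: events are tracked in an
 * array of 32-bit mask words, so event N lives in word N / 32, bit N % 32,
 * and clearing that bit unmasks (enables) the event.  A stand-alone model
 * of the same bit arithmetic with a made-up event ID:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t event_masks[4] = { ~0u, ~0u, ~0u, ~0u };	/* all masked */
	unsigned int event = 37;			/* hypothetical event ID */

	if (event < 128)
		event_masks[event / 32] &= ~(1u << (event % 32));

	/* prints "word 1 = 0xffffffdf" */
	printf("word %u = 0x%08x\n", event / 32, event_masks[event / 32]);
	return 0;
}
#endif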
3568 | ||
3569 | /** | |
3570 | * mpi3mr_issue_event_notification - Send event notification | |
3571 | * @mrioc: Adapter instance reference | |
3572 | * | |
3573 | * Issue event notification MPI request through admin queue and | |
3574 | * wait for the completion of it or time out. | |
3575 | * | |
3576 | * Return: 0 on success, non-zero on failures. | |
3577 | */ | |
3578 | static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) | |
3579 | { | |
3580 | struct mpi3_event_notification_request evtnotify_req; | |
3581 | int retval = 0; | |
3582 | u8 i; | |
3583 | ||
3584 | memset(&evtnotify_req, 0, sizeof(evtnotify_req)); | |
3585 | mutex_lock(&mrioc->init_cmds.mutex); | |
3586 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
3587 | retval = -1; | |
3588 | ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); | |
3589 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3590 | goto out; | |
3591 | } | |
3592 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
3593 | mrioc->init_cmds.is_waiting = 1; | |
3594 | mrioc->init_cmds.callback = NULL; | |
3595 | evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
3596 | evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; | |
3597 | for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) | |
3598 | evtnotify_req.event_masks[i] = | |
3599 | cpu_to_le32(mrioc->event_masks[i]); | |
3600 | init_completion(&mrioc->init_cmds.done); | |
3601 | retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, | |
3602 | sizeof(evtnotify_req), 1); | |
3603 | if (retval) { | |
3604 | ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); | |
3605 | goto out_unlock; | |
3606 | } | |
3607 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
3608 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
3609 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
a6856cc4 SR |
3610 | ioc_err(mrioc, "event notification timed out\n"); |
3611 | mpi3mr_check_rh_fault_ioc(mrioc, | |
13ef29ea | 3612 | MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); |
13ef29ea KD |
3613 | retval = -1; |
3614 | goto out_unlock; | |
3615 | } | |
3616 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
3617 | != MPI3_IOCSTATUS_SUCCESS) { | |
3618 | ioc_err(mrioc, | |
3619 | "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
3620 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
3621 | mrioc->init_cmds.ioc_loginfo); | |
3622 | retval = -1; | |
3623 | goto out_unlock; | |
3624 | } | |
3625 | ||
3626 | out_unlock: | |
3627 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
3628 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3629 | out: | |
3630 | return retval; | |
3631 | } | |
3632 | ||
3633 | /** | |
c1af985d | 3634 | * mpi3mr_process_event_ack - Process event acknowledgment |
13ef29ea KD |
3635 | * @mrioc: Adapter instance reference |
3636 | * @event: MPI3 event ID | |
c1af985d | 3637 | * @event_ctx: event context |
13ef29ea KD |
3638 | * |
3639 | * Send event acknowledgment through admin queue and wait for | |
3640 | * it to complete. | |
3641 | * | |
3642 | * Return: 0 on success, non-zero on failures. | |
3643 | */ | |
c1af985d | 3644 | int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, |
13ef29ea KD |
3645 | u32 event_ctx) |
3646 | { | |
3647 | struct mpi3_event_ack_request evtack_req; | |
3648 | int retval = 0; | |
3649 | ||
3650 | memset(&evtack_req, 0, sizeof(evtack_req)); | |
3651 | mutex_lock(&mrioc->init_cmds.mutex); | |
3652 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
3653 | retval = -1; | |
3654 | ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); | |
3655 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3656 | goto out; | |
3657 | } | |
3658 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
3659 | mrioc->init_cmds.is_waiting = 1; | |
3660 | mrioc->init_cmds.callback = NULL; | |
3661 | evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
3662 | evtack_req.function = MPI3_FUNCTION_EVENT_ACK; | |
3663 | evtack_req.event = event; | |
3664 | evtack_req.event_context = cpu_to_le32(event_ctx); | |
3665 | ||
3666 | init_completion(&mrioc->init_cmds.done); | |
3667 | retval = mpi3mr_admin_request_post(mrioc, &evtack_req, | |
3668 | sizeof(evtack_req), 1); | |
3669 | if (retval) { | |
3670 | ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); | |
3671 | goto out_unlock; | |
3672 | } | |
3673 | wait_for_completion_timeout(&mrioc->init_cmds.done, | |
3674 | (MPI3MR_INTADMCMD_TIMEOUT * HZ)); | |
3675 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
3676 | ioc_err(mrioc, "Send EvtAck: command timed out\n"); |
fbaa9aa4 | 3677 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) |
9134211f RK |
3678 | mpi3mr_check_rh_fault_ioc(mrioc, |
3679 | MPI3MR_RESET_FROM_EVTACK_TIMEOUT); | |
13ef29ea KD |
3680 | retval = -1; |
3681 | goto out_unlock; | |
3682 | } | |
3683 | if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) | |
3684 | != MPI3_IOCSTATUS_SUCCESS) { | |
3685 | ioc_err(mrioc, | |
3686 | "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
3687 | (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), | |
3688 | mrioc->init_cmds.ioc_loginfo); | |
3689 | retval = -1; | |
3690 | goto out_unlock; | |
3691 | } | |
3692 | ||
3693 | out_unlock: | |
3694 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
3695 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3696 | out: | |
3697 | return retval; | |
3698 | } | |
3699 | ||
824a1566 KD |
3700 | /** |
3701 | * mpi3mr_alloc_chain_bufs - Allocate chain buffers | |
3702 | * @mrioc: Adapter instance reference | |
3703 | * | |
3704 | * Allocate chain buffers and set a bitmap to indicate free | |
3705 | * chain buffers. Chain buffers are used to pass the SGE | |
3706 | * information along with MPI3 SCSI IO requests for host I/O. | |
3707 | * | |
3708 | * Return: 0 on success, non-zero on failure | |
3709 | */ | |
3710 | static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) | |
3711 | { | |
3712 | int retval = 0; | |
3713 | u32 sz, i; | |
3714 | u16 num_chains; | |
3715 | ||
fe6db615 SR |
3716 | if (mrioc->chain_sgl_list) |
3717 | return retval; | |
3718 | ||
824a1566 KD |
3719 | num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; |
3720 | ||
74e1f30a KD |
3721 | if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION |
3722 | | SHOST_DIX_TYPE1_PROTECTION | |
3723 | | SHOST_DIX_TYPE2_PROTECTION | |
3724 | | SHOST_DIX_TYPE3_PROTECTION)) | |
3725 | num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); | |
3726 | ||
824a1566 KD |
3727 | mrioc->chain_buf_count = num_chains; |
3728 | sz = sizeof(struct chain_element) * num_chains; | |
3729 | mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); | |
3730 | if (!mrioc->chain_sgl_list) | |
3731 | goto out_failed; | |
3732 | ||
d9adb81e RK |
3733 | if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length / |
3734 | MPI3MR_PAGE_SIZE_4K)) | |
3735 | mrioc->max_sgl_entries = mrioc->facts.max_data_length / | |
3736 | MPI3MR_PAGE_SIZE_4K; | |
3737 | sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common); | |
3738 | ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n", | |
3739 | mrioc->max_sgl_entries, sz/1024); | |
3740 | ||
824a1566 KD |
3741 | mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", |
3742 | &mrioc->pdev->dev, sz, 16, 0); | |
3743 | if (!mrioc->chain_buf_pool) { | |
3744 | ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); | |
3745 | goto out_failed; | |
3746 | } | |
3747 | ||
3748 | for (i = 0; i < num_chains; i++) { | |
3749 | mrioc->chain_sgl_list[i].addr = | |
3750 | dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, | |
3751 | &mrioc->chain_sgl_list[i].dma_addr); | |
3752 | ||
3753 | if (!mrioc->chain_sgl_list[i].addr) | |
3754 | goto out_failed; | |
3755 | } | |
339e6156 | 3756 | mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL); |
824a1566 KD |
3757 | if (!mrioc->chain_bitmap) |
3758 | goto out_failed; | |
3759 | return retval; | |
3760 | out_failed: | |
3761 | retval = -1; | |
3762 | return retval; | |
3763 | } | |
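/*
 * Illustrative sketch, not part of the driver: the sizing above is plain
 * arithmetic - one chain buffer per MPI3MR_CHAINBUF_FACTOR outstanding host
 * I/Os (plus extra when a DIX protection type is enabled), and each chain
 * buffer holds max_sgl_entries SGEs, capped so one SGL can never describe
 * more than max_data_length worth of 4 KiB pages.  A worked example with
 * made-up values:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int max_host_ios = 1024, chainbuf_factor = 3;
	unsigned int max_sgl_entries = 256, sge_sz = 16;
	unsigned int max_data_length = 512 * 1024, page_sz = 4096;
	unsigned int num_chains = max_host_ios / chainbuf_factor;

	if (max_sgl_entries > max_data_length / page_sz)
		max_sgl_entries = max_data_length / page_sz;	/* capped to 128 */

	/* prints "chains=341, chain buffer=2048 bytes" */
	printf("chains=%u, chain buffer=%u bytes\n",
	       num_chains, max_sgl_entries * sge_sz);
	return 0;
}
#endif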
3764 | ||
023ab2a9 KD |
3765 | /** |
3766 | * mpi3mr_port_enable_complete - Mark port enable complete | |
3767 | * @mrioc: Adapter instance reference | |
3768 | * @drv_cmd: Internal command tracker | |
3769 | * | |
3770 | * Callback for the asynchronous port enable request; sets the |
3771 | * driver command state to indicate the port enable request is complete. |
3772 | * | |
3773 | * Return: Nothing | |
3774 | */ | |
3775 | static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, | |
3776 | struct mpi3mr_drv_cmd *drv_cmd) | |
3777 | { | |
023ab2a9 | 3778 | drv_cmd->callback = NULL; |
023ab2a9 | 3779 | mrioc->scan_started = 0; |
f2a79d20 SR |
3780 | if (drv_cmd->state & MPI3MR_CMD_RESET) |
3781 | mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; | |
3782 | else | |
3783 | mrioc->scan_failed = drv_cmd->ioc_status; | |
3784 | drv_cmd->state = MPI3MR_CMD_NOTUSED; | |
023ab2a9 KD |
3785 | } |
3786 | ||
3787 | /** | |
3788 | * mpi3mr_issue_port_enable - Issue Port Enable | |
3789 | * @mrioc: Adapter instance reference | |
3790 | * @async: Flag to wait for completion or not | |
3791 | * | |
3792 | * Issue Port Enable MPI request through admin queue and if the | |
3793 | * async flag is not set wait for the completion of the port | |
3794 | * enable or time out. | |
3795 | * | |
3796 | * Return: 0 on success, non-zero on failures. | |
3797 | */ | |
3798 | int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) | |
3799 | { | |
3800 | struct mpi3_port_enable_request pe_req; | |
3801 | int retval = 0; | |
3802 | u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; | |
3803 | ||
3804 | memset(&pe_req, 0, sizeof(pe_req)); | |
3805 | mutex_lock(&mrioc->init_cmds.mutex); | |
3806 | if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { | |
3807 | retval = -1; | |
3808 | ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); | |
3809 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3810 | goto out; | |
3811 | } | |
3812 | mrioc->init_cmds.state = MPI3MR_CMD_PENDING; | |
3813 | if (async) { | |
3814 | mrioc->init_cmds.is_waiting = 0; | |
3815 | mrioc->init_cmds.callback = mpi3mr_port_enable_complete; | |
3816 | } else { | |
3817 | mrioc->init_cmds.is_waiting = 1; | |
3818 | mrioc->init_cmds.callback = NULL; | |
3819 | init_completion(&mrioc->init_cmds.done); | |
3820 | } | |
3821 | pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); | |
3822 | pe_req.function = MPI3_FUNCTION_PORT_ENABLE; | |
3823 | ||
3824 | retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); | |
3825 | if (retval) { | |
3826 | ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); | |
3827 | goto out_unlock; | |
3828 | } | |
a6856cc4 SR |
3829 | if (async) { |
3830 | mutex_unlock(&mrioc->init_cmds.mutex); | |
3831 | goto out; | |
3832 | } | |
3833 | ||
3834 | wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); | |
3835 | if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
3836 | ioc_err(mrioc, "port enable timed out\n"); | |
3837 | retval = -1; | |
3838 | mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); | |
3839 | goto out_unlock; | |
023ab2a9 | 3840 | } |
a6856cc4 SR |
3841 | mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); |
3842 | ||
023ab2a9 | 3843 | out_unlock: |
a6856cc4 | 3844 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; |
023ab2a9 KD |
3845 | mutex_unlock(&mrioc->init_cmds.mutex); |
3846 | out: | |
3847 | return retval; | |
3848 | } | |
3849 | ||
a6856cc4 | 3850 | /* Protocol type to name mapper structure */ |
ff9561e9 KD |
3851 | static const struct { |
3852 | u8 protocol; | |
3853 | char *name; | |
3854 | } mpi3mr_protocols[] = { | |
3855 | { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, | |
3856 | { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, | |
3857 | { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, | |
3858 | }; | |
3859 | ||
3860 | /* Capability to name mapper structure */ |
3861 | static const struct { | |
3862 | u32 capability; | |
3863 | char *name; | |
3864 | } mpi3mr_capabilities[] = { | |
57a80be5 RK |
3865 | { MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" }, |
3866 | { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" }, | |
ff9561e9 KD |
3867 | }; |
3868 | ||
fc444494 RK |
3869 | /** |
3870 | * mpi3mr_repost_diag_bufs - repost host diag buffers | |
3871 | * @mrioc: Adapter instance reference | |
3872 | * | |
3873 | * Repost the firmware and trace diag buffers based on the global |
3874 | * trigger flag from driver page 2. |
3875 | * | |
3876 | * Return: 0 on success, non-zero on failures. | |
3877 | */ | |
3878 | static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc) | |
3879 | { | |
3880 | u64 global_trigger; | |
3881 | union mpi3mr_trigger_data prev_trigger_data; | |
3882 | struct diag_buffer_desc *trace_hdb = NULL; | |
3883 | struct diag_buffer_desc *fw_hdb = NULL; | |
3884 | int retval = 0; | |
3885 | bool trace_repost_needed = false; | |
3886 | bool fw_repost_needed = false; | |
3887 | u8 prev_trigger_type; | |
3888 | ||
3889 | retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); | |
3890 | if (retval) | |
3891 | return -1; | |
3892 | ||
3893 | trace_hdb = mpi3mr_diag_buffer_for_type(mrioc, | |
3894 | MPI3_DIAG_BUFFER_TYPE_TRACE); | |
3895 | ||
3896 | if (trace_hdb && | |
3897 | trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && | |
3898 | trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && | |
3899 | trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) | |
3900 | trace_repost_needed = true; | |
3901 | ||
3902 | fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); | |
3903 | ||
3904 | if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && | |
3905 | fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && | |
3906 | fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) | |
3907 | fw_repost_needed = true; | |
3908 | ||
3909 | if (trace_repost_needed || fw_repost_needed) { | |
3910 | global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger); | |
3911 | if (global_trigger & | |
3912 | MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED) | |
3913 | trace_repost_needed = false; | |
3914 | if (global_trigger & | |
3915 | MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED) | |
3916 | fw_repost_needed = false; | |
3917 | } | |
3918 | ||
3919 | if (trace_repost_needed) { | |
3920 | prev_trigger_type = trace_hdb->trigger_type; | |
3921 | memcpy(&prev_trigger_data, &trace_hdb->trigger_data, | |
3922 | sizeof(trace_hdb->trigger_data)); | |
3923 | retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb); | |
3924 | if (!retval) { | |
3925 | dprint_init(mrioc, "trace diag buffer reposted"); | |
3926 | mpi3mr_set_trigger_data_in_hdb(trace_hdb, | |
3927 | MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); | |
3928 | } else { | |
3929 | trace_hdb->trigger_type = prev_trigger_type; | |
3930 | memcpy(&trace_hdb->trigger_data, &prev_trigger_data, | |
3931 | sizeof(prev_trigger_data)); | |
3932 | ioc_err(mrioc, "trace diag buffer repost failed"); | |
3933 | return -1; | |
3934 | } | |
3935 | } | |
3936 | ||
3937 | if (fw_repost_needed) { | |
3938 | prev_trigger_type = fw_hdb->trigger_type; | |
3939 | memcpy(&prev_trigger_data, &fw_hdb->trigger_data, | |
3940 | sizeof(fw_hdb->trigger_data)); | |
3941 | retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb); | |
3942 | if (!retval) { | |
3943 | dprint_init(mrioc, "firmware diag buffer reposted"); | |
3944 | mpi3mr_set_trigger_data_in_hdb(fw_hdb, | |
3945 | MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); | |
3946 | } else { | |
3947 | fw_hdb->trigger_type = prev_trigger_type; | |
3948 | memcpy(&fw_hdb->trigger_data, &prev_trigger_data, | |
3949 | sizeof(prev_trigger_data)); | |
3950 | ioc_err(mrioc, "firmware diag buffer repost failed"); | |
3951 | return -1; | |
3952 | } | |
3953 | } | |
3954 | return retval; | |
3955 | } | |
3956 | ||
fc1ddda3 RK |
3957 | /** |
3958 | * mpi3mr_read_tsu_interval - Update time stamp interval | |
3959 | * @mrioc: Adapter instance reference | |
3960 | * | |
3961 | * Update the time stamp interval if it is defined in driver page 1, |
3962 | * otherwise use default value. | |
3963 | * | |
3964 | * Return: Nothing | |
3965 | */ | |
3966 | static void | |
3967 | mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc) | |
3968 | { | |
3969 | struct mpi3_driver_page1 driver_pg1; | |
3970 | u16 pg_sz = sizeof(driver_pg1); | |
3971 | int retval = 0; | |
3972 | ||
3973 | mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL; | |
3974 | ||
3975 | retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); | |
3976 | if (!retval && driver_pg1.time_stamp_update) | |
3977 | mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60); | |
3978 | } | |
3979 | ||
ff9561e9 KD |
3980 | /** |
3981 | * mpi3mr_print_ioc_info - Display controller information | |
3982 | * @mrioc: Adapter instance reference | |
3983 | * | |
8c8e2422 | 3984 | * Display controller personality, capability, supported |
ff9561e9 KD |
3985 | * protocols etc. |
3986 | * | |
3987 | * Return: Nothing | |
3988 | */ | |
3989 | static void | |
3990 | mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) | |
3991 | { | |
76a4f7cc | 3992 | int i = 0, bytes_written = 0; |
8c8e2422 | 3993 | const char *personality; |
ff9561e9 KD |
3994 | char protocol[50] = {0}; |
3995 | char capabilities[100] = {0}; | |
ff9561e9 KD |
3996 | struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; |
3997 | ||
3998 | switch (mrioc->facts.personality) { | |
3999 | case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: | |
8c8e2422 | 4000 | personality = "Enhanced HBA"; |
ff9561e9 KD |
4001 | break; |
4002 | case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: | |
8c8e2422 | 4003 | personality = "RAID"; |
ff9561e9 KD |
4004 | break; |
4005 | default: | |
8c8e2422 | 4006 | personality = "Unknown"; |
ff9561e9 KD |
4007 | break; |
4008 | } | |
4009 | ||
4010 | ioc_info(mrioc, "Running in %s Personality", personality); | |
4011 | ||
4012 | ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", | |
4013 | fwver->gen_major, fwver->gen_minor, fwver->ph_major, | |
4014 | fwver->ph_minor, fwver->cust_id, fwver->build_num); | |
4015 | ||
4016 | for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { | |
4017 | if (mrioc->facts.protocol_flags & | |
4018 | mpi3mr_protocols[i].protocol) { | |
30e99f05 | 4019 | bytes_written += scnprintf(protocol + bytes_written, |
76a4f7cc DC |
4020 | sizeof(protocol) - bytes_written, "%s%s", |
4021 | bytes_written ? "," : "", | |
ff9561e9 | 4022 | mpi3mr_protocols[i].name); |
ff9561e9 KD |
4023 | } |
4024 | } | |
4025 | ||
76a4f7cc | 4026 | bytes_written = 0; |
ff9561e9 KD |
4027 | for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { |
4028 | if (mrioc->facts.protocol_flags & | |
4029 | mpi3mr_capabilities[i].capability) { | |
30e99f05 | 4030 | bytes_written += scnprintf(capabilities + bytes_written, |
76a4f7cc DC |
4031 | sizeof(capabilities) - bytes_written, "%s%s", |
4032 | bytes_written ? "," : "", | |
ff9561e9 | 4033 | mpi3mr_capabilities[i].name); |
ff9561e9 KD |
4034 | } |
4035 | } | |
4036 | ||
4037 | ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", | |
76a4f7cc | 4038 | protocol, capabilities); |
ff9561e9 KD |
4039 | } |
4040 | ||
824a1566 KD |
4041 | /** |
4042 | * mpi3mr_cleanup_resources - Free PCI resources | |
4043 | * @mrioc: Adapter instance reference | |
4044 | * | |
4045 | * Unmap PCI device memory and disable PCI device. | |
4046 | * | |
4047 | * Return: 0 on success and non-zero on failure. | |
4048 | */ | |
4049 | void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) | |
4050 | { | |
4051 | struct pci_dev *pdev = mrioc->pdev; | |
4052 | ||
4053 | mpi3mr_cleanup_isr(mrioc); | |
4054 | ||
4055 | if (mrioc->sysif_regs) { | |
4056 | iounmap((void __iomem *)mrioc->sysif_regs); | |
4057 | mrioc->sysif_regs = NULL; | |
4058 | } | |
4059 | ||
4060 | if (pci_is_enabled(pdev)) { | |
4061 | if (mrioc->bars) | |
4062 | pci_release_selected_regions(pdev, mrioc->bars); | |
4063 | pci_disable_device(pdev); | |
4064 | } | |
4065 | } | |
4066 | ||
4067 | /** | |
4068 | * mpi3mr_setup_resources - Enable PCI resources | |
4069 | * @mrioc: Adapter instance reference | |
4070 | * | |
4071 | * Enable PCI device memory, MSI-X registers and set DMA mask. |
4072 | * | |
4073 | * Return: 0 on success and non-zero on failure. | |
4074 | */ | |
4075 | int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) | |
4076 | { | |
4077 | struct pci_dev *pdev = mrioc->pdev; | |
4078 | u32 memap_sz = 0; | |
4079 | int i, retval = 0, capb = 0; | |
4080 | u16 message_control; | |
4081 | u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : | |
d347a951 | 4082 | ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); |
824a1566 KD |
4083 | |
4084 | if (pci_enable_device_mem(pdev)) { | |
4085 | ioc_err(mrioc, "pci_enable_device_mem: failed\n"); | |
4086 | retval = -ENODEV; | |
4087 | goto out_failed; | |
4088 | } | |
4089 | ||
4090 | capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | |
4091 | if (!capb) { | |
4092 | ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); | |
4093 | retval = -ENODEV; | |
4094 | goto out_failed; | |
4095 | } | |
4096 | mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); | |
4097 | ||
4098 | if (pci_request_selected_regions(pdev, mrioc->bars, | |
4099 | mrioc->driver_name)) { | |
4100 | ioc_err(mrioc, "pci_request_selected_regions: failed\n"); | |
4101 | retval = -ENODEV; | |
4102 | goto out_failed; | |
4103 | } | |
4104 | ||
4105 | for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { | |
4106 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { | |
4107 | mrioc->sysif_regs_phys = pci_resource_start(pdev, i); | |
4108 | memap_sz = pci_resource_len(pdev, i); | |
4109 | mrioc->sysif_regs = | |
4110 | ioremap(mrioc->sysif_regs_phys, memap_sz); | |
4111 | break; | |
4112 | } | |
4113 | } | |
4114 | ||
4115 | pci_set_master(pdev); | |
4116 | ||
4117 | retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); | |
4118 | if (retval) { | |
4119 | if (dma_mask != DMA_BIT_MASK(32)) { | |
4120 | ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); | |
4121 | dma_mask = DMA_BIT_MASK(32); | |
4122 | retval = dma_set_mask_and_coherent(&pdev->dev, | |
4123 | dma_mask); | |
4124 | } | |
4125 | if (retval) { | |
4126 | mrioc->dma_mask = 0; | |
4127 | ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); | |
4128 | goto out_failed; | |
4129 | } | |
4130 | } | |
4131 | mrioc->dma_mask = dma_mask; | |
4132 | ||
4133 | if (!mrioc->sysif_regs) { | |
4134 | ioc_err(mrioc, | |
4135 | "Unable to map adapter memory or resource not found\n"); | |
4136 | retval = -EINVAL; | |
4137 | goto out_failed; | |
4138 | } | |
4139 | ||
4140 | pci_read_config_word(pdev, capb + 2, &message_control); | |
4141 | mrioc->msix_count = (message_control & 0x3FF) + 1; | |
4142 | ||
4143 | pci_save_state(pdev); | |
4144 | ||
4145 | pci_set_drvdata(pdev, mrioc->shost); | |
4146 | ||
4147 | mpi3mr_ioc_disable_intr(mrioc); | |
4148 | ||
4149 | ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", | |
4150 | (unsigned long long)mrioc->sysif_regs_phys, | |
4151 | mrioc->sysif_regs, memap_sz); | |
4152 | ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", | |
4153 | mrioc->msix_count); | |
afd3a579 SR |
4154 | |
4155 | if (!reset_devices && poll_queues > 0) | |
4156 | mrioc->requested_poll_qcount = min_t(int, poll_queues, | |
4157 | mrioc->msix_count - 2); | |
824a1566 KD |
4158 | return retval; |
4159 | ||
4160 | out_failed: | |
4161 | mpi3mr_cleanup_resources(mrioc); | |
4162 | return retval; | |
4163 | } | |
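/*
 * Illustrative sketch, not part of the driver: the MSI-X Message Control
 * register encodes "table size minus one" in its low bits, which is why the
 * code above masks the word read at capb + 2 and adds one to get the number
 * of vectors the device advertises.  A stand-alone decode with a made-up
 * register value:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t message_control = 0x805f;	/* hypothetical config-space value */
	unsigned int msix_count = (message_control & 0x3ff) + 1;

	/* prints "MSI-X vectors advertised: 96" */
	printf("MSI-X vectors advertised: %u\n", msix_count);
	return 0;
}
#endif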
4164 | ||
e3605f65 SR |
4165 | /** |
4166 | * mpi3mr_enable_events - Enable required events | |
4167 | * @mrioc: Adapter instance reference | |
4168 | * | |
4169 | * This routine unmasks the events required by the driver by | |
4170 | * sending the appropriate event mask bitmap through an event |
4171 | * notification request. | |
4172 | * | |
4173 | * Return: 0 on success and non-zero on failure. | |
4174 | */ | |
4175 | static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) | |
4176 | { | |
4177 | int retval = 0; | |
4178 | u32 i; | |
4179 | ||
4180 | for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) | |
4181 | mrioc->event_masks[i] = -1; | |
4182 | ||
4183 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); | |
4184 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); | |
4185 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); | |
4186 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); | |
7188c03f | 4187 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED); |
e3605f65 SR |
4188 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); |
4189 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); | |
4190 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); | |
4191 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); | |
4192 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); | |
4193 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); | |
78b76a07 | 4194 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); |
e3605f65 SR |
4195 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); |
4196 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); | |
d8d08d16 | 4197 | mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE); |
e3605f65 SR |
4198 | |
4199 | retval = mpi3mr_issue_event_notification(mrioc); | |
4200 | if (retval) | |
4201 | ioc_err(mrioc, "failed to issue event notification %d\n", | |
4202 | retval); | |
4203 | return retval; | |
4204 | } | |
4205 | ||
824a1566 KD |
4206 | /** |
4207 | * mpi3mr_init_ioc - Initialize the controller | |
4208 | * @mrioc: Adapter instance reference | |
4209 | * | |
4210 | * This is the controller initialization routine, executed either |
4211 | * after soft reset or from pci probe callback. | |
4212 | * Setup the required resources, memory map the controller | |
4213 | * registers, create admin and operational reply queue pairs, | |
4214 | * allocate required memory for reply pool, sense buffer pool, | |
4215 | * issue IOC init request to the firmware, unmask the events and | |
4216 | * issue port enable to discover SAS/SATA/NVMe devices and RAID | |
4217 | * volumes. | |
4218 | * | |
4219 | * Return: 0 on success and non-zero on failure. | |
4220 | */ | |
fe6db615 | 4221 | int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) |
824a1566 KD |
4222 | { |
4223 | int retval = 0; | |
fe6db615 | 4224 | u8 retry = 0; |
824a1566 | 4225 | struct mpi3_ioc_facts_data facts_data; |
f10af057 | 4226 | u32 sz; |
824a1566 | 4227 | |
fe6db615 | 4228 | retry_init: |
824a1566 KD |
4229 | retval = mpi3mr_bring_ioc_ready(mrioc); |
4230 | if (retval) { | |
4231 | ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", | |
4232 | retval); | |
fe6db615 | 4233 | goto out_failed_noretry; |
824a1566 KD |
4234 | } |
4235 | ||
fe6db615 SR |
4236 | retval = mpi3mr_setup_isr(mrioc, 1); |
4237 | if (retval) { | |
4238 | ioc_err(mrioc, "Failed to setup ISR error %d\n", | |
4239 | retval); | |
4240 | goto out_failed_noretry; | |
4241 | } | |
824a1566 KD |
4242 | |
4243 | retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); | |
4244 | if (retval) { | |
4245 | ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", | |
4246 | retval); | |
4247 | goto out_failed; | |
4248 | } | |
4249 | ||
c5758fc7 | 4250 | mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; |
d9adb81e | 4251 | mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512; |
f10af057 SR |
4252 | mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group; |
4253 | atomic_set(&mrioc->pend_large_data_sz, 0); | |
4254 | ||
c5758fc7 SR |
4255 | if (reset_devices) |
4256 | mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, | |
4257 | MPI3MR_HOST_IOS_KDUMP); | |
4258 | ||
c4723e68 | 4259 | if (!(mrioc->facts.ioc_capabilities & |
57a80be5 | 4260 | MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) { |
c4723e68 | 4261 | mrioc->sas_transport_enabled = 1; |
626665e9 SR |
4262 | mrioc->scsi_device_channel = 1; |
4263 | mrioc->shost->max_channel = 1; | |
176d4aa6 | 4264 | mrioc->shost->transportt = mpi3mr_transport_template; |
c4723e68 SR |
4265 | } |
4266 | ||
f08b24d8 RK |
4267 | if (mrioc->facts.max_req_limit) |
4268 | mrioc->prevent_reply_qfull = true; | |
4269 | ||
339a7b32 RK |
4270 | if (mrioc->facts.ioc_capabilities & |
4271 | MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED) | |
4272 | mrioc->seg_tb_support = true; | |
4273 | ||
c5758fc7 | 4274 | mrioc->reply_sz = mrioc->facts.reply_sz; |
fe6db615 SR |
4275 | |
4276 | retval = mpi3mr_check_reset_dma_mask(mrioc); | |
4277 | if (retval) { | |
4278 | ioc_err(mrioc, "Resetting dma mask failed %d\n", | |
4279 | retval); | |
4280 | goto out_failed_noretry; | |
824a1566 KD |
4281 | } |
4282 | ||
fc1ddda3 | 4283 | mpi3mr_read_tsu_interval(mrioc); |
ff9561e9 KD |
4284 | mpi3mr_print_ioc_info(mrioc); |
4285 | ||
fc444494 RK |
4286 | dprint_init(mrioc, "allocating host diag buffers\n"); |
4287 | mpi3mr_alloc_diag_bufs(mrioc); | |
4288 | ||
c432e167 C |
4289 | dprint_init(mrioc, "allocating ioctl dma buffers\n"); |
4290 | mpi3mr_alloc_ioctl_dma_memory(mrioc); | |
4291 | ||
fc444494 RK |
4292 | dprint_init(mrioc, "posting host diag buffers\n"); |
4293 | retval = mpi3mr_post_diag_bufs(mrioc); | |
4294 | ||
4295 | if (retval) | |
4296 | ioc_warn(mrioc, "failed to post host diag buffers\n"); | |
4297 | ||
c7983044 TH |
4298 | if (!mrioc->init_cmds.reply) { |
4299 | retval = mpi3mr_alloc_reply_sense_bufs(mrioc); | |
4300 | if (retval) { | |
4301 | ioc_err(mrioc, | |
4302 | "%s :Failed to allocate reply sense buffers %d\n", | |
4303 | __func__, retval); | |
4304 | goto out_failed_noretry; | |
4305 | } | |
824a1566 KD |
4306 | } |
4307 | ||
c7983044 TH |
4308 | if (!mrioc->chain_sgl_list) { |
4309 | retval = mpi3mr_alloc_chain_bufs(mrioc); | |
4310 | if (retval) { | |
4311 | ioc_err(mrioc, "Failed to allocate chain buffers %d\n", | |
4312 | retval); | |
4313 | goto out_failed_noretry; | |
4314 | } | |
824a1566 KD |
4315 | } |
4316 | ||
4317 | retval = mpi3mr_issue_iocinit(mrioc); | |
4318 | if (retval) { | |
4319 | ioc_err(mrioc, "Failed to Issue IOC Init %d\n", | |
4320 | retval); | |
4321 | goto out_failed; | |
4322 | } | |
824a1566 | 4323 | |
2ac794ba SR |
4324 | retval = mpi3mr_print_pkg_ver(mrioc); |
4325 | if (retval) { | |
4326 | ioc_err(mrioc, "failed to get package version\n"); | |
4327 | goto out_failed; | |
4328 | } | |
4329 | ||
fe6db615 SR |
4330 | retval = mpi3mr_setup_isr(mrioc, 0); |
4331 | if (retval) { | |
4332 | ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", | |
4333 | retval); | |
4334 | goto out_failed_noretry; | |
824a1566 KD |
4335 | } |
4336 | ||
c9566231 KD |
4337 | retval = mpi3mr_create_op_queues(mrioc); |
4338 | if (retval) { | |
4339 | ioc_err(mrioc, "Failed to create OpQueues error %d\n", | |
4340 | retval); | |
4341 | goto out_failed; | |
4342 | } | |
4343 | ||
43ca1100 SS |
4344 | if (!mrioc->pel_seqnum_virt) { |
4345 | dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n"); | |
4346 | mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); | |
4347 | mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, | |
4348 | mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, | |
4349 | GFP_KERNEL); | |
bc7896d3 DC |
4350 | if (!mrioc->pel_seqnum_virt) { |
4351 | retval = -ENOMEM; | |
43ca1100 | 4352 | goto out_failed_noretry; |
bc7896d3 | 4353 | } |
43ca1100 SS |
4354 | } |
4355 | ||
f10af057 SR |
4356 | if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) { |
4357 | dprint_init(mrioc, "allocating memory for throttle groups\n"); | |
4358 | sz = sizeof(struct mpi3mr_throttle_group_info); | |
c863a2dc | 4359 | mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL); |
ba8a9ba4 RK |
4360 | if (!mrioc->throttle_groups) { |
4361 | retval = -1; | |
f10af057 | 4362 | goto out_failed_noretry; |
ba8a9ba4 | 4363 | } |
f10af057 SR |
4364 | } |
4365 | ||
e3605f65 | 4366 | retval = mpi3mr_enable_events(mrioc); |
13ef29ea | 4367 | if (retval) { |
e3605f65 | 4368 | ioc_err(mrioc, "failed to enable events %d\n", |
13ef29ea KD |
4369 | retval); |
4370 | goto out_failed; | |
4371 | } | |
4372 | ||
d8d08d16 RK |
4373 | retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); |
4374 | if (retval) { | |
4375 | ioc_err(mrioc, "failed to refresh triggers\n"); | |
4376 | goto out_failed; | |
4377 | } | |
4378 | ||
fe6db615 | 4379 | ioc_info(mrioc, "controller initialization completed successfully\n"); |
824a1566 | 4380 | return retval; |
824a1566 | 4381 | out_failed: |
fe6db615 SR |
4382 | if (retry < 2) { |
4383 | retry++; | |
4384 | ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", | |
4385 | retry); | |
4386 | mpi3mr_memset_buffers(mrioc); | |
4387 | goto retry_init; | |
4388 | } | |
ba8a9ba4 | 4389 | retval = -1; |
fe6db615 SR |
4390 | out_failed_noretry: |
4391 | ioc_err(mrioc, "controller initialization failed\n"); | |
4392 | mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, | |
4393 | MPI3MR_RESET_FROM_CTLR_CLEANUP); | |
4394 | mrioc->unrecoverable = 1; | |
824a1566 KD |
4395 | return retval; |
4396 | } | |
4397 | ||
c0b00a93 SR |
4398 | /** |
4399 | * mpi3mr_reinit_ioc - Re-Initialize the controller | |
4400 | * @mrioc: Adapter instance reference | |
4401 | * @is_resume: Called from resume or reset path | |
4402 | * | |
4403 | * This is the controller re-initialization routine, executed from | |
4404 | * the soft reset handler or resume callback. Creates | |
4405 | * operational reply queue pairs, allocates required memory for | |
4406 | * the reply pool and sense buffer pool, issues the IOC init request | |
4407 | * to the firmware, unmasks the events and issues port enable to discover | |
4408 | * SAS/SATA/NVMe devices and RAID volumes. | |
4409 | * | |
4410 | * Return: 0 on success and non-zero on failure. | |
4411 | */ | |
fe6db615 SR |
4412 | int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) |
4413 | { | |
c0b00a93 SR |
4414 | int retval = 0; |
4415 | u8 retry = 0; | |
4416 | struct mpi3_ioc_facts_data facts_data; | |
f2a79d20 | 4417 | u32 pe_timeout, ioc_status; |
fe6db615 | 4418 | |
c0b00a93 | 4419 | retry_init: |
f2a79d20 SR |
4420 | pe_timeout = |
4421 | (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL); | |
4422 | ||
c0b00a93 SR |
4423 | dprint_reset(mrioc, "bringing up the controller to ready state\n"); |
4424 | retval = mpi3mr_bring_ioc_ready(mrioc); | |
4425 | if (retval) { | |
4426 | ioc_err(mrioc, "failed to bring to ready state\n"); | |
4427 | goto out_failed_noretry; | |
4428 | } | |
4429 | ||
f195fc06 | 4430 | mrioc->io_admin_reset_sync = 0; |
1c342b05 | 4431 | if (is_resume || mrioc->block_on_pci_err) { |
c0b00a93 SR |
4432 | dprint_reset(mrioc, "setting up single ISR\n"); |
4433 | retval = mpi3mr_setup_isr(mrioc, 1); | |
4434 | if (retval) { | |
4435 | ioc_err(mrioc, "failed to setup ISR\n"); | |
4436 | goto out_failed_noretry; | |
4437 | } | |
4438 | } else | |
4439 | mpi3mr_ioc_enable_intr(mrioc); | |
4440 | ||
4441 | dprint_reset(mrioc, "getting ioc_facts\n"); | |
4442 | retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); | |
4443 | if (retval) { | |
4444 | ioc_err(mrioc, "failed to get ioc_facts\n"); | |
4445 | goto out_failed; | |
4446 | } | |
4447 | ||
c5758fc7 SR |
4448 | dprint_reset(mrioc, "validating ioc_facts\n"); |
4449 | retval = mpi3mr_revalidate_factsdata(mrioc); | |
4450 | if (retval) { | |
4451 | ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); | |
4452 | goto out_failed_noretry; | |
4453 | } | |
c0b00a93 | 4454 | |
fc1ddda3 | 4455 | mpi3mr_read_tsu_interval(mrioc); |
c0b00a93 SR |
4456 | mpi3mr_print_ioc_info(mrioc); |
4457 | ||
fc444494 RK |
4458 | if (is_resume) { |
4459 | dprint_reset(mrioc, "posting host diag buffers\n"); | |
4460 | retval = mpi3mr_post_diag_bufs(mrioc); | |
4461 | if (retval) | |
4462 | ioc_warn(mrioc, "failed to post host diag buffers\n"); | |
4463 | } else { | |
4464 | retval = mpi3mr_repost_diag_bufs(mrioc); | |
4465 | if (retval) | |
4466 | ioc_warn(mrioc, "failed to re post host diag buffers\n"); | |
4467 | } | |
4468 | ||
c0b00a93 SR |
4469 | dprint_reset(mrioc, "sending ioc_init\n"); |
4470 | retval = mpi3mr_issue_iocinit(mrioc); | |
4471 | if (retval) { | |
4472 | ioc_err(mrioc, "failed to send ioc_init\n"); | |
4473 | goto out_failed; | |
4474 | } | |
4475 | ||
4476 | dprint_reset(mrioc, "getting package version\n"); | |
4477 | retval = mpi3mr_print_pkg_ver(mrioc); | |
4478 | if (retval) { | |
4479 | ioc_err(mrioc, "failed to get package version\n"); | |
4480 | goto out_failed; | |
4481 | } | |
4482 | ||
1c342b05 | 4483 | if (is_resume || mrioc->block_on_pci_err) { |
c0b00a93 SR |
4484 | dprint_reset(mrioc, "setting up multiple ISR\n"); |
4485 | retval = mpi3mr_setup_isr(mrioc, 0); | |
4486 | if (retval) { | |
4487 | ioc_err(mrioc, "failed to re-setup ISR\n"); | |
4488 | goto out_failed_noretry; | |
4489 | } | |
4490 | } | |
4491 | ||
4492 | dprint_reset(mrioc, "creating operational queue pairs\n"); | |
4493 | retval = mpi3mr_create_op_queues(mrioc); | |
4494 | if (retval) { | |
4495 | ioc_err(mrioc, "failed to create operational queue pairs\n"); | |
4496 | goto out_failed; | |
4497 | } | |
4498 | ||
43ca1100 SS |
4499 | if (!mrioc->pel_seqnum_virt) { |
4500 | dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n"); | |
4501 | mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); | |
4502 | mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, | |
4503 | mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, | |
4504 | GFP_KERNEL); | |
bc7896d3 DC |
4505 | if (!mrioc->pel_seqnum_virt) { |
4506 | retval = -ENOMEM; | |
43ca1100 | 4507 | goto out_failed_noretry; |
bc7896d3 | 4508 | } |
43ca1100 SS |
4509 | } |
4510 | ||
c0b00a93 SR |
4511 | if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { |
4512 | ioc_err(mrioc, | |
5867b856 | 4513 | "cannot create minimum number of operational queues expected:%d created:%d\n", |
c0b00a93 | 4514 | mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); |
ba8a9ba4 | 4515 | retval = -1; |
c0b00a93 SR |
4516 | goto out_failed_noretry; |
4517 | } | |
4518 | ||
4519 | dprint_reset(mrioc, "enabling events\n"); | |
4520 | retval = mpi3mr_enable_events(mrioc); | |
4521 | if (retval) { | |
4522 | ioc_err(mrioc, "failed to enable events\n"); | |
4523 | goto out_failed; | |
4524 | } | |
4525 | ||
f84e8b5b SR |
4526 | mrioc->device_refresh_on = 1; |
4527 | mpi3mr_add_event_wait_for_device_refresh(mrioc); | |
2745ce0e | 4528 | |
c0b00a93 | 4529 | ioc_info(mrioc, "sending port enable\n"); |
f2a79d20 | 4530 | retval = mpi3mr_issue_port_enable(mrioc, 1); |
c0b00a93 SR |
4531 | if (retval) { |
4532 | ioc_err(mrioc, "failed to issue port enable\n"); | |
4533 | goto out_failed; | |
4534 | } | |
f2a79d20 SR |
4535 | do { |
4536 | ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL); | |
4537 | if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED) | |
4538 | break; | |
4539 | if (!pci_device_is_present(mrioc->pdev)) | |
4540 | mrioc->unrecoverable = 1; | |
4541 | if (mrioc->unrecoverable) { | |
4542 | retval = -1; | |
4543 | goto out_failed_noretry; | |
4544 | } | |
4545 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
4546 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || | |
4547 | (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { | |
4548 | mpi3mr_print_fault_info(mrioc); | |
4549 | mrioc->init_cmds.is_waiting = 0; | |
4550 | mrioc->init_cmds.callback = NULL; | |
4551 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
4552 | goto out_failed; | |
4553 | } | |
4554 | } while (--pe_timeout); | |
4555 | ||
4556 | if (!pe_timeout) { | |
4557 | ioc_err(mrioc, "port enable timed out\n"); | |
4558 | mpi3mr_check_rh_fault_ioc(mrioc, | |
4559 | MPI3MR_RESET_FROM_PE_TIMEOUT); | |
4560 | mrioc->init_cmds.is_waiting = 0; | |
4561 | mrioc->init_cmds.callback = NULL; | |
4562 | mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; | |
4563 | goto out_failed; | |
4564 | } else if (mrioc->scan_failed) { | |
4565 | ioc_err(mrioc, | |
4566 | "port enable failed with status=0x%04x\n", | |
4567 | mrioc->scan_failed); | |
4568 | } else | |
4569 | ioc_info(mrioc, "port enable completed successfully\n"); | |
c0b00a93 SR |
4570 | |
4571 | ioc_info(mrioc, "controller %s completed successfully\n", | |
4572 | (is_resume)?"resume":"re-initialization"); | |
4573 | return retval; | |
4574 | out_failed: | |
4575 | if (retry < 2) { | |
4576 | retry++; | |
4577 | ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", | |
4578 | (is_resume)?"resume":"re-initialization", retry); | |
4579 | mpi3mr_memset_buffers(mrioc); | |
4580 | goto retry_init; | |
4581 | } | |
ba8a9ba4 | 4582 | retval = -1; |
c0b00a93 SR |
4583 | out_failed_noretry: |
4584 | ioc_err(mrioc, "controller %s failed\n", | |
4585 | (is_resume)?"resume":"re-initialization"); | |
4586 | mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, | |
4587 | MPI3MR_RESET_FROM_CTLR_CLEANUP); | |
4588 | mrioc->unrecoverable = 1; | |
4589 | return retval; | |
fe6db615 SR |
4590 | } |
4591 | ||
fb9b0457 KD |
4592 | /** |
4593 | * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's | |
4594 | * segments | |
4595 | * @mrioc: Adapter instance reference | |
4596 | * @qidx: Operational reply queue index | |
4597 | * | |
4598 | * Return: Nothing. | |
4599 | */ | |
4600 | static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) | |
4601 | { | |
4602 | struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; | |
4603 | struct segments *segments; | |
4604 | int i, size; | |
4605 | ||
4606 | if (!op_reply_q->q_segments) | |
4607 | return; | |
4608 | ||
4609 | size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; | |
4610 | segments = op_reply_q->q_segments; | |
4611 | for (i = 0; i < op_reply_q->num_segments; i++) | |
4612 | memset(segments[i].segment, 0, size); | |
4613 | } | |
4614 | ||
4615 | /** | |
4616 | * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's | |
4617 | * segments | |
4618 | * @mrioc: Adapter instance reference | |
4619 | * @qidx: Operational request queue index | |
4620 | * | |
4621 | * Return: Nothing. | |
4622 | */ | |
4623 | static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) | |
4624 | { | |
4625 | struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; | |
4626 | struct segments *segments; | |
4627 | int i, size; | |
4628 | ||
4629 | if (!op_req_q->q_segments) | |
4630 | return; | |
4631 | ||
4632 | size = op_req_q->segment_qd * mrioc->facts.op_req_sz; | |
4633 | segments = op_req_q->q_segments; | |
4634 | for (i = 0; i < op_req_q->num_segments; i++) | |
4635 | memset(segments[i].segment, 0, size); | |
4636 | } | |
4637 | ||
4638 | /** | |
4639 | * mpi3mr_memset_buffers - memset memory for a controller | |
4640 | * @mrioc: Adapter instance reference | |
4641 | * | |
4642 | * clear all the memory allocated for a controller, typically | |
4643 | * called post reset to reuse the memory allocated during the | |
4644 | * controller init. | |
4645 | * | |
4646 | * Return: Nothing. | |
4647 | */ | |
0da66348 | 4648 | void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) |
fb9b0457 KD |
4649 | { |
4650 | u16 i; | |
f10af057 | 4651 | struct mpi3mr_throttle_group_info *tg; |
fb9b0457 | 4652 | |
fe6db615 | 4653 | mrioc->change_count = 0; |
afd3a579 SR |
4654 | mrioc->active_poll_qcount = 0; |
4655 | mrioc->default_qcount = 0; | |
fe6db615 SR |
4656 | if (mrioc->admin_req_base) |
4657 | memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); | |
4658 | if (mrioc->admin_reply_base) | |
4659 | memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); | |
02ca7da2 | 4660 | atomic_set(&mrioc->admin_reply_q_in_use, 0); |
3b5091fe | 4661 | atomic_set(&mrioc->admin_pend_isr, 0); |
fe6db615 SR |
4662 | |
4663 | if (mrioc->init_cmds.reply) { | |
4664 | memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); | |
f5e6d5a3 SS |
4665 | memset(mrioc->bsg_cmds.reply, 0, |
4666 | sizeof(*mrioc->bsg_cmds.reply)); | |
fe6db615 SR |
4667 | memset(mrioc->host_tm_cmds.reply, 0, |
4668 | sizeof(*mrioc->host_tm_cmds.reply)); | |
43ca1100 SS |
4669 | memset(mrioc->pel_cmds.reply, 0, |
4670 | sizeof(*mrioc->pel_cmds.reply)); | |
4671 | memset(mrioc->pel_abort_cmd.reply, 0, | |
4672 | sizeof(*mrioc->pel_abort_cmd.reply)); | |
2bd37e28 SR |
4673 | memset(mrioc->transport_cmds.reply, 0, |
4674 | sizeof(*mrioc->transport_cmds.reply)); | |
fe6db615 SR |
4675 | for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) |
4676 | memset(mrioc->dev_rmhs_cmds[i].reply, 0, | |
4677 | sizeof(*mrioc->dev_rmhs_cmds[i].reply)); | |
c1af985d SR |
4678 | for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) |
4679 | memset(mrioc->evtack_cmds[i].reply, 0, | |
4680 | sizeof(*mrioc->evtack_cmds[i].reply)); | |
339e6156 SK |
4681 | bitmap_clear(mrioc->removepend_bitmap, 0, |
4682 | mrioc->dev_handle_bitmap_bits); | |
4683 | bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); | |
4684 | bitmap_clear(mrioc->evtack_cmds_bitmap, 0, | |
4685 | MPI3MR_NUM_EVTACKCMD); | |
fe6db615 | 4686 | } |
fb9b0457 KD |
4687 | |
4688 | for (i = 0; i < mrioc->num_queues; i++) { | |
4689 | mrioc->op_reply_qinfo[i].qid = 0; | |
4690 | mrioc->op_reply_qinfo[i].ci = 0; | |
4691 | mrioc->op_reply_qinfo[i].num_replies = 0; | |
4692 | mrioc->op_reply_qinfo[i].ephase = 0; | |
463429f8 KD |
4693 | atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); |
4694 | atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); | |
fb9b0457 KD |
4695 | mpi3mr_memset_op_reply_q_buffers(mrioc, i); |
4696 | ||
4697 | mrioc->req_qinfo[i].ci = 0; | |
4698 | mrioc->req_qinfo[i].pi = 0; | |
4699 | mrioc->req_qinfo[i].num_requests = 0; | |
4700 | mrioc->req_qinfo[i].qid = 0; | |
4701 | mrioc->req_qinfo[i].reply_qid = 0; | |
4702 | spin_lock_init(&mrioc->req_qinfo[i].q_lock); | |
4703 | mpi3mr_memset_op_req_q_buffers(mrioc, i); | |
4704 | } | |
f10af057 SR |
4705 | |
4706 | atomic_set(&mrioc->pend_large_data_sz, 0); | |
4707 | if (mrioc->throttle_groups) { | |
4708 | tg = mrioc->throttle_groups; | |
4709 | for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) { | |
4710 | tg->id = 0; | |
cf1ce8b7 SR |
4711 | tg->fw_qd = 0; |
4712 | tg->modified_qd = 0; | |
f10af057 | 4713 | tg->io_divert = 0; |
cf1ce8b7 | 4714 | tg->need_qd_reduction = 0; |
f10af057 SR |
4715 | tg->high = 0; |
4716 | tg->low = 0; | |
cf1ce8b7 | 4717 | tg->qd_reduction = 0; |
f10af057 SR |
4718 | atomic_set(&tg->pend_large_data_sz, 0); |
4719 | } | |
4720 | } | |
fb9b0457 KD |
4721 | } |
4722 | ||
824a1566 KD |
4723 | /** |
4724 | * mpi3mr_free_mem - Free memory allocated for a controller | |
4725 | * @mrioc: Adapter instance reference | |
4726 | * | |
4727 | * Free all the memory allocated for a controller. | |
4728 | * | |
4729 | * Return: Nothing. | |
4730 | */ | |
fe6db615 | 4731 | void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) |
824a1566 | 4732 | { |
339a7b32 | 4733 | u16 i, j; |
824a1566 | 4734 | struct mpi3mr_intr_info *intr_info; |
fc444494 | 4735 | struct diag_buffer_desc *diag_buffer; |
824a1566 | 4736 | |
130fc180 | 4737 | mpi3mr_free_enclosure_list(mrioc); |
c432e167 | 4738 | mpi3mr_free_ioctl_dma_memory(mrioc); |
130fc180 | 4739 | |
824a1566 KD |
4740 | if (mrioc->sense_buf_pool) { |
4741 | if (mrioc->sense_buf) | |
4742 | dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, | |
4743 | mrioc->sense_buf_dma); | |
4744 | dma_pool_destroy(mrioc->sense_buf_pool); | |
4745 | mrioc->sense_buf = NULL; | |
4746 | mrioc->sense_buf_pool = NULL; | |
4747 | } | |
4748 | if (mrioc->sense_buf_q_pool) { | |
4749 | if (mrioc->sense_buf_q) | |
4750 | dma_pool_free(mrioc->sense_buf_q_pool, | |
4751 | mrioc->sense_buf_q, mrioc->sense_buf_q_dma); | |
4752 | dma_pool_destroy(mrioc->sense_buf_q_pool); | |
4753 | mrioc->sense_buf_q = NULL; | |
4754 | mrioc->sense_buf_q_pool = NULL; | |
4755 | } | |
4756 | ||
4757 | if (mrioc->reply_buf_pool) { | |
4758 | if (mrioc->reply_buf) | |
4759 | dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, | |
4760 | mrioc->reply_buf_dma); | |
4761 | dma_pool_destroy(mrioc->reply_buf_pool); | |
4762 | mrioc->reply_buf = NULL; | |
4763 | mrioc->reply_buf_pool = NULL; | |
4764 | } | |
4765 | if (mrioc->reply_free_q_pool) { | |
4766 | if (mrioc->reply_free_q) | |
4767 | dma_pool_free(mrioc->reply_free_q_pool, | |
4768 | mrioc->reply_free_q, mrioc->reply_free_q_dma); | |
4769 | dma_pool_destroy(mrioc->reply_free_q_pool); | |
4770 | mrioc->reply_free_q = NULL; | |
4771 | mrioc->reply_free_q_pool = NULL; | |
4772 | } | |
4773 | ||
c9566231 KD |
4774 | for (i = 0; i < mrioc->num_op_req_q; i++) |
4775 | mpi3mr_free_op_req_q_segments(mrioc, i); | |
4776 | ||
4777 | for (i = 0; i < mrioc->num_op_reply_q; i++) | |
4778 | mpi3mr_free_op_reply_q_segments(mrioc, i); | |
4779 | ||
824a1566 KD |
4780 | for (i = 0; i < mrioc->intr_info_count; i++) { |
4781 | intr_info = mrioc->intr_info + i; | |
d46bdecd | 4782 | intr_info->op_reply_q = NULL; |
824a1566 KD |
4783 | } |
4784 | ||
4785 | kfree(mrioc->req_qinfo); | |
4786 | mrioc->req_qinfo = NULL; | |
4787 | mrioc->num_op_req_q = 0; | |
4788 | ||
4789 | kfree(mrioc->op_reply_qinfo); | |
4790 | mrioc->op_reply_qinfo = NULL; | |
4791 | mrioc->num_op_reply_q = 0; | |
4792 | ||
4793 | kfree(mrioc->init_cmds.reply); | |
4794 | mrioc->init_cmds.reply = NULL; | |
4795 | ||
f5e6d5a3 SS |
4796 | kfree(mrioc->bsg_cmds.reply); |
4797 | mrioc->bsg_cmds.reply = NULL; | |
4798 | ||
e844adb1 KD |
4799 | kfree(mrioc->host_tm_cmds.reply); |
4800 | mrioc->host_tm_cmds.reply = NULL; | |
4801 | ||
43ca1100 SS |
4802 | kfree(mrioc->pel_cmds.reply); |
4803 | mrioc->pel_cmds.reply = NULL; | |
4804 | ||
4805 | kfree(mrioc->pel_abort_cmd.reply); | |
4806 | mrioc->pel_abort_cmd.reply = NULL; | |
4807 | ||
c1af985d SR |
4808 | for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { |
4809 | kfree(mrioc->evtack_cmds[i].reply); | |
4810 | mrioc->evtack_cmds[i].reply = NULL; | |
4811 | } | |
4812 | ||
339e6156 | 4813 | bitmap_free(mrioc->removepend_bitmap); |
e844adb1 KD |
4814 | mrioc->removepend_bitmap = NULL; |
4815 | ||
339e6156 | 4816 | bitmap_free(mrioc->devrem_bitmap); |
e844adb1 KD |
4817 | mrioc->devrem_bitmap = NULL; |
4818 | ||
339e6156 | 4819 | bitmap_free(mrioc->evtack_cmds_bitmap); |
c1af985d SR |
4820 | mrioc->evtack_cmds_bitmap = NULL; |
4821 | ||
339e6156 | 4822 | bitmap_free(mrioc->chain_bitmap); |
824a1566 KD |
4823 | mrioc->chain_bitmap = NULL; |
4824 | ||
2bd37e28 SR |
4825 | kfree(mrioc->transport_cmds.reply); |
4826 | mrioc->transport_cmds.reply = NULL; | |
4827 | ||
13ef29ea KD |
4828 | for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { |
4829 | kfree(mrioc->dev_rmhs_cmds[i].reply); | |
4830 | mrioc->dev_rmhs_cmds[i].reply = NULL; | |
4831 | } | |
4832 | ||
824a1566 KD |
4833 | if (mrioc->chain_buf_pool) { |
4834 | for (i = 0; i < mrioc->chain_buf_count; i++) { | |
4835 | if (mrioc->chain_sgl_list[i].addr) { | |
4836 | dma_pool_free(mrioc->chain_buf_pool, | |
4837 | mrioc->chain_sgl_list[i].addr, | |
4838 | mrioc->chain_sgl_list[i].dma_addr); | |
4839 | mrioc->chain_sgl_list[i].addr = NULL; | |
4840 | } | |
4841 | } | |
4842 | dma_pool_destroy(mrioc->chain_buf_pool); | |
4843 | mrioc->chain_buf_pool = NULL; | |
4844 | } | |
4845 | ||
4846 | kfree(mrioc->chain_sgl_list); | |
4847 | mrioc->chain_sgl_list = NULL; | |
4848 | ||
4849 | if (mrioc->admin_reply_base) { | |
4850 | dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, | |
4851 | mrioc->admin_reply_base, mrioc->admin_reply_dma); | |
4852 | mrioc->admin_reply_base = NULL; | |
4853 | } | |
4854 | if (mrioc->admin_req_base) { | |
4855 | dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, | |
4856 | mrioc->admin_req_base, mrioc->admin_req_dma); | |
4857 | mrioc->admin_req_base = NULL; | |
4858 | } | |
711201a8 | 4859 | |
43ca1100 SS |
4860 | if (mrioc->pel_seqnum_virt) { |
4861 | dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, | |
4862 | mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); | |
4863 | mrioc->pel_seqnum_virt = NULL; | |
4864 | } | |
4865 | ||
fc444494 RK |
4866 | for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { |
4867 | diag_buffer = &mrioc->diag_buffers[i]; | |
339a7b32 RK |
4868 | if ((i == 0) && mrioc->seg_tb_support) { |
4869 | if (mrioc->trace_buf_pool) { | |
4870 | for (j = 0; j < mrioc->num_tb_segs; j++) { | |
4871 | if (mrioc->trace_buf[j].segment) { | |
4872 | dma_pool_free(mrioc->trace_buf_pool, | |
4873 | mrioc->trace_buf[j].segment, | |
4874 | mrioc->trace_buf[j].segment_dma); | |
4875 | mrioc->trace_buf[j].segment = NULL; | |
4876 | } | |
4877 | ||
4878 | mrioc->trace_buf[j].segment = NULL; | |
4879 | } | |
4880 | dma_pool_destroy(mrioc->trace_buf_pool); | |
4881 | mrioc->trace_buf_pool = NULL; | |
4882 | } | |
4883 | ||
4884 | kfree(mrioc->trace_buf); | |
4885 | mrioc->trace_buf = NULL; | |
4886 | diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs; | |
4887 | } | |
fc444494 RK |
4888 | if (diag_buffer->addr) { |
4889 | dma_free_coherent(&mrioc->pdev->dev, | |
4890 | diag_buffer->size, diag_buffer->addr, | |
4891 | diag_buffer->dma_addr); | |
4892 | diag_buffer->addr = NULL; | |
4893 | diag_buffer->size = 0; | |
4894 | diag_buffer->type = 0; | |
4895 | diag_buffer->status = 0; | |
4896 | } | |
4897 | } | |
4898 | ||
f305a7b6 TH |
4899 | kfree(mrioc->throttle_groups); |
4900 | mrioc->throttle_groups = NULL; | |
4901 | ||
43ca1100 SS |
4902 | kfree(mrioc->logdata_buf); |
4903 | mrioc->logdata_buf = NULL; | |
4904 | ||
824a1566 KD |
4905 | } |
4906 | ||
4907 | /** | |
4908 | * mpi3mr_issue_ioc_shutdown - shutdown controller | |
4909 | * @mrioc: Adapter instance reference | |
4910 | * | |
4911 | * Send a shutdown notification to the controller and wait up to | |
4912 | * the shutdown_timeout for it to complete. | |
4913 | * | |
4914 | * Return: Nothing. | |
4915 | */ | |
4916 | static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) | |
4917 | { | |
4918 | u32 ioc_config, ioc_status; | |
4919 | u8 retval = 1; | |
4920 | u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10; | |
4921 | ||
4922 | ioc_info(mrioc, "Issuing shutdown Notification\n"); | |
4923 | if (mrioc->unrecoverable) { | |
4924 | ioc_warn(mrioc, | |
4925 | "IOC is unrecoverable, shutdown is not issued\n"); | |
4926 | return; | |
4927 | } | |
4928 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
4929 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) | |
4930 | == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) { | |
4931 | ioc_info(mrioc, "shutdown already in progress\n"); | |
4932 | return; | |
4933 | } | |
4934 | ||
4935 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
4936 | ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; | |
ec5ebd2c | 4937 | ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; |
824a1566 KD |
4938 | |
4939 | writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); | |
4940 | ||
4941 | if (mrioc->facts.shutdown_timeout) | |
4942 | timeout = mrioc->facts.shutdown_timeout * 10; | |
4943 | ||
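| /* facts.shutdown_timeout is in seconds; the poll loop below sleeps 100 ms per iteration, hence the scale by 10 */ | |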
4944 | do { | |
4945 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
4946 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) | |
4947 | == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) { | |
4948 | retval = 0; | |
4949 | break; | |
4950 | } | |
4951 | msleep(100); | |
4952 | } while (--timeout); | |
4953 | ||
4954 | ioc_status = readl(&mrioc->sysif_regs->ioc_status); | |
4955 | ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); | |
4956 | ||
4957 | if (retval) { | |
4958 | if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) | |
4959 | == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) | |
4960 | ioc_warn(mrioc, | |
4961 | "shutdown still in progress after timeout\n"); | |
4962 | } | |
4963 | ||
4964 | ioc_info(mrioc, | |
339a7b32 | 4965 | "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n", |
824a1566 KD |
4966 | (!retval) ? "successful" : "failed", ioc_status, |
4967 | ioc_config); | |
4968 | } | |
4969 | ||
4970 | /** | |
4971 | * mpi3mr_cleanup_ioc - Cleanup controller | |
4972 | * @mrioc: Adapter instance reference | |
3bb3c24e | 4973 | * |
824a1566 | 4974 | * Controller cleanup handler. A message unit reset or soft reset |
fe6db615 | 4975 | * and a shutdown notification are issued to the controller. |
824a1566 KD |
4976 | * |
4977 | * Return: Nothing. | |
4978 | */ | |
fe6db615 | 4979 | void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) |
824a1566 KD |
4980 | { |
4981 | enum mpi3mr_iocstate ioc_state; | |
4982 | ||
fe6db615 | 4983 | dprint_exit(mrioc, "cleaning up the controller\n"); |
824a1566 KD |
4984 | mpi3mr_ioc_disable_intr(mrioc); |
4985 | ||
4986 | ioc_state = mpi3mr_get_iocstate(mrioc); | |
4987 | ||
1c342b05 SS |
4988 | if (!mrioc->unrecoverable && !mrioc->reset_in_progress && |
4989 | !mrioc->pci_err_recovery && | |
824a1566 KD |
4990 | (ioc_state == MRIOC_STATE_READY)) { |
4991 | if (mpi3mr_issue_and_process_mur(mrioc, | |
4992 | MPI3MR_RESET_FROM_CTLR_CLEANUP)) | |
4993 | mpi3mr_issue_reset(mrioc, | |
4994 | MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, | |
4995 | MPI3MR_RESET_FROM_MUR_FAILURE); | |
fe6db615 | 4996 | mpi3mr_issue_ioc_shutdown(mrioc); |
fb9b0457 | 4997 | } |
fe6db615 | 4998 | dprint_exit(mrioc, "controller cleanup completed\n"); |
fb9b0457 KD |
4999 | } |
5000 | ||
5001 | /** | |
5002 | * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command | |
5003 | * @mrioc: Adapter instance reference | |
5004 | * @cmdptr: Internal command tracker | |
5005 | * | |
5006 | * Complete an internal driver command with a state indicating it | |
5007 | * was completed due to reset. | |
5008 | * | |
5009 | * Return: Nothing. | |
5010 | */ | |
5011 | static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc, | |
5012 | struct mpi3mr_drv_cmd *cmdptr) | |
5013 | { | |
5014 | if (cmdptr->state & MPI3MR_CMD_PENDING) { | |
5015 | cmdptr->state |= MPI3MR_CMD_RESET; | |
5016 | cmdptr->state &= ~MPI3MR_CMD_PENDING; | |
5017 | if (cmdptr->is_waiting) { | |
5018 | complete(&cmdptr->done); | |
5019 | cmdptr->is_waiting = 0; | |
5020 | } else if (cmdptr->callback) | |
5021 | cmdptr->callback(mrioc, cmdptr); | |
5022 | } | |
5023 | } | |
5024 | ||
5025 | /** | |
5026 | * mpi3mr_flush_drv_cmds - Flush internal driver commands | |
5027 | * @mrioc: Adapter instance reference | |
5028 | * | |
5029 | * Flush all internal driver commands post reset | |
5030 | * | |
5031 | * Return: Nothing. | |
5032 | */ | |
f2a79d20 | 5033 | void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) |
fb9b0457 KD |
5034 | { |
5035 | struct mpi3mr_drv_cmd *cmdptr; | |
5036 | u8 i; | |
5037 | ||
5038 | cmdptr = &mrioc->init_cmds; | |
5039 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
32d457d5 SR |
5040 | |
5041 | cmdptr = &mrioc->cfg_cmds; | |
5042 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
5043 | ||
f5e6d5a3 SS |
5044 | cmdptr = &mrioc->bsg_cmds; |
5045 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
e844adb1 KD |
5046 | cmdptr = &mrioc->host_tm_cmds; |
5047 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
fb9b0457 KD |
5048 | |
5049 | for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { | |
5050 | cmdptr = &mrioc->dev_rmhs_cmds[i]; | |
5051 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
5052 | } | |
c1af985d SR |
5053 | |
5054 | for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { | |
5055 | cmdptr = &mrioc->evtack_cmds[i]; | |
5056 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
5057 | } | |
43ca1100 SS |
5058 | |
5059 | cmdptr = &mrioc->pel_cmds; | |
5060 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
5061 | ||
5062 | cmdptr = &mrioc->pel_abort_cmd; | |
5063 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
5064 | ||
2bd37e28 SR |
5065 | cmdptr = &mrioc->transport_cmds; |
5066 | mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); | |
43ca1100 SS |
5067 | } |
5068 | ||
5069 | /** | |
5070 | * mpi3mr_pel_wait_post - Issue PEL Wait | |
5071 | * @mrioc: Adapter instance reference | |
5072 | * @drv_cmd: Internal command tracker | |
5073 | * | |
5074 | * Issue PEL Wait MPI request through admin queue and return. | |
5075 | * | |
5076 | * Return: Nothing. | |
5077 | */ | |
5078 | static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc, | |
5079 | struct mpi3mr_drv_cmd *drv_cmd) | |
5080 | { | |
5081 | struct mpi3_pel_req_action_wait pel_wait; | |
5082 | ||
5083 | mrioc->pel_abort_requested = false; | |
5084 | ||
5085 | memset(&pel_wait, 0, sizeof(pel_wait)); | |
5086 | drv_cmd->state = MPI3MR_CMD_PENDING; | |
5087 | drv_cmd->is_waiting = 0; | |
5088 | drv_cmd->callback = mpi3mr_pel_wait_complete; | |
5089 | drv_cmd->ioc_status = 0; | |
5090 | drv_cmd->ioc_loginfo = 0; | |
5091 | pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); | |
5092 | pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; | |
5093 | pel_wait.action = MPI3_PEL_ACTION_WAIT; | |
5094 | pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum); | |
5095 | pel_wait.locale = cpu_to_le16(mrioc->pel_locale); | |
5096 | pel_wait.class = cpu_to_le16(mrioc->pel_class); | |
5097 | pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT; | |
5098 | dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n", | |
5099 | mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale); | |
5100 | ||
5101 | if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) { | |
5102 | dprint_bsg_err(mrioc, | |
5103 | "Issuing PELWait: Admin post failed\n"); | |
5104 | drv_cmd->state = MPI3MR_CMD_NOTUSED; | |
5105 | drv_cmd->callback = NULL; | |
5106 | drv_cmd->retry_count = 0; | |
5107 | mrioc->pel_enabled = false; | |
5108 | } | |
5109 | } | |
5110 | ||
5111 | /** | |
5112 | * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number | |
5113 | * @mrioc: Adapter instance reference | |
5114 | * @drv_cmd: Internal command tracker | |
5115 | * | |
5116 | * Issue PEL get sequence number MPI request through admin queue | |
5117 | * and return. | |
5118 | * | |
5119 | * Return: 0 on success, non-zero on failure. | |
5120 | */ | |
5121 | int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc, | |
5122 | struct mpi3mr_drv_cmd *drv_cmd) | |
5123 | { | |
5124 | struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req; | |
5125 | u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; | |
5126 | int retval = 0; | |
5127 | ||
5128 | memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); | |
5129 | mrioc->pel_cmds.state = MPI3MR_CMD_PENDING; | |
5130 | mrioc->pel_cmds.is_waiting = 0; | |
5131 | mrioc->pel_cmds.ioc_status = 0; | |
5132 | mrioc->pel_cmds.ioc_loginfo = 0; | |
5133 | mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete; | |
5134 | pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); | |
5135 | pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; | |
5136 | pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM; | |
5137 | mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags, | |
5138 | mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma); | |
5139 | ||
5140 | retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req, | |
5141 | sizeof(pel_getseq_req), 0); | |
5142 | if (retval) { | |
5143 | if (drv_cmd) { | |
5144 | drv_cmd->state = MPI3MR_CMD_NOTUSED; | |
5145 | drv_cmd->callback = NULL; | |
5146 | drv_cmd->retry_count = 0; | |
5147 | } | |
5148 | mrioc->pel_enabled = false; | |
5149 | } | |
5150 | ||
5151 | return retval; | |
5152 | } | |
5153 | ||
5154 | /** | |
5155 | * mpi3mr_pel_wait_complete - PELWait Completion callback | |
5156 | * @mrioc: Adapter instance reference | |
5157 | * @drv_cmd: Internal command tracker | |
5158 | * | |
5159 | * This is the callback handler for the PELWait request; the | |
5160 | * firmware completes a PELWait request when it is aborted or a | |
5161 | * new PEL entry is available. This sends an AEN to the application | |
5162 | * and, if the PELWait completion is not due to a PELAbort, sends | |
5163 | * a request for a new PEL sequence number. | |
5164 | * | |
5165 | * Return: Nothing. | |
5166 | */ | |
5167 | static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, | |
5168 | struct mpi3mr_drv_cmd *drv_cmd) | |
5169 | { | |
5170 | struct mpi3_pel_reply *pel_reply = NULL; | |
5171 | u16 ioc_status, pe_log_status; | |
5172 | bool do_retry = false; | |
5173 | ||
5174 | if (drv_cmd->state & MPI3MR_CMD_RESET) | |
5175 | goto cleanup_drv_cmd; | |
5176 | ||
5177 | ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK; | |
5178 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5179 | ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", | |
5180 | __func__, ioc_status, drv_cmd->ioc_loginfo); | |
5181 | dprint_bsg_err(mrioc, | |
5182 | "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n", | |
5183 | ioc_status, drv_cmd->ioc_loginfo); | |
5184 | do_retry = true; | |
5185 | } | |
5186 | ||
5187 | if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) | |
5188 | pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply; | |
5189 | ||
5190 | if (!pel_reply) { | |
5191 | dprint_bsg_err(mrioc, | |
5192 | "pel_wait: failed due to no reply\n"); | |
5193 | goto out_failed; | |
5194 | } | |
5195 | ||
5196 | pe_log_status = le16_to_cpu(pel_reply->pe_log_status); | |
5197 | if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) && | |
5198 | (pe_log_status != MPI3_PEL_STATUS_ABORTED)) { | |
5199 | ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n", | |
5200 | __func__, pe_log_status); | |
5201 | dprint_bsg_err(mrioc, | |
5202 | "pel_wait: failed due to pel_log_status(0x%04x)\n", | |
5203 | pe_log_status); | |
5204 | do_retry = true; | |
5205 | } | |
5206 | ||
5207 | if (do_retry) { | |
5208 | if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) { | |
5209 | drv_cmd->retry_count++; | |
5210 | dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n", | |
5211 | drv_cmd->retry_count); | |
5212 | mpi3mr_pel_wait_post(mrioc, drv_cmd); | |
5213 | return; | |
5214 | } | |
5215 | dprint_bsg_err(mrioc, | |
5216 | "pel_wait: failed after all retries(%d)\n", | |
5217 | drv_cmd->retry_count); | |
5218 | goto out_failed; | |
5219 | } | |
5220 | atomic64_inc(&event_counter); | |
5221 | if (!mrioc->pel_abort_requested) { | |
5222 | mrioc->pel_cmds.retry_count = 0; | |
5223 | mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds); | |
5224 | } | |
5225 | ||
5226 | return; | |
5227 | out_failed: | |
5228 | mrioc->pel_enabled = false; | |
5229 | cleanup_drv_cmd: | |
5230 | drv_cmd->state = MPI3MR_CMD_NOTUSED; | |
5231 | drv_cmd->callback = NULL; | |
5232 | drv_cmd->retry_count = 0; | |
5233 | } | |
5234 | ||
5235 | /** | |
5236 | * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback | |
5237 | * @mrioc: Adapter instance reference | |
5238 | * @drv_cmd: Internal command tracker | |
5239 | * | |
5240 | * This is a callback handler for the PEL get sequence number | |
5241 | * request and a new PEL wait request will be issued to the | |
5242 | * firmware from this | |
5243 | * | |
5244 | * Return: Nothing. | |
5245 | */ | |
5246 | void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc, | |
5247 | struct mpi3mr_drv_cmd *drv_cmd) | |
5248 | { | |
5249 | struct mpi3_pel_reply *pel_reply = NULL; | |
5250 | struct mpi3_pel_seq *pel_seqnum_virt; | |
5251 | u16 ioc_status; | |
5252 | bool do_retry = false; | |
5253 | ||
5254 | pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt; | |
5255 | ||
5256 | if (drv_cmd->state & MPI3MR_CMD_RESET) | |
5257 | goto cleanup_drv_cmd; | |
5258 | ||
5259 | ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK; | |
5260 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5261 | dprint_bsg_err(mrioc, | |
5262 | "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n", | |
5263 | ioc_status, drv_cmd->ioc_loginfo); | |
5264 | do_retry = true; | |
5265 | } | |
5266 | ||
5267 | if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) | |
5268 | pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply; | |
5269 | if (!pel_reply) { | |
5270 | dprint_bsg_err(mrioc, | |
5271 | "pel_get_seqnum: failed due to no reply\n"); | |
5272 | goto out_failed; | |
5273 | } | |
5274 | ||
5275 | if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) { | |
5276 | dprint_bsg_err(mrioc, | |
5277 | "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n", | |
5278 | le16_to_cpu(pel_reply->pe_log_status)); | |
5279 | do_retry = true; | |
5280 | } | |
5281 | ||
5282 | if (do_retry) { | |
5283 | if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) { | |
5284 | drv_cmd->retry_count++; | |
5285 | dprint_bsg_err(mrioc, | |
5286 | "pel_get_seqnum: retrying(%d)\n", | |
5287 | drv_cmd->retry_count); | |
5288 | mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd); | |
5289 | return; | |
5290 | } | |
5291 | ||
5292 | dprint_bsg_err(mrioc, | |
5293 | "pel_get_seqnum: failed after all retries(%d)\n", | |
5294 | drv_cmd->retry_count); | |
5295 | goto out_failed; | |
5296 | } | |
5297 | mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1; | |
5298 | drv_cmd->retry_count = 0; | |
5299 | mpi3mr_pel_wait_post(mrioc, drv_cmd); | |
5300 | ||
5301 | return; | |
5302 | out_failed: | |
5303 | mrioc->pel_enabled = false; | |
5304 | cleanup_drv_cmd: | |
5305 | drv_cmd->state = MPI3MR_CMD_NOTUSED; | |
5306 | drv_cmd->callback = NULL; | |
5307 | drv_cmd->retry_count = 0; | |
fb9b0457 KD |
5308 | } |
5309 | ||
f195fc06 RK |
5310 | /** |
5311 | * mpi3mr_check_op_admin_proc - Check if reply queues are in use | |
5312 | * @mrioc: Adapter instance reference | |
5313 | * | |
5314 | * Check if any of the operational reply queues | |
5315 | * or the admin reply queue are currently in use. | |
5316 | * If any queue is in use, this function waits for | |
5317 | * a maximum of 10 seconds for them to become available. | |
5318 | * | |
5319 | * Return: 0 on success, non-zero on failure. | |
5320 | */ | |
5321 | static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc) | |
5322 | { | |
5323 | ||
5324 | u16 timeout = 10 * 10; | |
5325 | u16 elapsed_time = 0; | |
5326 | bool op_admin_in_use = false; | |
5327 | ||
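| /* poll every 100 ms for up to 10 seconds (timeout = 10 * 10 iterations) */ | |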
5328 | do { | |
5329 | op_admin_in_use = false; | |
5330 | ||
5331 | /* Check admin_reply queue first to exit early */ | |
5332 | if (atomic_read(&mrioc->admin_reply_q_in_use) == 1) | |
5333 | op_admin_in_use = true; | |
5334 | else { | |
5335 | /* Check op_reply queues */ | |
5336 | int i; | |
5337 | ||
5338 | for (i = 0; i < mrioc->num_queues; i++) { | |
5339 | if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) { | |
5340 | op_admin_in_use = true; | |
5341 | break; | |
5342 | } | |
5343 | } | |
5344 | } | |
5345 | ||
5346 | if (!op_admin_in_use) | |
5347 | break; | |
5348 | ||
5349 | msleep(100); | |
5350 | ||
5351 | } while (++elapsed_time < timeout); | |
5352 | ||
5353 | if (op_admin_in_use) | |
5354 | return 1; | |
5355 | ||
5356 | return 0; | |
5357 | } | |
5358 | ||
824a1566 KD |
5359 | /** |
5360 | * mpi3mr_soft_reset_handler - Reset the controller | |
5361 | * @mrioc: Adapter instance reference | |
5362 | * @reset_reason: Reset reason code | |
5363 | * @snapdump: Flag to generate snapdump in firmware or not | |
5364 | * | |
fb9b0457 KD |
5365 | * This is a handler for recovering the controller by issuing a | |
5366 | * soft reset or a diag fault reset. This is a blocking function; | |
5367 | * while one reset is executing, any other requested resets are | |
f5e6d5a3 | 5368 | * blocked. All BSG requests will be blocked during the reset. If |
fb9b0457 KD |
5369 | * controller reset is successful then the controller will be |
5370 | * reinitialized, otherwise the controller will be marked as not | |
5371 | * recoverable | |
5372 | * | |
5373 | * If the snapdump bit is set, the controller is issued a diag | |
5374 | * fault reset so that the firmware can create a snap dump and, | |
5375 | * after that, the firmware will raise an F000 fault and the | |
5376 | * driver will issue a soft reset to recover from it. | |
824a1566 KD |
5377 | * |
5378 | * Return: 0 on success, non-zero on failure. | |
5379 | */ | |
5380 | int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, | |
0a2714b7 | 5381 | u16 reset_reason, u8 snapdump) |
824a1566 | 5382 | { |
fb9b0457 KD |
5383 | int retval = 0, i; |
5384 | unsigned long flags; | |
5385 | u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; | |
d8d08d16 | 5386 | union mpi3mr_trigger_data trigger_data; |
fb9b0457 | 5387 | |
b64845a7 SR |
5388 | /* Block the reset handler while a diag save is in progress */ | |
5389 | dprint_reset(mrioc, | |
5390 | "soft_reset_handler: check and block on diagsave_timeout(%d)\n", | |
5391 | mrioc->diagsave_timeout); | |
5392 | while (mrioc->diagsave_timeout) | |
5393 | ssleep(1); | |
fb9b0457 KD |
5394 | /* |
5395 | * Block new resets until the currently executing one is finished and | |
5396 | * return the status of the existing reset for all blocked resets | |
5397 | */ | |
b64845a7 | 5398 | dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n"); |
fb9b0457 | 5399 | if (!mutex_trylock(&mrioc->reset_mutex)) { |
b64845a7 SR |
5400 | ioc_info(mrioc, |
5401 | "controller reset triggered by %s is blocked due to another reset in progress\n", | |
5402 | mpi3mr_reset_rc_name(reset_reason)); | |
5403 | do { | |
5404 | ssleep(1); | |
5405 | } while (mrioc->reset_in_progress == 1); | |
5406 | ioc_info(mrioc, | |
5407 | "returning previous reset result(%d) for the reset triggered by %s\n", | |
5408 | mrioc->prev_reset_result, | |
5409 | mpi3mr_reset_rc_name(reset_reason)); | |
5410 | return mrioc->prev_reset_result; | |
fb9b0457 | 5411 | } |
b64845a7 SR |
5412 | ioc_info(mrioc, "controller reset is triggered by %s\n", |
5413 | mpi3mr_reset_rc_name(reset_reason)); | |
5414 | ||
2745ce0e | 5415 | mrioc->device_refresh_on = 0; |
fb9b0457 | 5416 | mrioc->reset_in_progress = 1; |
f5e6d5a3 | 5417 | mrioc->stop_bsgs = 1; |
b64845a7 | 5418 | mrioc->prev_reset_result = -1; |
d8d08d16 | 5419 | memset(&trigger_data, 0, sizeof(trigger_data)); |
fb9b0457 KD |
5420 | |
5421 | if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && | |
b64845a7 | 5422 | (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) && |
fb9b0457 | 5423 | (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { |
d8d08d16 RK |
5424 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, |
5425 | MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); | |
fc444494 RK |
5426 | dprint_reset(mrioc, |
5427 | "soft_reset_handler: releasing host diagnostic buffers\n"); | |
5428 | mpi3mr_release_diag_bufs(mrioc, 0); | |
fb9b0457 KD |
5429 | for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) |
5430 | mrioc->event_masks[i] = -1; | |
5431 | ||
b64845a7 SR |
5432 | dprint_reset(mrioc, "soft_reset_handler: masking events\n"); |
5433 | mpi3mr_issue_event_notification(mrioc); | |
fb9b0457 KD |
5434 | } |
5435 | ||
44dc724f KD |
5436 | mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); |
5437 | ||
fb9b0457 | 5438 | mpi3mr_ioc_disable_intr(mrioc); |
f195fc06 | 5439 | mrioc->io_admin_reset_sync = 1; |
fb9b0457 KD |
5440 | |
5441 | if (snapdump) { | |
5442 | mpi3mr_set_diagsave(mrioc); | |
5443 | retval = mpi3mr_issue_reset(mrioc, | |
5444 | MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); | |
5445 | if (!retval) { | |
d8d08d16 RK |
5446 | trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & |
5447 | MPI3_SYSIF_FAULT_CODE_MASK); | |
fb9b0457 KD |
5448 | do { |
5449 | host_diagnostic = | |
5450 | readl(&mrioc->sysif_regs->host_diagnostic); | |
5451 | if (!(host_diagnostic & | |
5452 | MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) | |
5453 | break; | |
5454 | msleep(100); | |
5455 | } while (--timeout); | |
d8d08d16 RK |
5456 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, |
5457 | MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); | |
fb9b0457 KD |
5458 | } |
5459 | } | |
5460 | ||
5461 | retval = mpi3mr_issue_reset(mrioc, | |
5462 | MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason); | |
5463 | if (retval) { | |
5464 | ioc_err(mrioc, "Failed to issue soft reset to the ioc\n"); | |
5465 | goto out; | |
5466 | } | |
f195fc06 RK |
5467 | |
5468 | retval = mpi3mr_check_op_admin_proc(mrioc); | |
5469 | if (retval) { | |
5470 | ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n" | |
5471 | "thread still processing replies even after a 10 second\n" | |
5472 | "timeout. Marking the controller as unrecoverable!\n"); | |
5473 | ||
5474 | goto out; | |
5475 | } | |
5476 | ||
f10af057 SR |
5477 | if (mrioc->num_io_throttle_group != |
5478 | mrioc->facts.max_io_throttle_group) { | |
5479 | ioc_err(mrioc, | |
5480 | "max io throttle group doesn't match old(%d), new(%d)\n", | |
5481 | mrioc->num_io_throttle_group, | |
5482 | mrioc->facts.max_io_throttle_group); | |
2a8a0147 DC |
5483 | retval = -EPERM; |
5484 | goto out; | |
f10af057 | 5485 | } |
fb9b0457 | 5486 | |
c1af985d | 5487 | mpi3mr_flush_delayed_cmd_lists(mrioc); |
fb9b0457 | 5488 | mpi3mr_flush_drv_cmds(mrioc); |
339e6156 SK |
5489 | bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); |
5490 | bitmap_clear(mrioc->removepend_bitmap, 0, | |
5491 | mrioc->dev_handle_bitmap_bits); | |
5492 | bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD); | |
fb9b0457 | 5493 | mpi3mr_flush_host_io(mrioc); |
580e6742 | 5494 | mpi3mr_cleanup_fwevt_list(mrioc); |
fb9b0457 | 5495 | mpi3mr_invalidate_devhandles(mrioc); |
130fc180 SR |
5496 | mpi3mr_free_enclosure_list(mrioc); |
5497 | ||
78b76a07 SR |
5498 | if (mrioc->prepare_for_reset) { |
5499 | mrioc->prepare_for_reset = 0; | |
5500 | mrioc->prepare_for_reset_timeout_counter = 0; | |
5501 | } | |
fb9b0457 | 5502 | mpi3mr_memset_buffers(mrioc); |
fc444494 | 5503 | mpi3mr_release_diag_bufs(mrioc, 1); |
d8d08d16 RK |
5504 | mrioc->fw_release_trigger_active = false; |
5505 | mrioc->trace_release_trigger_active = false; | |
5506 | mrioc->snapdump_trigger_active = false; | |
5507 | mpi3mr_set_trigger_data_in_all_hdb(mrioc, | |
5508 | MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); | |
5509 | ||
5510 | dprint_reset(mrioc, | |
5511 | "soft_reset_handler: reinitializing the controller\n"); | |
fe6db615 | 5512 | retval = mpi3mr_reinit_ioc(mrioc, 0); |
fb9b0457 KD |
5513 | if (retval) { |
5514 | pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", | |
5515 | mrioc->name, reset_reason); | |
5516 | goto out; | |
5517 | } | |
f84e8b5b | 5518 | ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); |
fb9b0457 KD |
5519 | |
5520 | out: | |
5521 | if (!retval) { | |
b64845a7 | 5522 | mrioc->diagsave_timeout = 0; |
fb9b0457 | 5523 | mrioc->reset_in_progress = 0; |
43ca1100 SS |
5524 | mrioc->pel_abort_requested = 0; |
5525 | if (mrioc->pel_enabled) { | |
5526 | mrioc->pel_cmds.retry_count = 0; | |
5527 | mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds); | |
5528 | } | |
5529 | ||
2745ce0e SR |
5530 | mrioc->device_refresh_on = 0; |
5531 | ||
54dfcffb | 5532 | mrioc->ts_update_counter = 0; |
fb9b0457 KD |
5533 | spin_lock_irqsave(&mrioc->watchdog_lock, flags); |
5534 | if (mrioc->watchdog_work_q) | |
5535 | queue_delayed_work(mrioc->watchdog_work_q, | |
5536 | &mrioc->watchdog_work, | |
5537 | msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); | |
5538 | spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); | |
f5e6d5a3 | 5539 | mrioc->stop_bsgs = 0; |
43ca1100 SS |
5540 | if (mrioc->pel_enabled) |
5541 | atomic64_inc(&event_counter); | |
fb9b0457 KD |
5542 | } else { |
5543 | mpi3mr_issue_reset(mrioc, | |
5544 | MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); | |
2745ce0e | 5545 | mrioc->device_refresh_on = 0; |
fb9b0457 KD |
5546 | mrioc->unrecoverable = 1; |
5547 | mrioc->reset_in_progress = 0; | |
31ec576e | 5548 | mrioc->stop_bsgs = 0; |
fb9b0457 | 5549 | retval = -1; |
f2a79d20 | 5550 | mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); |
fb9b0457 | 5551 | } |
b64845a7 | 5552 | mrioc->prev_reset_result = retval; |
fb9b0457 | 5553 | mutex_unlock(&mrioc->reset_mutex); |
b64845a7 SR |
5554 | ioc_info(mrioc, "controller reset is %s\n", |
5555 | ((retval == 0) ? "successful" : "failed")); | |
fb9b0457 | 5556 | return retval; |
824a1566 | 5557 | } |
32d457d5 | 5558 | |
32d457d5 SR |
5559 | /** |
5560 | * mpi3mr_post_cfg_req - Issue config requests and wait | |
5561 | * @mrioc: Adapter instance reference | |
5562 | * @cfg_req: Configuration request | |
5563 | * @timeout: Timeout in seconds | |
5564 | * @ioc_status: Pointer to return ioc status | |
5565 | * | |
5566 | * A generic function for posting MPI3 configuration request to | |
5567 | * the firmware. This blocks for the completion of request for | |
5568 | * timeout seconds and if the request times out this function | |
5569 | * faults the controller with proper reason code. | |
5570 | * | |
5571 | * On successful completion of the request this function returns | |
5572 | * appropriate ioc status from the firmware back to the caller. | |
5573 | * | |
5574 | * Return: 0 on success, non-zero on failure. | |
5575 | */ | |
5576 | static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc, | |
5577 | struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status) | |
5578 | { | |
5579 | int retval = 0; | |
5580 | ||
5581 | mutex_lock(&mrioc->cfg_cmds.mutex); | |
5582 | if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) { | |
5583 | retval = -1; | |
5584 | ioc_err(mrioc, "sending config request failed due to command in use\n"); | |
5585 | mutex_unlock(&mrioc->cfg_cmds.mutex); | |
5586 | goto out; | |
5587 | } | |
5588 | mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING; | |
5589 | mrioc->cfg_cmds.is_waiting = 1; | |
5590 | mrioc->cfg_cmds.callback = NULL; | |
5591 | mrioc->cfg_cmds.ioc_status = 0; | |
5592 | mrioc->cfg_cmds.ioc_loginfo = 0; | |
5593 | ||
5594 | cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS); | |
5595 | cfg_req->function = MPI3_FUNCTION_CONFIG; | |
5596 | ||
5597 | init_completion(&mrioc->cfg_cmds.done); | |
5598 | dprint_cfg_info(mrioc, "posting config request\n"); | |
5599 | if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) | |
5600 | dprint_dump(cfg_req, sizeof(struct mpi3_config_request), | |
5601 | "mpi3_cfg_req"); | |
5602 | retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1); | |
5603 | if (retval) { | |
5604 | ioc_err(mrioc, "posting config request failed\n"); | |
5605 | goto out_unlock; | |
5606 | } | |
5607 | wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ)); | |
5608 | if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) { | |
5609 | mpi3mr_check_rh_fault_ioc(mrioc, | |
5610 | MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT); | |
5611 | ioc_err(mrioc, "config request timed out\n"); | |
5612 | retval = -1; | |
5613 | goto out_unlock; | |
5614 | } | |
5615 | *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK; | |
5616 | if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS) | |
5617 | dprint_cfg_err(mrioc, | |
5618 | "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n", | |
5619 | *ioc_status, mrioc->cfg_cmds.ioc_loginfo); | |
5620 | ||
5621 | out_unlock: | |
5622 | mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED; | |
5623 | mutex_unlock(&mrioc->cfg_cmds.mutex); | |
5624 | ||
5625 | out: | |
5626 | return retval; | |
5627 | } | |
5628 | ||
5629 | /** | |
5630 | * mpi3mr_process_cfg_req - config page request processor | |
5631 | * @mrioc: Adapter instance reference | |
5632 | * @cfg_req: Configuration request | |
5633 | * @cfg_hdr: Configuration page header | |
5634 | * @timeout: Timeout in seconds | |
5635 | * @ioc_status: Pointer to return ioc status | |
5636 | * @cfg_buf: Memory pointer to copy config page or header | |
5637 | * @cfg_buf_sz: Size of the memory to get config page or header | |
5638 | * | |
5639 | * This is the handler for config page read, write and config | |
5640 | * page header read operations. | |
5641 | * | |
5642 | * This function expects the cfg_req to be populated with page | |
5643 | * type, page number, action for the header read and with page | |
5644 | * address for all other operations. | |
5645 | * | |
5646 | * The cfg_hdr can be passed as null when reading the required | |
5647 | * header details; for read/write pages the cfg_hdr should point | |
5648 | * to a valid configuration page header. | |
5649 | * | |
5650 | * This allocates DMA-able memory based on the size of the config | |
5651 | * buffer and sets the SGE of the cfg_req. | |
5652 | * | |
5653 | * For write actions, the config page data has to be passed in | |
5654 | * the cfg_buf and size of the data has to be mentioned in the | |
5655 | * cfg_buf_sz. | |
5656 | * | |
5657 | * For read/header actions, on successful completion of the | |
5658 | * request with a successful ioc_status, the data is copied into | |
5659 | * the cfg_buf, limited to the minimum of the actual page size | |
5660 | * and cfg_buf_sz. | |
5661 | * | |
5662 | * | |
5663 | * Return: 0 on success, non-zero on failure. | |
5664 | */ | |
5665 | static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, | |
5666 | struct mpi3_config_request *cfg_req, | |
5667 | struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status, | |
5668 | void *cfg_buf, u32 cfg_buf_sz) | |
5669 | { | |
5670 | struct dma_memory_desc mem_desc; | |
5671 | int retval = -1; | |
5672 | u8 invalid_action = 0; | |
5673 | u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; | |
5674 | ||
5675 | memset(&mem_desc, 0, sizeof(struct dma_memory_desc)); | |
5676 | ||
5677 | if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER) | |
5678 | mem_desc.size = sizeof(struct mpi3_config_page_header); | |
5679 | else { | |
5680 | if (!cfg_hdr) { | |
5681 | ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n", | |
5682 | cfg_req->action, cfg_req->page_type, | |
5683 | cfg_req->page_number); | |
5684 | goto out; | |
5685 | } | |
5686 | switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) { | |
5687 | case MPI3_CONFIG_PAGEATTR_READ_ONLY: | |
5688 | if (cfg_req->action | |
5689 | != MPI3_CONFIG_ACTION_READ_CURRENT) | |
5690 | invalid_action = 1; | |
5691 | break; | |
5692 | case MPI3_CONFIG_PAGEATTR_CHANGEABLE: | |
5693 | if ((cfg_req->action == | |
5694 | MPI3_CONFIG_ACTION_READ_PERSISTENT) || | |
5695 | (cfg_req->action == | |
5696 | MPI3_CONFIG_ACTION_WRITE_PERSISTENT)) | |
5697 | invalid_action = 1; | |
5698 | break; | |
5699 | case MPI3_CONFIG_PAGEATTR_PERSISTENT: | |
5700 | default: | |
5701 | break; | |
5702 | } | |
5703 | if (invalid_action) { | |
5704 | ioc_err(mrioc, | |
5705 | "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n", | |
5706 | cfg_req->action, cfg_req->page_type, | |
5707 | cfg_req->page_number, cfg_hdr->page_attribute); | |
5708 | goto out; | |
5709 | } | |
5710 | mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4; | |
5711 | cfg_req->page_length = cfg_hdr->page_length; | |
5712 | cfg_req->page_version = cfg_hdr->page_version; | |
5713 | } | |
711201a8 RK |
5714 | |
5715 | mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev, | |
5716 | mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL); | |
5717 | ||
5718 | if (!mem_desc.addr) | |
5719 | return retval; | |
32d457d5 SR |
5720 | |
5721 | mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size, | |
5722 | mem_desc.dma_addr); | |
5723 | ||
5724 | if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) || | |
5725 | (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) { | |
5726 | memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size, | |
5727 | cfg_buf_sz)); | |
5728 | dprint_cfg_info(mrioc, "config buffer to be written\n"); | |
5729 | if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) | |
5730 | dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf"); | |
5731 | } | |
5732 | ||
5733 | if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status)) | |
5734 | goto out; | |
5735 | ||
5736 | retval = 0; | |
5737 | if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) && | |
5738 | (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) && | |
5739 | (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) { | |
5740 | memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size, | |
5741 | cfg_buf_sz)); | |
5742 | dprint_cfg_info(mrioc, "config buffer read\n"); | |
5743 | if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) | |
5744 | dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf"); | |
5745 | } | |
5746 | ||
5747 | out: | |
711201a8 RK |
5748 | if (mem_desc.addr) { |
5749 | dma_free_coherent(&mrioc->pdev->dev, mem_desc.size, | |
5750 | mem_desc.addr, mem_desc.dma_addr); | |
5751 | mem_desc.addr = NULL; | |
5752 | } | |
5753 | ||
32d457d5 SR |
5754 | return retval; |
5755 | } | |
64a8d931 SR |
5756 | |
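/*
 * Illustrative sketch (not part of the driver): every config page
 * accessor below drives mpi3mr_process_cfg_req() with the same
 * two-step pattern - read the page header first (cfg_hdr passed as
 * NULL), then reissue the request with the real action and page
 * address. A simplified, hypothetical caller:
 *
 *	cfg_req.function = MPI3_FUNCTION_CONFIG;
 *	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
 *	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
 *	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
 *	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
 *	    sizeof(cfg_hdr)) || (ioc_status != MPI3_IOCSTATUS_SUCCESS))
 *		return -1;
 *
 *	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
 *	cfg_req.page_address = cpu_to_le32(page_address);
 *	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
 *	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, page_buf, page_sz))
 *		return -1;
 */
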
5757 | /** | |
5758 | * mpi3mr_cfg_get_dev_pg0 - Read current device page0 | |
5759 | * @mrioc: Adapter instance reference | |
5760 | * @ioc_status: Pointer to return ioc status | |
5761 | * @dev_pg0: Pointer to return device page 0 | |
5762 | * @pg_sz: Size of the memory allocated to the page pointer | |
5763 | * @form: The form to be used for addressing the page | |
5764 | * @form_spec: Form specific information like device handle | |
5765 | * | |
5766 | * This is the handler for a config page read of a specific device | |
5767 | * page0. The ioc_status holds the ioc_status returned by the | |
5768 | * controller. This routine doesn't check ioc_status to decide | |
5769 | * whether the page read succeeded or not; that is the caller's | |
5770 | * responsibility. | |
5771 | * | |
5772 | * Return: 0 on success, non-zero on failure. | |
5773 | */ | |
5774 | int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
5775 | struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) | |
5776 | { | |
5777 | struct mpi3_config_page_header cfg_hdr; | |
5778 | struct mpi3_config_request cfg_req; | |
5779 | u32 page_address; | |
5780 | ||
5781 | memset(dev_pg0, 0, pg_sz); | |
5782 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
5783 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
5784 | ||
5785 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
5786 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
5787 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; | |
5788 | cfg_req.page_number = 0; | |
5789 | cfg_req.page_address = 0; | |
5790 | ||
5791 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
5792 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
5793 | ioc_err(mrioc, "device page0 header read failed\n"); | |
5794 | goto out_failed; | |
5795 | } | |
5796 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5797 | ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", | |
5798 | *ioc_status); | |
5799 | goto out_failed; | |
5800 | } | |
5801 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
5802 | page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | | |
5803 | (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); | |
5804 | cfg_req.page_address = cpu_to_le32(page_address); | |
5805 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
5806 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { | |
5807 | ioc_err(mrioc, "device page0 read failed\n"); | |
5808 | goto out_failed; | |
5809 | } | |
5810 | return 0; | |
5811 | out_failed: | |
5812 | return -1; | |
5813 | } | |
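
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * reading device page0 by device handle. The wrapper does not
 * interpret ioc_status, so the caller checks it; the SAS phy, expander
 * and enclosure getters below are used the same way.
 *
 *	struct mpi3_device_page0 dev_pg0;
 *	u16 ioc_status = 0;
 *
 *	if (mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
 *	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))
 *		return -ENODEV;
 *	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
 *		return -ENODEV;
 */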
5814 | ||
5815 | ||
5816 | /** | |
5817 | * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 | |
5818 | * @mrioc: Adapter instance reference | |
5819 | * @ioc_status: Pointer to return ioc status | |
5820 | * @phy_pg0: Pointer to return SAS Phy page 0 | |
5821 | * @pg_sz: Size of the memory allocated to the page pointer | |
5822 | * @form: The form to be used for addressing the page | |
5823 | * @form_spec: Form specific information like phy number | |
5824 | * | |
5825 | * This is the handler for a config page read of a specific SAS | |
5826 | * Phy page0. The ioc_status holds the ioc_status returned by the | |
5827 | * controller. This routine doesn't check ioc_status to decide | |
5828 | * whether the page read succeeded or not; that is the caller's | |
5829 | * responsibility. | |
5830 | * | |
5831 | * Return: 0 on success, non-zero on failure. | |
5832 | */ | |
5833 | int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
5834 | struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, | |
5835 | u32 form_spec) | |
5836 | { | |
5837 | struct mpi3_config_page_header cfg_hdr; | |
5838 | struct mpi3_config_request cfg_req; | |
5839 | u32 page_address; | |
5840 | ||
5841 | memset(phy_pg0, 0, pg_sz); | |
5842 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
5843 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
5844 | ||
5845 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
5846 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
5847 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; | |
5848 | cfg_req.page_number = 0; | |
5849 | cfg_req.page_address = 0; | |
5850 | ||
5851 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
5852 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
5853 | ioc_err(mrioc, "sas phy page0 header read failed\n"); | |
5854 | goto out_failed; | |
5855 | } | |
5856 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5857 | ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", | |
5858 | *ioc_status); | |
5859 | goto out_failed; | |
5860 | } | |
5861 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
5862 | page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | | |
5863 | (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); | |
5864 | cfg_req.page_address = cpu_to_le32(page_address); | |
5865 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
5866 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { | |
5867 | ioc_err(mrioc, "sas phy page0 read failed\n"); | |
5868 | goto out_failed; | |
5869 | } | |
5870 | return 0; | |
5871 | out_failed: | |
5872 | return -1; | |
5873 | } | |
5874 | ||
5875 | /** | |
5876 | * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 | |
5877 | * @mrioc: Adapter instance reference | |
5878 | * @ioc_status: Pointer to return ioc status | |
5879 | * @phy_pg1: Pointer to return SAS Phy page 1 | |
5880 | * @pg_sz: Size of the memory allocated to the page pointer | |
5881 | * @form: The form to be used for addressing the page | |
5882 | * @form_spec: Form specific information like phy number | |
5883 | * | |
5884 | * This is the handler for a config page read of a specific SAS | |
5885 | * Phy page1. The ioc_status holds the ioc_status returned by the | |
5886 | * controller. This routine doesn't check ioc_status to decide | |
5887 | * whether the page read succeeded or not; that is the caller's | |
5888 | * responsibility. | |
5889 | * | |
5890 | * Return: 0 on success, non-zero on failure. | |
5891 | */ | |
5892 | int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
5893 | struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, | |
5894 | u32 form_spec) | |
5895 | { | |
5896 | struct mpi3_config_page_header cfg_hdr; | |
5897 | struct mpi3_config_request cfg_req; | |
5898 | u32 page_address; | |
5899 | ||
5900 | memset(phy_pg1, 0, pg_sz); | |
5901 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
5902 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
5903 | ||
5904 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
5905 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
5906 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; | |
5907 | cfg_req.page_number = 1; | |
5908 | cfg_req.page_address = 0; | |
5909 | ||
5910 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
5911 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
5912 | ioc_err(mrioc, "sas phy page1 header read failed\n"); | |
5913 | goto out_failed; | |
5914 | } | |
5915 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5916 | ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", | |
5917 | *ioc_status); | |
5918 | goto out_failed; | |
5919 | } | |
5920 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
5921 | page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | | |
5922 | (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); | |
5923 | cfg_req.page_address = cpu_to_le32(page_address); | |
5924 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
5925 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { | |
5926 | ioc_err(mrioc, "sas phy page1 read failed\n"); | |
5927 | goto out_failed; | |
5928 | } | |
5929 | return 0; | |
5930 | out_failed: | |
5931 | return -1; | |
5932 | } | |
5933 | ||
5934 | ||
5935 | /** | |
5936 | * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 | |
5937 | * @mrioc: Adapter instance reference | |
5938 | * @ioc_status: Pointer to return ioc status | |
5939 | * @exp_pg0: Pointer to return SAS Expander page 0 | |
5940 | * @pg_sz: Size of the memory allocated to the page pointer | |
5941 | * @form: The form to be used for addressing the page | |
5942 | * @form_spec: Form specific information like device handle | |
5943 | * | |
5944 | * This is the handler for a config page read of a specific SAS | |
5945 | * Expander page0. The ioc_status holds the ioc_status returned | |
5946 | * by the controller. This routine doesn't check ioc_status to | |
5947 | * decide whether the page read succeeded or not; that is the | |
5948 | * caller's responsibility. | |
5949 | * | |
5950 | * Return: 0 on success, non-zero on failure. | |
5951 | */ | |
5952 | int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
5953 | struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, | |
5954 | u32 form_spec) | |
5955 | { | |
5956 | struct mpi3_config_page_header cfg_hdr; | |
5957 | struct mpi3_config_request cfg_req; | |
5958 | u32 page_address; | |
5959 | ||
5960 | memset(exp_pg0, 0, pg_sz); | |
5961 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
5962 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
5963 | ||
5964 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
5965 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
5966 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; | |
5967 | cfg_req.page_number = 0; | |
5968 | cfg_req.page_address = 0; | |
5969 | ||
5970 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
5971 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
5972 | ioc_err(mrioc, "expander page0 header read failed\n"); | |
5973 | goto out_failed; | |
5974 | } | |
5975 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
5976 | ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", | |
5977 | *ioc_status); | |
5978 | goto out_failed; | |
5979 | } | |
5980 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
5981 | page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | | |
5982 | (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | | |
5983 | MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); | |
5984 | cfg_req.page_address = cpu_to_le32(page_address); | |
5985 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
5986 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { | |
5987 | ioc_err(mrioc, "expander page0 read failed\n"); | |
5988 | goto out_failed; | |
5989 | } | |
5990 | return 0; | |
5991 | out_failed: | |
5992 | return -1; | |
5993 | } | |
5994 | ||
5995 | /** | |
5996 | * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 | |
5997 | * @mrioc: Adapter instance reference | |
5998 | * @ioc_status: Pointer to return ioc status | |
5999 | * @exp_pg1: Pointer to return SAS Expander page 1 | |
6000 | * @pg_sz: Size of the memory allocated to the page pointer | |
6001 | * @form: The form to be used for addressing the page | |
6002 | * @form_spec: Form specific information like phy number | |
6003 | * | |
6004 | * This is the handler for a config page read of a specific SAS | |
6005 | * Expander page1. The ioc_status holds the ioc_status returned | |
6006 | * by the controller. This routine doesn't check ioc_status to | |
6007 | * decide whether the page read succeeded or not; that is the | |
6008 | * caller's responsibility. | |
6009 | * | |
6010 | * Return: 0 on success, non-zero on failure. | |
6011 | */ | |
6012 | int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
6013 | struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, | |
6014 | u32 form_spec) | |
6015 | { | |
6016 | struct mpi3_config_page_header cfg_hdr; | |
6017 | struct mpi3_config_request cfg_req; | |
6018 | u32 page_address; | |
6019 | ||
6020 | memset(exp_pg1, 0, pg_sz); | |
6021 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6022 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6023 | ||
6024 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6025 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6026 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; | |
6027 | cfg_req.page_number = 1; | |
6028 | cfg_req.page_address = 0; | |
6029 | ||
6030 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6031 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6032 | ioc_err(mrioc, "expander page1 header read failed\n"); | |
6033 | goto out_failed; | |
6034 | } | |
6035 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6036 | ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", | |
6037 | *ioc_status); | |
6038 | goto out_failed; | |
6039 | } | |
6040 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
6041 | page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | | |
6042 | (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | | |
6043 | MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); | |
6044 | cfg_req.page_address = cpu_to_le32(page_address); | |
6045 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6046 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { | |
6047 | ioc_err(mrioc, "expander page1 read failed\n"); | |
6048 | goto out_failed; | |
6049 | } | |
6050 | return 0; | |
6051 | out_failed: | |
6052 | return -1; | |
6053 | } | |
6054 | ||
6055 | /** | |
6056 | * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 | |
6057 | * @mrioc: Adapter instance reference | |
6058 | * @ioc_status: Pointer to return ioc status | |
6059 | * @encl_pg0: Pointer to return Enclosure page 0 | |
6060 | * @pg_sz: Size of the memory allocated to the page pointer | |
6061 | * @form: The form to be used for addressing the page | |
6062 | * @form_spec: Form specific information like device handle | |
6063 | * | |
6064 | * This is the handler for a config page read of a specific | |
6065 | * Enclosure page0. The ioc_status holds the ioc_status returned | |
6066 | * by the controller. This routine doesn't check ioc_status to | |
6067 | * decide whether the page read succeeded or not; that is the | |
6068 | * caller's responsibility. | |
6069 | * | |
6070 | * Return: 0 on success, non-zero on failure. | |
6071 | */ | |
6072 | int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, | |
6073 | struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, | |
6074 | u32 form_spec) | |
6075 | { | |
6076 | struct mpi3_config_page_header cfg_hdr; | |
6077 | struct mpi3_config_request cfg_req; | |
6078 | u32 page_address; | |
6079 | ||
6080 | memset(encl_pg0, 0, pg_sz); | |
6081 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6082 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6083 | ||
6084 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6085 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6086 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; | |
6087 | cfg_req.page_number = 0; | |
6088 | cfg_req.page_address = 0; | |
6089 | ||
6090 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6091 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6092 | ioc_err(mrioc, "enclosure page0 header read failed\n"); | |
6093 | goto out_failed; | |
6094 | } | |
6095 | if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6096 | ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", | |
6097 | *ioc_status); | |
6098 | goto out_failed; | |
6099 | } | |
6100 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
6101 | page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | | |
6102 | (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); | |
6103 | cfg_req.page_address = cpu_to_le32(page_address); | |
6104 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6105 | MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { | |
6106 | ioc_err(mrioc, "enclosure page0 read failed\n"); | |
6107 | goto out_failed; | |
6108 | } | |
6109 | return 0; | |
6110 | out_failed: | |
6111 | return -1; | |
6112 | } | |
6113 | ||
6114 | ||
6115 | /** | |
6116 | * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 | |
6117 | * @mrioc: Adapter instance reference | |
6118 | * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 | |
6119 | * @pg_sz: Size of the memory allocated to the page pointer | |
6120 | * | |
6121 | * This is the handler for a config page read of the SAS IO Unit | |
6122 | * page0. This routine checks ioc_status to decide whether the | |
6123 | * page read succeeded or not. | |
6124 | * | |
6125 | * Return: 0 on success, non-zero on failure. | |
6126 | */ | |
6127 | int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, | |
6128 | struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) | |
6129 | { | |
6130 | struct mpi3_config_page_header cfg_hdr; | |
6131 | struct mpi3_config_request cfg_req; | |
6132 | u16 ioc_status = 0; | |
6133 | ||
6134 | memset(sas_io_unit_pg0, 0, pg_sz); | |
6135 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6136 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6137 | ||
6138 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6139 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6140 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; | |
6141 | cfg_req.page_number = 0; | |
6142 | cfg_req.page_address = 0; | |
6143 | ||
6144 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6145 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6146 | ioc_err(mrioc, "sas io unit page0 header read failed\n"); | |
6147 | goto out_failed; | |
6148 | } | |
6149 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6150 | ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", | |
6151 | ioc_status); | |
6152 | goto out_failed; | |
6153 | } | |
6154 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
6155 | ||
6156 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6157 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { | |
6158 | ioc_err(mrioc, "sas io unit page0 read failed\n"); | |
6159 | goto out_failed; | |
6160 | } | |
6161 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6162 | ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", | |
6163 | ioc_status); | |
6164 | goto out_failed; | |
6165 | } | |
6166 | return 0; | |
6167 | out_failed: | |
6168 | return -1; | |
6169 | } | |
6170 | ||
6171 | /** | |
6172 | * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 | |
6173 | * @mrioc: Adapter instance reference | |
6174 | * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 | |
6175 | * @pg_sz: Size of the memory allocated to the page pointer | |
6176 | * | |
6177 | * This is the handler for a config page read of the SAS IO Unit | |
6178 | * page1. This routine checks ioc_status to decide whether the | |
6179 | * page read succeeded or not. | |
6180 | * | |
6181 | * Return: 0 on success, non-zero on failure. | |
6182 | */ | |
6183 | int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, | |
6184 | struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) | |
6185 | { | |
6186 | struct mpi3_config_page_header cfg_hdr; | |
6187 | struct mpi3_config_request cfg_req; | |
6188 | u16 ioc_status = 0; | |
6189 | ||
6190 | memset(sas_io_unit_pg1, 0, pg_sz); | |
6191 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6192 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6193 | ||
6194 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6195 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6196 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; | |
6197 | cfg_req.page_number = 1; | |
6198 | cfg_req.page_address = 0; | |
6199 | ||
6200 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6201 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6202 | ioc_err(mrioc, "sas io unit page1 header read failed\n"); | |
6203 | goto out_failed; | |
6204 | } | |
6205 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6206 | ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", | |
6207 | ioc_status); | |
6208 | goto out_failed; | |
6209 | } | |
6210 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
6211 | ||
6212 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6213 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { | |
6214 | ioc_err(mrioc, "sas io unit page1 read failed\n"); | |
6215 | goto out_failed; | |
6216 | } | |
6217 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6218 | ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", | |
6219 | ioc_status); | |
6220 | goto out_failed; | |
6221 | } | |
6222 | return 0; | |
6223 | out_failed: | |
6224 | return -1; | |
6225 | } | |
6226 | ||
6227 | /** | |
6228 | * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 | |
6229 | * @mrioc: Adapter instance reference | |
6230 | * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write | |
6231 | * @pg_sz: Size of the memory allocated to the page pointer | |
6232 | * | |
6233 | * This is the handler for a config page write of the SAS IO Unit | |
6234 | * page1. This routine checks ioc_status to decide whether the | |
6235 | * page write succeeded or not. This modifies both the current | |
6236 | * and the persistent page. | |
6237 | * | |
6238 | * Return: 0 on success, non-zero on failure. | |
6239 | */ | |
6240 | int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, | |
6241 | struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) | |
6242 | { | |
6243 | struct mpi3_config_page_header cfg_hdr; | |
6244 | struct mpi3_config_request cfg_req; | |
6245 | u16 ioc_status = 0; | |
6246 | ||
6247 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6248 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6249 | ||
6250 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6251 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6252 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; | |
6253 | cfg_req.page_number = 1; | |
6254 | cfg_req.page_address = 0; | |
6255 | ||
6256 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6257 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6258 | ioc_err(mrioc, "sas io unit page1 header read failed\n"); | |
6259 | goto out_failed; | |
6260 | } | |
6261 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6262 | ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", | |
6263 | ioc_status); | |
6264 | goto out_failed; | |
6265 | } | |
6266 | cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; | |
6267 | ||
6268 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6269 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { | |
6270 | ioc_err(mrioc, "sas io unit page1 write current failed\n"); | |
6271 | goto out_failed; | |
6272 | } | |
6273 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6274 | ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", | |
6275 | ioc_status); | |
6276 | goto out_failed; | |
6277 | } | |
6278 | ||
6279 | cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; | |
6280 | ||
6281 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6282 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { | |
6283 | ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); | |
6284 | goto out_failed; | |
6285 | } | |
6286 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6287 | ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", | |
6288 | ioc_status); | |
6289 | goto out_failed; | |
6290 | } | |
6291 | return 0; | |
6292 | out_failed: | |
6293 | return -1; | |
6294 | } | |
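
/*
 * Illustrative sketch (not part of the driver): SAS IO unit page1 is
 * normally updated with a read-modify-write through the two helpers
 * above. The size calculation and the num_phys/phy_data names in this
 * hypothetical snippet are assumptions about the page layout.
 *
 *	u16 sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
 *	    (num_phys * sizeof(struct mpi3_sas_io_unit1_phy_data));
 *	struct mpi3_sas_io_unit_page1 *pg1 = kzalloc(sz, GFP_KERNEL);
 *
 *	if (!pg1)
 *		return -ENOMEM;
 *	if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, sz))
 *		goto out;
 *	... adjust pg1->phy_data[phy_num] as needed, then write back:
 *	if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, sz))
 *		goto out;
 */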
6295 | ||
6296 | /** | |
6297 | * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 | |
6298 | * @mrioc: Adapter instance reference | |
6299 | * @driver_pg1: Pointer to return Driver page 1 | |
6300 | * @pg_sz: Size of the memory allocated to the page pointer | |
6301 | * | |
6302 | * This is the handler for a config page read of the Driver | |
6303 | * page1. This routine checks ioc_status to decide whether the | |
6304 | * page read succeeded or not. | |
6305 | * | |
6306 | * Return: 0 on success, non-zero on failure. | |
6307 | */ | |
6308 | int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, | |
6309 | struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) | |
6310 | { | |
6311 | struct mpi3_config_page_header cfg_hdr; | |
6312 | struct mpi3_config_request cfg_req; | |
6313 | u16 ioc_status = 0; | |
6314 | ||
6315 | memset(driver_pg1, 0, pg_sz); | |
6316 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6317 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6318 | ||
6319 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6320 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6321 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; | |
6322 | cfg_req.page_number = 1; | |
6323 | cfg_req.page_address = 0; | |
6324 | ||
6325 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6326 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6327 | ioc_err(mrioc, "driver page1 header read failed\n"); | |
6328 | goto out_failed; | |
6329 | } | |
6330 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6331 | ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", | |
6332 | ioc_status); | |
6333 | goto out_failed; | |
6334 | } | |
6335 | cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; | |
6336 | ||
6337 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6338 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { | |
6339 | ioc_err(mrioc, "driver page1 read failed\n"); | |
6340 | goto out_failed; | |
6341 | } | |
6342 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6343 | ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", | |
6344 | ioc_status); | |
6345 | goto out_failed; | |
6346 | } | |
6347 | return 0; | |
6348 | out_failed: | |
6349 | return -1; | |
6350 | } | |
fc444494 RK |
6351 | |
6352 | /** | |
6353 | * mpi3mr_cfg_get_driver_pg2 - Read current driver page2 | |
6354 | * @mrioc: Adapter instance reference | |
6355 | * @driver_pg2: Pointer to return driver page 2 | |
6356 | * @pg_sz: Size of the memory allocated to the page pointer | |
6357 | * @page_action: Page action | |
6358 | * | |
6359 | * This is the handler for a config page read of the driver | |
6360 | * page2. This routine checks ioc_status to decide whether the | |
6361 | * page read succeeded or not. | |
6362 | * | |
6363 | * Return: 0 on success, non-zero on failure. | |
6364 | */ | |
6365 | int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc, | |
6366 | struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action) | |
6367 | { | |
6368 | struct mpi3_config_page_header cfg_hdr; | |
6369 | struct mpi3_config_request cfg_req; | |
6370 | u16 ioc_status = 0; | |
6371 | ||
6372 | memset(driver_pg2, 0, pg_sz); | |
6373 | memset(&cfg_hdr, 0, sizeof(cfg_hdr)); | |
6374 | memset(&cfg_req, 0, sizeof(cfg_req)); | |
6375 | ||
6376 | cfg_req.function = MPI3_FUNCTION_CONFIG; | |
6377 | cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; | |
6378 | cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; | |
6379 | cfg_req.page_number = 2; | |
6380 | cfg_req.page_address = 0; | |
6381 | cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION; | |
6382 | ||
6383 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, | |
6384 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { | |
6385 | ioc_err(mrioc, "driver page2 header read failed\n"); | |
6386 | goto out_failed; | |
6387 | } | |
6388 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6389 | ioc_err(mrioc, | |
6390 | "driver page2 header read failed with ioc_status(0x%04x)\n", | |
6391 | ioc_status); | |
6392 | goto out_failed; | |
6393 | } | |
6394 | cfg_req.action = page_action; | |
6395 | ||
6396 | if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, | |
6397 | MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) { | |
6398 | ioc_err(mrioc, "driver page2 read failed\n"); | |
6399 | goto out_failed; | |
6400 | } | |
6401 | if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { | |
6402 | ioc_err(mrioc, | |
6403 | "driver page2 read failed with ioc_status(0x%04x)\n", | |
6404 | ioc_status); | |
6405 | goto out_failed; | |
6406 | } | |
6407 | return 0; | |
6408 | out_failed: | |
6409 | return -1; | |
6410 | } | |
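
/*
 * Illustrative sketch (not part of the driver): unlike the other
 * getters, mpi3mr_cfg_get_driver_pg2() takes the read action from the
 * caller, so the same helper can fetch either the current or the
 * persistent copy of driver page2. A hypothetical caller:
 *
 *	struct mpi3_driver_page2 drv_pg2;
 *
 *	if (mpi3mr_cfg_get_driver_pg2(mrioc, &drv_pg2, sizeof(drv_pg2),
 *	    MPI3_CONFIG_ACTION_READ_PERSISTENT))
 *		return -1;
 */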
6411 |