scsi: mpt3sas: Fix false timeout prints for ioctl and other internal commands during...
[linux-2.6-block.git] drivers/scsi/mpt3sas/mpt3sas_base.c
/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
    " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
    " enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - checks whether an internal command
 *	timed out or was terminated by a host reset.
 *
 * @ioc: per adapter object.
 * @status: status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * @Returns - 1 if a host reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
    u8 status, void *mpi_request, int sz)
{
    u8 issue_reset = 0;

    if (!(status & MPT3_CMD_RESET))
        issue_reset = 1;

    pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
        ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
    _debug_dump_mf(mpi_request, sz);

    return issue_reset;
}

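/*
 * Illustrative caller pattern for the helper above (a sketch only, not
 * the literal driver code: the specific command, its timeout plumbing
 * and the dword-sized request length passed as @sz are assumptions for
 * this example):
 *
 *	wait_for_completion_timeout(&ioc->base_cmds.done, timeout*HZ);
 *	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 *		issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
 *		    ioc->base_cmds.status, mpi_request,
 *		    sizeof(*mpi_request)/4);
 *		if (issue_reset)
 *			goto issue_host_reset;
 *	}
 */
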
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
    int ret = param_set_int(val, kp);
    struct MPT3SAS_ADAPTER *ioc;

    if (ret)
        return ret;

    /* global ioc spinlock to protect controller list on list operations */
    pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
    spin_lock(&gioc_lock);
    list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
        ioc->fwfault_debug = mpt3sas_fwfault_debug;
    spin_unlock(&gioc_lock);
    return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
    param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *	in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 *
 * @Returns - Nothing
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
    u32 index)
{
    /*
     * 256 is offset within sys register.
     * 256 offset MPI frame starts. Max MPI frame supported is 32.
     * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
     */
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
        MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

    writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *	to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)src;

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)(src);

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *	for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * @Return: chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    void __iomem *base_chain, *chain_virt;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_virt;
}

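/*
 * Worked example of the address arithmetic above, using the mCPU
 * endpoint values assumed by the BAR0 layout comment in
 * _clone_sg_entries() below (RequestCredit + 1 == 32, request_sz ==
 * 128, MPI_FRAME_START_OFFSET == 256, REPLY_FREE_POOL_SIZE == 512):
 *
 *	base_chain = chip + 256 + (32 * 128) + 512 = chip + 4864
 *
 * which is the 4864 byte offset where that layout comment places the
 * start of the SGE chain region.
 */
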
/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *	in BAR0 for scatter gather chains, for
 *	the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * @Return - Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    phys_addr_t base_chain_phys, chain_phys;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *	buffer address for the provided smid.
 *	(Each smid can have 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * @Returns - Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    // Added extra 1 to reach end of chain.
    void __iomem *chain_end = _base_get_chain(ioc,
            cmd_credit + 1,
            ioc->facts.MaxChainDepth);
    return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *	Host buffer Physical address for the provided smid.
 *	(Each smid can have 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * @Returns - Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
            cmd_credit + 1,
            ioc->facts.MaxChainDepth);
    return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *	lookup list and provides the chain_buffer
 *	address for the matching dma address.
 *	(Each smid can have 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * @Returns - Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
    dma_addr_t chain_buffer_dma)
{
    u16 index, j;
    struct chain_tracker *ct;

    for (index = 0; index < ioc->scsiio_depth; index++) {
        for (j = 0; j < ioc->chains_needed_per_io; j++) {
            ct = &ioc->chain_lookup[index].chains_per_smid[j];
            if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                return ct->chain_buffer;
        }
    }
    pr_info(MPT3SAS_FMT
        "Provided chain_buffer_dma address is not in the lookup list\n",
        ioc->name);
    return NULL;
}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *	are handled here. Base function for
 *	double buffering, before submitting
 *	the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 *
 * @Returns: Nothing.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
    void *mpi_request, u16 smid)
{
    Mpi2SGESimple32_t *sgel, *sgel_next;
    u32 sgl_flags, sge_chain_count = 0;
    bool is_write = 0;
    u16 i = 0;
    void __iomem *buffer_iomem;
    phys_addr_t buffer_iomem_phys;
    void __iomem *buff_ptr;
    phys_addr_t buff_ptr_phys;
    void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    phys_addr_t dst_addr_phys;
    MPI2RequestHeader_t *request_hdr;
    struct scsi_cmnd *scmd;
    struct scatterlist *sg_scmd = NULL;
    int is_scsiio_req = 0;

    request_hdr = (MPI2RequestHeader_t *) mpi_request;

    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
        Mpi25SCSIIORequest_t *scsiio_request =
            (Mpi25SCSIIORequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
        is_scsiio_req = 1;
    } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
        Mpi2ConfigRequest_t *config_req =
            (Mpi2ConfigRequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
    } else
        return;

    /* From smid we can get scsi_cmd, once we have sg_scmd,
     * we just need to get sg_virt and sg_next to get virtual
     * address associated with sgel->Address.
     */

    if (is_scsiio_req) {
        /* Get scsi_cmd using smid */
        scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (scmd == NULL) {
            pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
            return;
        }

        /* Get sg_scmd from scmd provided */
        sg_scmd = scsi_sglist(scmd);
    }

    /*
     * 0 - 255	System register
     * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
     * 4352 - 4864	Reply_free pool (512 byte is reserved
     *		considering maxCredit 32. Reply need extra
     *		room, for mCPU case kept four times of
     *		maxCredit).
     * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
     *		128 byte size = 12288)
     * 17152 - x	Host buffer mapped with smid.
     *		(Each smid can have 64K Max IO.)
     * BAR0+Last 1K	MSIX Addr and Data
     * Total size in use 2113664 bytes of 4MB BAR0
     */

    buffer_iomem = _base_get_buffer_bar0(ioc, smid);
    buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

    buff_ptr = buffer_iomem;
    buff_ptr_phys = buffer_iomem_phys;
    WARN_ON(buff_ptr_phys > U32_MAX);

    if (le32_to_cpu(sgel->FlagsLength) &
            (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
        is_write = 1;

    for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

        sgl_flags =
            (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

        switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
        case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
            /*
             * Helper function which on passing
             * chain_buffer_dma returns chain_buffer. Get
             * the virtual address for sgel->Address
             */
            sgel_next =
                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                    le32_to_cpu(sgel->Address));
            if (sgel_next == NULL)
                return;
            /*
             * This is copying a 128 byte chain
             * frame (not a host buffer)
             */
            dst_chain_addr[sge_chain_count] =
                _base_get_chain(ioc,
                    smid, sge_chain_count);
            src_chain_addr[sge_chain_count] =
                    (void *) sgel_next;
            dst_addr_phys = _base_get_chain_phys(ioc,
                    smid, sge_chain_count);
            WARN_ON(dst_addr_phys > U32_MAX);
            sgel->Address =
                cpu_to_le32(lower_32_bits(dst_addr_phys));
            sgel = sgel_next;
            sge_chain_count++;
            break;
        case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
            if (is_write) {
                if (is_scsiio_req) {
                    _base_clone_to_sys_mem(buff_ptr,
                        sg_virt(sg_scmd),
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    /*
                     * FIXME: this relies on a zero
                     * PCI mem_offset.
                     */
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                } else {
                    _base_clone_to_sys_mem(buff_ptr,
                        ioc->config_vaddr,
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                }
            }
            buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            if ((le32_to_cpu(sgel->FlagsLength) &
                (MPI2_SGE_FLAGS_END_OF_BUFFER
                    << MPI2_SGE_FLAGS_SHIFT)))
                goto eob_clone_chain;
            else {
                /*
                 * Every single element in MPT will have
                 * an associated sg_next. Better to
                 * sanity-check that sg_next is not NULL,
                 * but it will be a bug if it is null.
                 */
                if (is_scsiio_req) {
                    sg_scmd = sg_next(sg_scmd);
                    if (sg_scmd)
                        sgel++;
                    else
                        goto eob_clone_chain;
                }
            }
            break;
        }
    }

eob_clone_chain:
    for (i = 0; i < sge_chain_count; i++) {
        if (is_scsiio_req)
            _base_clone_to_sys_mem(dst_chain_addr[i],
                src_chain_addr[i], ioc->request_sz);
    }
}

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
    struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
    struct pci_dev *pdev;

    if (ioc == NULL)
        return -1;

    pdev = ioc->pdev;
    if (pdev == NULL)
        return -1;
    pci_stop_and_remove_bus_device_locked(pdev);
    return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
    struct MPT3SAS_ADAPTER *ioc =
        container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
    unsigned long flags;
    u32 doorbell;
    int rc;
    struct task_struct *p;


    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->shost_recovery || ioc->pci_error_recovery)
        goto rearm_timer;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

    doorbell = mpt3sas_base_get_iocstate(ioc, 0);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
        pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
            ioc->name);

        /* It may be possible that EEH recovery can resolve some of
         * the pci bus failure issues rather than removing the dead
         * ioc function by considering the controller to be in a
         * non-operational state. So here priority is given to the
         * EEH recovery. If it does not resolve this issue, the
         * mpt3sas driver will consider this controller to be in a
         * non-operational state and remove the dead ioc function.
         */
        if (ioc->non_operational_loop++ < 5) {
            spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                flags);
            goto rearm_timer;
        }

        /*
         * Call _scsih_flush_pending_cmds callback so that we flush all
         * pending commands back to OS. This call is required to avoid
         * deadlock at block layer. Dead IOC will fail to do diag reset,
         * and this call is safe since dead ioc will never return any
         * command back from HW.
         */
        ioc->schedule_dead_ioc_flush_running_cmds(ioc);
        /*
         * Set remove_host flag early since kernel thread will
         * take some time to execute.
         */
        ioc->remove_host = 1;
        /* Remove the dead host */
        p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
            "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
        if (IS_ERR(p))
            pr_err(MPT3SAS_FMT
                "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                ioc->name, __func__);
        else
            pr_err(MPT3SAS_FMT
                "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                ioc->name, __func__);
        return; /* don't rearm timer */
    }

    ioc->non_operational_loop = 0;

    if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
        rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
            __func__, (rc == 0) ? "success" : "failed");
        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
            mpt3sas_base_fault_info(ioc, doorbell &
                MPI2_DOORBELL_DATA_MASK);
        if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
            MPI2_IOC_STATE_OPERATIONAL)
            return; /* don't rearm timer */
    }

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;

    if (ioc->fault_reset_work_q)
        return;

    /* initialize fault polling */

    INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
    snprintf(ioc->fault_reset_work_q_name,
        sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
        ioc->driver_name, ioc->id);
    ioc->fault_reset_work_q =
        create_singlethread_workqueue(ioc->fault_reset_work_q_name);
    if (!ioc->fault_reset_work_q) {
        pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
            ioc->name, __func__, __LINE__);
        return;
    }
    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;
    struct workqueue_struct *wq;

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    wq = ioc->fault_reset_work_q;
    ioc->fault_reset_work_q = NULL;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    if (wq) {
        if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
            flush_workqueue(wq);
        destroy_workqueue(wq);
    }
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
    pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
        ioc->name, fault_code);
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose of stopping both driver and firmware, the end user
 * can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
    u32 doorbell;

    if (!ioc->fwfault_debug)
        return;

    dump_stack();

    doorbell = readl(&ioc->chip->Doorbell);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
        mpt3sas_base_fault_info(ioc, doorbell);
    else {
        writel(0xC0FFEE00, &ioc->chip->Doorbell);
        pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
            ioc->name);
    }

    if (ioc->fwfault_debug == 2)
        for (;;)
            ;
    else
        panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
    MPI2RequestHeader_t *request_hdr)
{
    u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
        MPI2_IOCSTATUS_MASK;
    char *desc = NULL;
    u16 frame_sz;
    char *func_str = NULL;

    /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
        request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
        request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
        return;

    if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
        return;

    switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

    case MPI2_IOCSTATUS_INVALID_FUNCTION:
        desc = "invalid function";
        break;
    case MPI2_IOCSTATUS_BUSY:
        desc = "busy";
        break;
    case MPI2_IOCSTATUS_INVALID_SGL:
        desc = "invalid sgl";
        break;
    case MPI2_IOCSTATUS_INTERNAL_ERROR:
        desc = "internal error";
        break;
    case MPI2_IOCSTATUS_INVALID_VPID:
        desc = "invalid vpid";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
        desc = "insufficient resources";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
        desc = "insufficient power";
        break;
    case MPI2_IOCSTATUS_INVALID_FIELD:
        desc = "invalid field";
        break;
    case MPI2_IOCSTATUS_INVALID_STATE:
        desc = "invalid state";
        break;
    case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
        desc = "op state not supported";
        break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

    case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
        desc = "config invalid action";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
        desc = "config invalid type";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
        desc = "config invalid page";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
        desc = "config invalid data";
        break;
    case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
        desc = "config no defaults";
        break;
    case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
        desc = "config cant commit";
        break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

    case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
    case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
    case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
    case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
    case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
    case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
    case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
    case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
    case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
    case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
        break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
        desc = "eedp guard error";
        break;
    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
        desc = "eedp ref tag error";
        break;
    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
        desc = "eedp app tag error";
        break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

    case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
        desc = "target invalid io index";
        break;
    case MPI2_IOCSTATUS_TARGET_ABORTED:
        desc = "target aborted";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
        desc = "target no conn retryable";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
        desc = "target no connection";
        break;
    case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
        desc = "target xfer count mismatch";
        break;
    case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
        desc = "target data offset error";
        break;
    case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
        desc = "target too much write data";
        break;
    case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
        desc = "target iu too short";
        break;
    case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
        desc = "target ack nak timeout";
        break;
    case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
        desc = "target nak received";
        break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

    case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
        desc = "smp request failed";
        break;
    case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
        desc = "smp data overrun";
        break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

    case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
        desc = "diagnostic released";
        break;
    default:
        break;
    }

    if (!desc)
        return;

    switch (request_hdr->Function) {
    case MPI2_FUNCTION_CONFIG:
        frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
        func_str = "config_page";
        break;
    case MPI2_FUNCTION_SCSI_TASK_MGMT:
        frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
        func_str = "task_mgmt";
        break;
    case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
        frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
        func_str = "sas_iounit_ctl";
        break;
    case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
        frame_sz = sizeof(Mpi2SepRequest_t);
        func_str = "enclosure";
        break;
    case MPI2_FUNCTION_IOC_INIT:
        frame_sz = sizeof(Mpi2IOCInitRequest_t);
        func_str = "ioc_init";
        break;
    case MPI2_FUNCTION_PORT_ENABLE:
        frame_sz = sizeof(Mpi2PortEnableRequest_t);
        func_str = "port_enable";
        break;
    case MPI2_FUNCTION_SMP_PASSTHROUGH:
        frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
        func_str = "smp_passthru";
        break;
    case MPI2_FUNCTION_NVME_ENCAPSULATED:
        frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
            ioc->sge_size;
        func_str = "nvme_encapsulated";
        break;
    default:
        frame_sz = 32;
        func_str = "unknown";
        break;
    }

    pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
        ioc->name, desc, ioc_status, request_hdr, func_str);

    _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
    char *desc = NULL;
    u16 event;

    if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
        return;

    event = le16_to_cpu(mpi_reply->Event);

    switch (event) {
    case MPI2_EVENT_LOG_DATA:
        desc = "Log Data";
        break;
    case MPI2_EVENT_STATE_CHANGE:
        desc = "Status Change";
        break;
    case MPI2_EVENT_HARD_RESET_RECEIVED:
        desc = "Hard Reset Received";
        break;
    case MPI2_EVENT_EVENT_CHANGE:
        desc = "Event Change";
        break;
    case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
        desc = "Device Status Change";
        break;
    case MPI2_EVENT_IR_OPERATION_STATUS:
        if (!ioc->hide_ir_msg)
            desc = "IR Operation Status";
        break;
    case MPI2_EVENT_SAS_DISCOVERY:
    {
        Mpi2EventDataSasDiscovery_t *event_data =
            (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
        pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
            (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
            "start" : "stop");
        if (event_data->DiscoveryStatus)
            pr_cont(" discovery_status(0x%08x)",
                le32_to_cpu(event_data->DiscoveryStatus));
        pr_cont("\n");
        return;
    }
    case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
        desc = "SAS Broadcast Primitive";
        break;
    case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
        desc = "SAS Init Device Status Change";
        break;
    case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
        desc = "SAS Init Table Overflow";
        break;
    case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
        desc = "SAS Topology Change List";
        break;
    case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
        desc = "SAS Enclosure Device Status Change";
        break;
    case MPI2_EVENT_IR_VOLUME:
        if (!ioc->hide_ir_msg)
            desc = "IR Volume";
        break;
    case MPI2_EVENT_IR_PHYSICAL_DISK:
        if (!ioc->hide_ir_msg)
            desc = "IR Physical Disk";
        break;
    case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
        if (!ioc->hide_ir_msg)
            desc = "IR Configuration Change List";
        break;
    case MPI2_EVENT_LOG_ENTRY_ADDED:
        if (!ioc->hide_ir_msg)
            desc = "Log Entry Added";
        break;
    case MPI2_EVENT_TEMP_THRESHOLD:
        desc = "Temperature Threshold";
        break;
    case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
        desc = "Cable Event";
        break;
    case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
        desc = "SAS Device Discovery Error";
        break;
    case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
        desc = "PCIE Device Status Change";
        break;
    case MPI2_EVENT_PCIE_ENUMERATION:
    {
        Mpi26EventDataPCIeEnumeration_t *event_data =
            (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
        pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
            (event_data->ReasonCode ==
                MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
                "start" : "stop");
        if (event_data->EnumerationStatus)
            pr_info("enumeration_status(0x%08x)",
                le32_to_cpu(event_data->EnumerationStatus));
        pr_info("\n");
        return;
    }
    case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
        desc = "PCIE Topology Change List";
        break;
    }

    if (!desc)
        return;

    pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
    union loginfo_type {
        u32	loginfo;
        struct {
            u32	subcode:16;
            u32	code:8;
            u32	originator:4;
            u32	bus_type:4;
        } dw;
    };
    union loginfo_type sas_loginfo;
    char *originator_str = NULL;

    sas_loginfo.loginfo = log_info;
    if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
        return;

    /* each nexus loss loginfo */
    if (log_info == 0x31170000)
        return;

    /* eat the loginfos associated with task aborts */
    if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
        0x31140000 || log_info == 0x31130000))
        return;

    switch (sas_loginfo.dw.originator) {
    case 0:
        originator_str = "IOP";
        break;
    case 1:
        originator_str = "PL";
        break;
    case 2:
        if (!ioc->hide_ir_msg)
            originator_str = "IR";
        else
            originator_str = "WarpDrive";
        break;
    }

    pr_warn(MPT3SAS_FMT
        "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
        ioc->name, log_info,
        originator_str, sas_loginfo.dw.code,
        sas_loginfo.dw.subcode);
}

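/*
 * Worked decode of the bitfields above (little-endian bit-field layout
 * assumed, as the driver does): the nexus loss value 0x31170000 that is
 * filtered out early splits into bus_type 0x3 (SAS), originator 0x1
 * (PL), code 0x17 and subcode 0x0000.
 */
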
/**
 * _base_display_reply_info - verbose translation of a reply frame's
 *	IOCStatus and IOCLogInfo
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;
    u16 ioc_status;
    u32 loginfo = 0;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (unlikely(!mpi_reply)) {
        pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
            ioc->name, __FILE__, __LINE__, __func__);
        return;
    }
    ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

    if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
        (ioc->logging_level & MPT_DEBUG_REPLY)) {
        _base_sas_ioc_info(ioc, mpi_reply,
            mpt3sas_base_get_msg_frame(ioc, smid));
    }

    if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
        loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
        _base_sas_log_info(ioc, loginfo);
    }

    if (ioc_status || loginfo) {
        ioc_status &= MPI2_IOCSTATUS_MASK;
        mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
    }
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
        return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

    if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
        return 1;

    ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
    if (mpi_reply) {
        ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
        memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
    }
    ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

    complete(&ioc->base_cmds.done);
    return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
    Mpi2EventNotificationReply_t *mpi_reply;
    Mpi2EventAckRequest_t *ack_request;
    u16 smid;
    struct _event_ack_list *delayed_event_ack;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (!mpi_reply)
        return 1;
    if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
        return 1;

    _base_display_event_data(ioc, mpi_reply);

    if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
        goto out;
    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
            GFP_ATOMIC);
        if (!delayed_event_ack)
            goto out;
        INIT_LIST_HEAD(&delayed_event_ack->list);
        delayed_event_ack->Event = mpi_reply->Event;
        delayed_event_ack->EventContext = mpi_reply->EventContext;
        list_add_tail(&delayed_event_ack->list,
            &ioc->delayed_event_ack_list);
        dewtprintk(ioc, pr_info(MPT3SAS_FMT
            "DELAYED: EVENT ACK: event (0x%04x)\n",
            ioc->name, le16_to_cpu(mpi_reply->Event)));
        goto out;
    }

    ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
    ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
    ack_request->Event = mpi_reply->Event;
    ack_request->EventContext = mpi_reply->EventContext;
    ack_request->VF_ID = 0;  /* TODO */
    ack_request->VP_ID = 0;
    mpt3sas_base_put_smid_default(ioc, smid);

 out:

    /* scsih callback handler */
    mpt3sas_scsih_event_callback(ioc, msix_index, reply);

    /* ctl callback handler */
    mpt3sas_ctl_event_callback(ioc, msix_index, reply);

    return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    struct scsi_cmnd *cmd;

    if (WARN_ON(!smid) ||
        WARN_ON(smid >= ioc->hi_priority_smid))
        return NULL;

    cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
    if (cmd)
        return scsi_cmd_priv(cmd);

    return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    int i;
    u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
    u8 cb_idx = 0xFF;

    if (smid < ioc->hi_priority_smid) {
        struct scsiio_tracker *st;

        if (smid < ctl_smid) {
            st = _get_st_from_smid(ioc, smid);
            if (st)
                cb_idx = st->cb_idx;
        } else if (smid == ctl_smid)
            cb_idx = ioc->ctl_cb_idx;
    } else if (smid < ioc->internal_smid) {
        i = smid - ioc->hi_priority_smid;
        cb_idx = ioc->hpr_lookup[i].cb_idx;
    } else if (smid <= ioc->hba_queue_depth) {
        i = smid - ioc->internal_smid;
        cb_idx = ioc->internal_lookup[i].cb_idx;
    }
    return cb_idx;
}

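/*
 * For reference, the smid ranges decoded above partition the request
 * pool as follows (bounds taken directly from the checks in this
 * function; anything else yields the invalid index 0xFF):
 *
 *	[1 .. ctl_smid)				SCSI IO commands
 *	ctl_smid				ctl (ioctl) command
 *	[hi_priority_smid .. internal_smid)	high priority commands
 *	[internal_smid .. hba_queue_depth]	internal commands
 */
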
/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    ioc->mask_interrupts = 1;
    him_register = readl(&ioc->chip->HostInterruptMask);
    him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
    writel(him_register, &ioc->chip->HostInterruptMask);
    readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    him_register = readl(&ioc->chip->HostInterruptMask);
    him_register &= ~MPI2_HIM_RIM;
    writel(him_register, &ioc->chip->HostInterruptMask);
    ioc->mask_interrupts = 0;
}

union reply_descriptor {
    u64 word;
    struct {
        u32 low;
        u32 high;
    } u;
};

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
    struct adapter_reply_queue *reply_q = bus_id;
    union reply_descriptor rd;
    u32 completed_cmds;
    u8 request_desript_type;
    u16 smid;
    u8 cb_idx;
    u32 reply;
    u8 msix_index = reply_q->msix_index;
    struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
    Mpi2ReplyDescriptorsUnion_t *rpf;
    u8 rc;

    if (ioc->mask_interrupts)
        return IRQ_NONE;

    if (!atomic_add_unless(&reply_q->busy, 1, 1))
        return IRQ_NONE;

    rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
    request_desript_type = rpf->Default.ReplyFlags
        & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
    if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
        atomic_dec(&reply_q->busy);
        return IRQ_NONE;
    }

    completed_cmds = 0;
    cb_idx = 0xFF;
    do {
        rd.word = le64_to_cpu(rpf->Words);
        if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
            goto out;
        reply = 0;
        smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
        if (request_desript_type ==
            MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
            request_desript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
            request_desript_type ==
            MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
            cb_idx = _base_get_cb_idx(ioc, smid);
            if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                (likely(mpt_callbacks[cb_idx] != NULL))) {
                rc = mpt_callbacks[cb_idx](ioc, smid,
                    msix_index, 0);
                if (rc)
                    mpt3sas_base_free_smid(ioc, smid);
            }
        } else if (request_desript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
            reply = le32_to_cpu(
                rpf->AddressReply.ReplyFrameAddress);
            if (reply > ioc->reply_dma_max_address ||
                reply < ioc->reply_dma_min_address)
                reply = 0;
            if (smid) {
                cb_idx = _base_get_cb_idx(ioc, smid);
                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                    (likely(mpt_callbacks[cb_idx] != NULL))) {
                    rc = mpt_callbacks[cb_idx](ioc, smid,
                        msix_index, reply);
                    if (reply)
                        _base_display_reply_info(ioc,
                            smid, msix_index, reply);
                    if (rc)
                        mpt3sas_base_free_smid(ioc,
                            smid);
                }
            } else {
                _base_async_event(ioc, msix_index, reply);
            }

            /* reply free queue handling */
            if (reply) {
                ioc->reply_free_host_index =
                    (ioc->reply_free_host_index ==
                    (ioc->reply_free_queue_depth - 1)) ?
                    0 : ioc->reply_free_host_index + 1;
                ioc->reply_free[ioc->reply_free_host_index] =
                    cpu_to_le32(reply);
                if (ioc->is_mcpu_endpoint)
                    _base_clone_reply_to_sys_mem(ioc,
                        reply,
                        ioc->reply_free_host_index);
                writel(ioc->reply_free_host_index,
                    &ioc->chip->ReplyFreeHostIndex);
            }
        }

        rpf->Words = cpu_to_le64(ULLONG_MAX);
        reply_q->reply_post_host_index =
            (reply_q->reply_post_host_index ==
            (ioc->reply_post_queue_depth - 1)) ? 0 :
            reply_q->reply_post_host_index + 1;
        request_desript_type =
            reply_q->reply_post_free[reply_q->reply_post_host_index].
            Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        completed_cmds++;
        /* Update the reply post host index after continuously
         * processing the threshold number of Reply Descriptors.
         * So that FW can find enough entries to post the Reply
         * Descriptors in the reply descriptor post queue.
         */
        if (completed_cmds > ioc->hba_queue_depth/3) {
            if (ioc->combined_reply_queue) {
                writel(reply_q->reply_post_host_index |
                    ((msix_index & 7) <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    ioc->replyPostRegisterIndex[msix_index/8]);
            } else {
                writel(reply_q->reply_post_host_index |
                    (msix_index <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    &ioc->chip->ReplyPostHostIndex);
            }
            completed_cmds = 1;
        }
        if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
            goto out;
        if (!reply_q->reply_post_host_index)
            rpf = reply_q->reply_post_free;
        else
            rpf++;
    } while (1);

 out:

    if (!completed_cmds) {
        atomic_dec(&reply_q->busy);
        return IRQ_NONE;
    }

    if (ioc->is_warpdrive) {
        writel(reply_q->reply_post_host_index,
            ioc->reply_post_host_index[msix_index]);
        atomic_dec(&reply_q->busy);
        return IRQ_HANDLED;
    }

    /* Update Reply Post Host Index.
     * For those HBA's which support combined reply queue feature
     * 1. Get the correct Supplemental Reply Post Host Index Register.
     *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
     *    Index Register address bank i.e replyPostRegisterIndex[],
     * 2. Then update this register with new reply host index value
     *    in ReplyPostIndex field and the MSIxIndex field with
     *    msix_index value reduced to a value between 0 and 7,
     *    using a modulo 8 operation. Since each Supplemental Reply Post
     *    Host Index Register supports 8 MSI-X vectors.
     *
     * For other HBA's just update the Reply Post Host Index register with
     * new reply host index value in ReplyPostIndex Field and msix_index
     * value in MSIxIndex field.
     */
    if (ioc->combined_reply_queue)
        writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            ioc->replyPostRegisterIndex[msix_index/8]);
    else
        writel(reply_q->reply_post_host_index | (msix_index <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            &ioc->chip->ReplyPostHostIndex);
    atomic_dec(&reply_q->busy);
    return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - checks whether the controller
 *	supports multi-reply queues
 * @ioc: per adapter object
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
    return (ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Return nothing.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
    struct adapter_reply_queue *reply_q;

    /* If MSIX capability is turned off
     * then multi-queues are not enabled
     */
    if (!_base_is_controller_msix_enabled(ioc))
        return;

    list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
        if (ioc->shost_recovery || ioc->remove_host ||
            ioc->pci_error_recovery)
            return;
        /* TMs are on msix_index == 0 */
        if (reply_q->msix_index == 0)
            continue;
        synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
    }
}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
    mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Returns cb_idx.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
    u8 cb_idx;

    for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
        if (mpt_callbacks[cb_idx] == NULL)
            break;

    mpt_callbacks[cb_idx] = cb_func;
    return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
    u8 cb_idx;

    for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
        mpt3sas_base_release_callback_handler(cb_idx);
}


/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
    u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
        MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
        MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
        MPI2_SGE_FLAGS_SHIFT);
    ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple32_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le32(dma_addr);
}


/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple64_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI commands of the IO request
 *
 * Returns chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd)
{
    struct chain_tracker *chain_req;
    struct scsiio_tracker *st = scsi_cmd_priv(scmd);
    u16 smid = st->smid;
    u8 chain_offset =
        atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

    if (chain_offset == ioc->chains_needed_per_io)
        return NULL;

    chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
    atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
    return chain_req;
}


1738/**
1739 * _base_build_sg - build generic sg
1740 * @ioc: per adapter object
1741 * @psge: virtual address for SGE
1742 * @data_out_dma: physical address for WRITES
1743 * @data_out_sz: data xfer size for WRITES
1744 * @data_in_dma: physical address for READS
1745 * @data_in_sz: data xfer size for READS
1746 *
1747 * Return nothing.
1748 */
1749static void
1750_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1751 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1752 size_t data_in_sz)
1753{
1754 u32 sgl_flags;
1755
1756 if (!data_out_sz && !data_in_sz) {
1757 _base_build_zero_len_sge(ioc, psge);
1758 return;
1759 }
1760
1761 if (data_out_sz && data_in_sz) {
1762 /* WRITE sgel first */
1763 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1764 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1765 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1766 ioc->base_add_sg_single(psge, sgl_flags |
1767 data_out_sz, data_out_dma);
1768
1769 /* incr sgel */
1770 psge += ioc->sge_size;
1771
1772 /* READ sgel last */
1773 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1774 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1775 MPI2_SGE_FLAGS_END_OF_LIST);
1776 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1777 ioc->base_add_sg_single(psge, sgl_flags |
1778 data_in_sz, data_in_dma);
1779 } else if (data_out_sz) /* WRITE */ {
1780 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1781 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1782 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1783 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1784 ioc->base_add_sg_single(psge, sgl_flags |
1785 data_out_sz, data_out_dma);
1786 } else if (data_in_sz) /* READ */ {
1787 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1788 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1789 MPI2_SGE_FLAGS_END_OF_LIST);
1790 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1791 ioc->base_add_sg_single(psge, sgl_flags |
1792 data_in_sz, data_in_dma);
1793 }
1794}
1795
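/*
 * Illustrative usage sketch for _base_build_sg() above (sizes and
 * reply_dma are hypothetical): an internal command that only reads
 * 256 bytes back passes no WRITE buffer,
 *
 *	_base_build_sg(ioc, psge, 0, 0, reply_dma, 256);
 *
 * which takes the READ-only branch and emits one SGE flagged
 * LAST_ELEMENT | END_OF_BUFFER | END_OF_LIST.
 */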
1796/* IEEE format sgls */
1797
1798/**
1799 * _base_build_nvme_prp - This function is called for NVMe end devices to build
1800 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
1801 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
1802 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
1803 * used to describe a larger data buffer. If the data buffer is too large to
1804 * describe using the two PRP entries inside the NVMe message, then PRP1
1805 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
1806 * list located elsewhere in memory to describe the remaining data memory
1807 * segments. The PRP list will be contiguous.
1808 *
1809 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
1810 * consists of a list of PRP entries to describe a number of noncontiguous
1811 * physical memory segments as a single memory buffer, just as a SGL does. Note
1812 * however, that this function is only used by the IOCTL call, so the memory
1813 * given will be guaranteed to be contiguous. There is no need to translate
1814 * non-contiguous SGL into a PRP in this case. All PRPs will describe
1815 * contiguous space that is one page size each.
1816 *
1817 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
1818 * a PRP list pointer or a PRP element, depending upon the command. PRP2
1819 * contains the second PRP element if the memory being described fits within 2
1820 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
1821 *
1822 * A PRP list pointer contains the address of a PRP list, structured as a linear
1823 * array of PRP entries. Each PRP entry in this list describes a segment of
1824 * physical memory.
1825 *
1826 * Each 64-bit PRP entry comprises an address and an offset field. The address
1827 * always points at the beginning of a 4KB physical memory page, and the offset
1828 * describes where within that 4KB page the memory segment begins. Only the
1829 * first element in a PRP list may contain a non-zero offset, implying that all
1830 * memory segments following the first begin at the start of a 4KB page.
1831 *
1832 * Each PRP element normally describes 4KB of physical memory, with exceptions
1833 * for the first and last elements in the list. If the memory being described
1834 * by the list begins at a non-zero offset within the first 4KB page, then the
1835 * first PRP element will contain a non-zero offset indicating where the region
1836 * begins within the 4KB page. The last memory segment may end before the end
1837 * of the 4KB segment, depending upon the overall size of the memory being
1838 * described by the PRP list.
1839 *
1840 * Since PRP entries lack any indication of size, the overall data buffer length
1841 * is used to determine where the end of the data memory buffer is located, and
1842 * how many PRP entries are required to describe it.
1843 *
1844 * @ioc: per adapter object
1845 * @smid: system request message index for getting associated SGL
1846 * @nvme_encap_request: the NVMe request msg frame pointer
1847 * @data_out_dma: physical address for WRITES
1848 * @data_out_sz: data xfer size for WRITES
1849 * @data_in_dma: physical address for READS
1850 * @data_in_sz: data xfer size for READS
1851 *
1852 * Returns nothing.
1853 */
1854static void
1855_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1856 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1857 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1858 size_t data_in_sz)
1859{
1860 int prp_size = NVME_PRP_SIZE;
1861 __le64 *prp_entry, *prp1_entry, *prp2_entry;
1862 __le64 *prp_page;
1863 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
1864 u32 offset, entry_len;
1865 u32 page_mask_result, page_mask;
1866 size_t length;
1867
1868 /*
1869 * Not all commands require a data transfer. If no data, just return
1870 * without constructing any PRP.
1871 */
1872 if (!data_in_sz && !data_out_sz)
1873 return;
1874 /*
1875 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
1876 * PRP1 is located at a 24 byte offset from the start of the NVMe
1877 * command. Then set the current PRP entry pointer to PRP1.
1878 */
494f401b 1879 prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
aff39e61 1880 NVME_CMD_PRP1_OFFSET);
494f401b 1881 prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
1882 NVME_CMD_PRP2_OFFSET);
1883 prp_entry = prp1_entry;
1884 /*
1885 * For the PRP entries, use the specially allocated buffer of
1886 * contiguous memory.
1887 */
494f401b 1888 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
d8335ae2 1889 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1890
1891 /*
1892 * Check if we are within 1 entry of a page boundary; we don't
1893 * want our first entry to be a PRP List entry.
1894 */
1895 page_mask = ioc->page_size - 1;
1896 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1897 if (!page_mask_result) {
1898 /* Bump up to next page boundary. */
494f401b 1899 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
d8335ae2 1900 prp_page_dma = prp_page_dma + prp_size;
1901 }
1902
1903 /*
1904 * Set PRP physical pointer, which initially points to the current PRP
1905 * DMA memory page.
1906 */
d8335ae2 1907 prp_entry_dma = prp_page_dma;
1908
1909 /* Get physical address and length of the data buffer. */
1910 if (data_in_sz) {
d8335ae2 1911 dma_addr = data_in_dma;
1912 length = data_in_sz;
1913 } else {
d8335ae2 1914 dma_addr = data_out_dma;
1915 length = data_out_sz;
1916 }
1917
1918 /* Loop while the length is not zero. */
1919 while (length) {
1920 /*
1921 * Check if we need to put a list pointer here: we do if we
1922 * are at page boundary - prp_size (8 bytes).
1923 */
d8335ae2 1924 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
1925 if (!page_mask_result) {
1926 /*
1927 * This is the last entry in a PRP List, so we need to
1928 * put a PRP list pointer here. What this does is:
1929 * - bump the current memory pointer to the next
1930 * address, which will be the next full page.
1931 * - set the PRP Entry to point to that page. This
1932 * is now the PRP List pointer.
1933 * - bump the PRP Entry pointer to the start of the
1934 * next page. Since all of this PRP memory is
1935 * contiguous, no need to get a new page - it's
1936 * just the next address.
1937 */
1938 prp_entry_dma++;
1939 *prp_entry = cpu_to_le64(prp_entry_dma);
1940 prp_entry++;
1941 }
1942
1943 /* Need to handle if entry will be part of a page. */
d8335ae2 1944 offset = dma_addr & page_mask;
1945 entry_len = ioc->page_size - offset;
1946
1947 if (prp_entry == prp1_entry) {
1948 /*
1949 * Must fill in the first PRP pointer (PRP1) before
1950 * moving on.
1951 */
d8335ae2 1952 *prp1_entry = cpu_to_le64(dma_addr);
1953
1954 /*
1955 * Now point to the second PRP entry within the
1956 * command (PRP2).
1957 */
1958 prp_entry = prp2_entry;
1959 } else if (prp_entry == prp2_entry) {
1960 /*
1961 * Should the PRP2 entry be a PRP List pointer or just
1962 * a regular PRP pointer? If there is more than one
1963 * more page of data, must use a PRP List pointer.
1964 */
1965 if (length > ioc->page_size) {
1966 /*
1967 * PRP2 will contain a PRP List pointer because
1968 * more PRP's are needed with this command. The
1969 * list will start at the beginning of the
1970 * contiguous buffer.
1971 */
d8335ae2 1972 *prp2_entry = cpu_to_le64(prp_entry_dma);
1973
1974 /*
1975 * The next PRP Entry will be the start of the
1976 * first PRP List.
1977 */
1978 prp_entry = prp_page;
1979 } else {
1980 /*
1981 * After this, the PRP Entries are complete.
1982 * This command uses 2 PRP's and no PRP list.
1983 */
d8335ae2 1984 *prp2_entry = cpu_to_le64(dma_addr);
1985 }
1986 } else {
1987 /*
1988 * Put entry in list and bump the addresses.
1989 *
1990 * After PRP1 and PRP2 are filled in, this will fill in
1991 * all remaining PRP entries in a PRP List, one per
1992 * each time through the loop.
1993 */
d8335ae2 1994 *prp_entry = cpu_to_le64(dma_addr);
aff39e61 1995 prp_entry++;
d8335ae2 1996 prp_entry_dma++;
1997 }
1998
1999 /*
2000 * Bump the phys address of the command's data buffer by the
2001 * entry_len.
2002 */
d8335ae2 2003 dma_addr += entry_len;
2004
2005 /* Decrement length accounting for last partial page. */
2006 if (entry_len > length)
2007 length = 0;
2008 else
2009 length -= entry_len;
2010 }
2011}
2012
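/*
 * Worked example for the PRP construction above (addresses invented,
 * 4 KB page size assumed): a 9000-byte buffer at dma address 0x10600.
 *
 *	PRP1    = 0x10600	covers 0x10600..0x10fff (2560 bytes)
 *	PRP2    = PRP list pointer, since 6440 bytes remain (> 1 page)
 *	list[0] = 0x11000	covers 4096 bytes
 *	list[1] = 0x12000	covers the final 2344 bytes
 *
 * Only PRP1 carries a non-zero offset; the overall transfer length
 * (9000) tells the device where the last entry ends.
 */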
2013/**
2014 * base_make_prp_nvme - prepare PRPs (Physical Region Pages),
2015 * the SGL format specific to NVMe drives only
2016 *
2017 * @ioc: per adapter object
2018 * @scmd: SCSI command from the mid-layer
2019 * @mpi_request: mpi request
2020 * @smid: msg Index
2021 * @sge_count: scatter gather element count.
2022 *
2023 * Returns nothing; whether PRPs or IEEE SGLs are built is decided
2024 * beforehand by base_is_prp_possible().
2025 */
494f401b 2026static void
2027base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2028 struct scsi_cmnd *scmd,
2029 Mpi25SCSIIORequest_t *mpi_request,
2030 u16 smid, int sge_count)
2031{
d8335ae2 2032 int sge_len, num_prp_in_chain = 0;
016d5c35 2033 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
494f401b 2034 __le64 *curr_buff;
d8335ae2 2035 dma_addr_t msg_dma, sge_addr, offset;
2036 u32 page_mask, page_mask_result;
2037 struct scatterlist *sg_scmd;
2038 u32 first_prp_len;
2039 int data_len = scsi_bufflen(scmd);
2040 u32 nvme_pg_size;
2041
2042 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2043 /*
2044 * NVMe has a very convoluted PRP format. One PRP is required
2045 * for each page or partial page. The driver needs to split up OS sg_list
2046 * entries if they are longer than one page or cross a page
2047 * boundary. The driver also has to insert a PRP list pointer entry as
2048 * the last entry in each physical page of the PRP list.
2049 *
2050 * NOTE: The first PRP "entry" is actually placed in the first
2051 * SGL entry in the main message as IEEE 64 format. The 2nd
2052 * entry in the main message is the chain element, and the rest
2053 * of the PRP entries are built in the contiguous pcie buffer.
2054 */
2055 page_mask = nvme_pg_size - 1;
2056
2057 /*
2058 * Native SGL is needed.
2059 * Put a chain element in main message frame that points to the first
2060 * chain buffer.
2061 *
2062 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2063 * a native SGL.
2064 */
2065
2066 /* Set main message chain element pointer */
2067 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2068 /*
2069 * For NVMe the chain element needs to be the 2nd SG entry in the main
2070 * message.
2071 */
2072 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2073 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2074
2075 /*
2076 * For the PRP entries, use the specially allocated buffer of
2077 * contiguous memory. Normal chain buffers can't be used
2078 * because each chain buffer would need to be the size of an OS
2079 * page (4k).
2080 */
2081 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
d8335ae2 2082 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
016d5c35 2083
d8335ae2 2084 main_chain_element->Address = cpu_to_le64(msg_dma);
2085 main_chain_element->NextChainOffset = 0;
2086 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2087 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2088 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2089
2090 /* Build first prp, sge need not to be page aligned*/
2091 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2092 sg_scmd = scsi_sglist(scmd);
2093 sge_addr = sg_dma_address(sg_scmd);
2094 sge_len = sg_dma_len(sg_scmd);
2095
d8335ae2 2096 offset = sge_addr & page_mask;
2097 first_prp_len = nvme_pg_size - offset;
2098
2099 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2100 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2101
2102 data_len -= first_prp_len;
2103
2104 if (sge_len > first_prp_len) {
2105 sge_addr += first_prp_len;
2106 sge_len -= first_prp_len;
2107 } else if (data_len && (sge_len == first_prp_len)) {
2108 sg_scmd = sg_next(sg_scmd);
2109 sge_addr = sg_dma_address(sg_scmd);
2110 sge_len = sg_dma_len(sg_scmd);
2111 }
2112
2113 for (;;) {
d8335ae2 2114 offset = sge_addr & page_mask;
2115
2116 /* Put PRP pointer due to page boundary*/
2117 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2118 if (unlikely(!page_mask_result)) {
2119 scmd_printk(KERN_NOTICE,
2120 scmd, "page boundary curr_buff: 0x%p\n",
2121 curr_buff);
2122 msg_dma += 8;
2123 *curr_buff = cpu_to_le64(msg_dma);
2124 curr_buff++;
2125 num_prp_in_chain++;
2126 }
2127
2128 *curr_buff = cpu_to_le64(sge_addr);
2129 curr_buff++;
d8335ae2 2130 msg_dma += 8;
2131 num_prp_in_chain++;
2132
2133 sge_addr += nvme_pg_size;
2134 sge_len -= nvme_pg_size;
2135 data_len -= nvme_pg_size;
2136
2137 if (data_len <= 0)
2138 break;
2139
2140 if (sge_len > 0)
2141 continue;
2142
2143 sg_scmd = sg_next(sg_scmd);
2144 sge_addr = sg_dma_address(sg_scmd);
2145 sge_len = sg_dma_len(sg_scmd);
2146 }
2147
2148 main_chain_element->Length =
2149 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2150 return;
2151}
2152
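/*
 * Sketch of the loop above with invented lengths (4 KB NVMe page
 * assumed): a single page-aligned 12 KB OS SGE becomes three PRP
 * entries, and a 12 KB buffer split into three 4 KB SGEs becomes the
 * same three entries - the loop advances in page-size steps and only
 * calls sg_next() once the current SGE is consumed (sge_len <= 0).
 */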
2153static bool
2154base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2155 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2156{
2157 u32 data_length = 0;
2158 struct scatterlist *sg_scmd;
2159 bool build_prp = true;
2160
494f401b 2161 data_length = scsi_bufflen(scmd);
2162 sg_scmd = scsi_sglist(scmd);
2163
2164 /* If the data length is <= 16K and the number of SGE entries is <= 2,
2165 * we build an IEEE SGL.
2166 */
2167 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2168 build_prp = false;
2169
2170 return build_prp;
2171}
2172
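/*
 * Worked example for the check above (NVME_PRP_PAGE_SIZE assumed to
 * be 4 KB): 16 KB of data in 2 SGEs -> IEEE SGL; 16 KB in 3 SGEs, or
 * anything larger than NVME_PRP_PAGE_SIZE * 4 bytes -> native PRP.
 */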
2173/**
2174 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2175 * determine if the driver needs to build a native SGL. If so, that native
2176 * SGL is built in the special contiguous buffers allocated especially for
2177 * PCIe SGL creation. If the driver will not build a native SGL, it returns
2178 * 1 and a normal IEEE SGL will be built. Currently this routine
2179 * supports NVMe.
2180 * @ioc: per adapter object
2181 * @mpi_request: mf request pointer
2182 * @smid: system request message index
2183 * @scmd: scsi command
2184 * @pcie_device: points to the PCIe device's info
2185 *
2186 * Returns 0 if native SGL was built, 1 if no SGL was built
2187 */
2188static int
2189_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2190 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2191 struct _pcie_device *pcie_device)
2192{
2193 struct scatterlist *sg_scmd;
2194 int sges_left;
2195
2196 /* Get the SG list pointer and info. */
2197 sg_scmd = scsi_sglist(scmd);
2198 sges_left = scsi_dma_map(scmd);
2199 if (sges_left < 0) {
2200 sdev_printk(KERN_ERR, scmd->device,
2201 "scsi_dma_map failed: request for %d bytes!\n",
2202 scsi_bufflen(scmd));
2203 return 1;
2204 }
2205
2206 /* Check if we need to build a native SG list. */
2207 if (base_is_prp_possible(ioc, pcie_device,
2208 scmd, sges_left) == 0) {
2209 /* PRP is not possible; unmap and fall back to an IEEE SGL. */
2210 goto out;
2211 }
2212
2213 /*
2214 * Build native NVMe PRP.
2215 */
2216 base_make_prp_nvme(ioc, scmd, mpi_request,
2217 smid, sges_left);
2218
2219 return 0;
2220out:
2221 scsi_dma_unmap(scmd);
2222 return 1;
2223}
2224
2225/**
2226 * _base_add_sg_single_ieee - add sg element for IEEE format
2227 * @paddr: virtual address for SGE
2228 * @flags: SGE flags
2229 * @chain_offset: number of 128 byte elements from start of segment
2230 * @length: data transfer length
2231 * @dma_addr: Physical address
2232 *
2233 * Return nothing.
2234 */
2235static void
2236_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2237 dma_addr_t dma_addr)
2238{
2239 Mpi25IeeeSgeChain64_t *sgel = paddr;
2240
2241 sgel->Flags = flags;
2242 sgel->NextChainOffset = chain_offset;
2243 sgel->Length = cpu_to_le32(length);
2244 sgel->Address = cpu_to_le64(dma_addr);
2245}
2246
2247/**
2248 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2249 * @ioc: per adapter object
2250 * @paddr: virtual address for SGE
2251 *
2252 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2253 * something to use if the target device goes brain dead and tries
2254 * to send data even when none is asked for.
2255 *
2256 * Return nothing.
2257 */
2258static void
2259_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2260{
2261 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2262 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2263 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
b130b0d5 2264
2265 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2266}
2267
2268/**
2269 * _base_build_sg_scmd - main sg creation routine
016d5c35 2270 * pcie_device is unused here!
2271 * @ioc: per adapter object
2272 * @scmd: scsi command
2273 * @smid: system request message index
016d5c35 2274 * @unused: unused pcie_device pointer
2275 * Context: none.
2276 *
2277 * The main routine that builds scatter gather table from a given
2278 * scsi request sent via the .queuecommand main handler.
2279 *
2280 * Returns 0 success, anything else error
2281 */
2282static int
2283_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
016d5c35 2284 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2285{
2286 Mpi2SCSIIORequest_t *mpi_request;
2287 dma_addr_t chain_dma;
2288 struct scatterlist *sg_scmd;
2289 void *sg_local, *chain;
2290 u32 chain_offset;
2291 u32 chain_length;
2292 u32 chain_flags;
2293 int sges_left;
2294 u32 sges_in_segment;
2295 u32 sgl_flags;
2296 u32 sgl_flags_last_element;
2297 u32 sgl_flags_end_buffer;
2298 struct chain_tracker *chain_req;
2299
2300 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2301
2302 /* init scatter gather flags */
2303 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2304 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2305 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2306 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2307 << MPI2_SGE_FLAGS_SHIFT;
2308 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2309 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2310 << MPI2_SGE_FLAGS_SHIFT;
2311 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2312
2313 sg_scmd = scsi_sglist(scmd);
2314 sges_left = scsi_dma_map(scmd);
2315 if (sges_left < 0) {
2316 sdev_printk(KERN_ERR, scmd->device,
2317 "pci_map_sg failed: request for %d bytes!\n",
2318 scsi_bufflen(scmd));
2319 return -ENOMEM;
2320 }
2321
2322 sg_local = &mpi_request->SGL;
2323 sges_in_segment = ioc->max_sges_in_main_message;
2324 if (sges_left <= sges_in_segment)
2325 goto fill_in_last_segment;
2326
2327 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2328 (sges_in_segment * ioc->sge_size))/4;
2329
2330 /* fill in main message segment when there is a chain following */
2331 while (sges_in_segment) {
2332 if (sges_in_segment == 1)
2333 ioc->base_add_sg_single(sg_local,
2334 sgl_flags_last_element | sg_dma_len(sg_scmd),
2335 sg_dma_address(sg_scmd));
2336 else
2337 ioc->base_add_sg_single(sg_local, sgl_flags |
2338 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2339 sg_scmd = sg_next(sg_scmd);
2340 sg_local += ioc->sge_size;
2341 sges_left--;
2342 sges_in_segment--;
2343 }
2344
2345 /* initializing the chain flags and pointers */
2346 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
dbec4c90 2347 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2348 if (!chain_req)
2349 return -1;
2350 chain = chain_req->chain_buffer;
2351 chain_dma = chain_req->chain_buffer_dma;
2352 do {
2353 sges_in_segment = (sges_left <=
2354 ioc->max_sges_in_chain_message) ? sges_left :
2355 ioc->max_sges_in_chain_message;
2356 chain_offset = (sges_left == sges_in_segment) ?
2357 0 : (sges_in_segment * ioc->sge_size)/4;
2358 chain_length = sges_in_segment * ioc->sge_size;
2359 if (chain_offset) {
2360 chain_offset = chain_offset <<
2361 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2362 chain_length += ioc->sge_size;
2363 }
2364 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2365 chain_length, chain_dma);
2366 sg_local = chain;
2367 if (!chain_offset)
2368 goto fill_in_last_segment;
2369
2370 /* fill in chain segments */
2371 while (sges_in_segment) {
2372 if (sges_in_segment == 1)
2373 ioc->base_add_sg_single(sg_local,
2374 sgl_flags_last_element |
2375 sg_dma_len(sg_scmd),
2376 sg_dma_address(sg_scmd));
2377 else
2378 ioc->base_add_sg_single(sg_local, sgl_flags |
2379 sg_dma_len(sg_scmd),
2380 sg_dma_address(sg_scmd));
2381 sg_scmd = sg_next(sg_scmd);
2382 sg_local += ioc->sge_size;
2383 sges_left--;
2384 sges_in_segment--;
2385 }
2386
dbec4c90 2387 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2388 if (!chain_req)
2389 return -1;
2390 chain = chain_req->chain_buffer;
2391 chain_dma = chain_req->chain_buffer_dma;
2392 } while (1);
2393
2394
2395 fill_in_last_segment:
2396
2397 /* fill the last segment */
2398 while (sges_left) {
2399 if (sges_left == 1)
2400 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2401 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2402 else
2403 ioc->base_add_sg_single(sg_local, sgl_flags |
2404 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2405 sg_scmd = sg_next(sg_scmd);
2406 sg_local += ioc->sge_size;
2407 sges_left--;
2408 }
2409
2410 return 0;
2411}
2412
2413/**
2414 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2415 * @ioc: per adapter object
2416 * @scmd: scsi command
2417 * @smid: system request message index
2418 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2419 * constructed on need.
2420 * Context: none.
2421 *
2422 * The main routine that builds scatter gather table from a given
2423 * scsi request sent via the .queuecommand main handler.
2424 *
2425 * Returns 0 success, anything else error
2426 */
2427static int
2428_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
016d5c35 2429 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
f92363d1 2430{
016d5c35 2431 Mpi25SCSIIORequest_t *mpi_request;
2432 dma_addr_t chain_dma;
2433 struct scatterlist *sg_scmd;
2434 void *sg_local, *chain;
2435 u32 chain_offset;
2436 u32 chain_length;
2437 int sges_left;
2438 u32 sges_in_segment;
2439 u8 simple_sgl_flags;
2440 u8 simple_sgl_flags_last;
2441 u8 chain_sgl_flags;
2442 struct chain_tracker *chain_req;
2443
2444 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2445
2446 /* init scatter gather flags */
2447 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2448 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2449 simple_sgl_flags_last = simple_sgl_flags |
2450 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2451 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2452 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2453
2454 /* Check if we need to build a native SG list. */
2455 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2456 smid, scmd, pcie_device) == 0)) {
2457 /* We built a native SG list, just return. */
2458 return 0;
2459 }
2460
2461 sg_scmd = scsi_sglist(scmd);
2462 sges_left = scsi_dma_map(scmd);
62f5c74c 2463 if (sges_left < 0) {
2464 sdev_printk(KERN_ERR, scmd->device,
2465 "pci_map_sg failed: request for %d bytes!\n",
2466 scsi_bufflen(scmd));
2467 return -ENOMEM;
2468 }
2469
2470 sg_local = &mpi_request->SGL;
2471 sges_in_segment = (ioc->request_sz -
016d5c35 2472 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2473 if (sges_left <= sges_in_segment)
2474 goto fill_in_last_segment;
2475
2476 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
016d5c35 2477 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2478
2479 /* fill in main message segment when there is a chain following */
2480 while (sges_in_segment > 1) {
2481 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2482 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2483 sg_scmd = sg_next(sg_scmd);
2484 sg_local += ioc->sge_size_ieee;
2485 sges_left--;
2486 sges_in_segment--;
2487 }
2488
25ef16d0 2489 /* initializing the pointers */
dbec4c90 2490 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2491 if (!chain_req)
2492 return -1;
2493 chain = chain_req->chain_buffer;
2494 chain_dma = chain_req->chain_buffer_dma;
2495 do {
2496 sges_in_segment = (sges_left <=
2497 ioc->max_sges_in_chain_message) ? sges_left :
2498 ioc->max_sges_in_chain_message;
2499 chain_offset = (sges_left == sges_in_segment) ?
2500 0 : sges_in_segment;
2501 chain_length = sges_in_segment * ioc->sge_size_ieee;
2502 if (chain_offset)
2503 chain_length += ioc->sge_size_ieee;
2504 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2505 chain_offset, chain_length, chain_dma);
2506
2507 sg_local = chain;
2508 if (!chain_offset)
2509 goto fill_in_last_segment;
2510
2511 /* fill in chain segments */
2512 while (sges_in_segment) {
2513 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2514 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2515 sg_scmd = sg_next(sg_scmd);
2516 sg_local += ioc->sge_size_ieee;
2517 sges_left--;
2518 sges_in_segment--;
2519 }
2520
dbec4c90 2521 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2522 if (!chain_req)
2523 return -1;
2524 chain = chain_req->chain_buffer;
2525 chain_dma = chain_req->chain_buffer_dma;
2526 } while (1);
2527
2528
2529 fill_in_last_segment:
2530
2531 /* fill the last segment */
62f5c74c 2532 while (sges_left > 0) {
2533 if (sges_left == 1)
2534 _base_add_sg_single_ieee(sg_local,
2535 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2536 sg_dma_address(sg_scmd));
2537 else
2538 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2539 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2540 sg_scmd = sg_next(sg_scmd);
2541 sg_local += ioc->sge_size_ieee;
2542 sges_left--;
2543 }
2544
2545 return 0;
2546}
2547
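/*
 * Worked example for the IEEE chaining above (counts invented): with
 * room for 8 SGEs in the main frame, a 20-element scatter list stores
 * 7 simple SGEs there plus one chain SGE whose Length/NextChainOffset
 * describe the chain buffer holding the remaining 13; a further chain
 * would be linked the same way if those overflowed as well.
 */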
2548/**
2549 * _base_build_sg_ieee - build generic sg for IEEE format
2550 * @ioc: per adapter object
2551 * @psge: virtual address for SGE
2552 * @data_out_dma: physical address for WRITES
2553 * @data_out_sz: data xfer size for WRITES
2554 * @data_in_dma: physical address for READS
2555 * @data_in_sz: data xfer size for READS
2556 *
2557 * Return nothing.
2558 */
2559static void
2560_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2561 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2562 size_t data_in_sz)
2563{
2564 u8 sgl_flags;
2565
2566 if (!data_out_sz && !data_in_sz) {
2567 _base_build_zero_len_sge_ieee(ioc, psge);
2568 return;
2569 }
2570
2571 if (data_out_sz && data_in_sz) {
2572 /* WRITE sgel first */
2573 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2574 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2575 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2576 data_out_dma);
2577
2578 /* incr sgel */
2579 psge += ioc->sge_size_ieee;
2580
2581 /* READ sgel last */
2582 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2583 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2584 data_in_dma);
2585 } else if (data_out_sz) /* WRITE */ {
2586 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2587 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2588 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2589 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2590 data_out_dma);
2591 } else if (data_in_sz) /* READ */ {
2592 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2593 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2594 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2595 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2596 data_in_dma);
2597 }
2598}
2599
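/*
 * Illustrative usage sketch for _base_build_sg_ieee() above (sizes
 * and dma handles hypothetical): an ioctl writing a 64-byte request
 * and reading back a 512-byte reply builds both SGEs in one call,
 *
 *	_base_build_sg_ieee(ioc, psge, req_dma, 64, reply_dma, 512);
 *
 * placing the WRITE SGE first and marking the READ SGE with
 * MPI25_IEEE_SGE_FLAGS_END_OF_LIST.
 */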
2600#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2601
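/*
 * Worked example: with 4 KB pages (PAGE_SHIFT = 12), convert_to_kb(x)
 * is x << 2, so s.totalram = 262144 pages prints as 1048576 kB.
 */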
2602/**
2603 * _base_config_dma_addressing - set dma addressing
2604 * @ioc: per adapter object
2605 * @pdev: PCI device struct
2606 *
2607 * Returns 0 for success, non-zero for failure.
2608 */
2609static int
2610_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2611{
2612 struct sysinfo s;
2613 u64 consistent_dma_mask;
2614
2615 if (ioc->is_mcpu_endpoint)
2616 goto try_32bit;
2617
2618 if (ioc->dma_mask)
2619 consistent_dma_mask = DMA_BIT_MASK(64);
2620 else
2621 consistent_dma_mask = DMA_BIT_MASK(32);
2622
2623 if (sizeof(dma_addr_t) > 4) {
2624 const uint64_t required_mask =
2625 dma_get_required_mask(&pdev->dev);
2626 if ((required_mask > DMA_BIT_MASK(32)) &&
2627 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
9b05c91a 2628 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
2629 ioc->base_add_sg_single = &_base_add_sg_single_64;
2630 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
9b05c91a 2631 ioc->dma_mask = 64;
2632 goto out;
2633 }
2634 }
2635
0448f019 2636 try_32bit:
2637 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2638 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2639 ioc->base_add_sg_single = &_base_add_sg_single_32;
2640 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
9b05c91a 2641 ioc->dma_mask = 32;
2642 } else
2643 return -ENODEV;
2644
2645 out:
2646 si_meminfo(&s);
2647 pr_info(MPT3SAS_FMT
2648 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2649 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
2650
2651 return 0;
2652}
f92363d1 2653
2654static int
2655_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2656 struct pci_dev *pdev)
2657{
2658 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2659 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2660 return -ENODEV;
2661 }
2662 return 0;
2663}
2664
2665/**
2666 * _base_check_enable_msix - checks whether the controller is MSI-X capable.
2667 * @ioc: per adapter object
2668 *
2669 * Check to see if card is capable of MSIX, and set number
2670 * of available msix vectors
2671 */
2672static int
2673_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2674{
2675 int base;
2676 u16 message_control;
2677
2678 /* Check whether the controller is a SAS2008 B0 controller;
2679 * if it is, use IO-APIC instead of MSI-X.
2680 */
2681 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2682 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2683 return -EINVAL;
2684 }
2685
2686 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2687 if (!base) {
2688 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
2689 ioc->name));
2690 return -EINVAL;
2691 }
2692
2693 /* get msix vector count */
2694 /* NUMA_IO not supported for older controllers */
2695 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2696 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2697 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2698 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2699 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2700 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2701 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2702 ioc->msix_vector_count = 1;
2703 else {
2704 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2705 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2706 }
2707 dinitprintk(ioc, pr_info(MPT3SAS_FMT
2708 "msix is supported, vector_count(%d)\n",
2709 ioc->name, ioc->msix_vector_count));
2710 return 0;
2711}
2712
2713/**
2714 * _base_free_irq - free irq
2715 * @ioc: per adapter object
2716 *
2717 * Freeing respective reply_queue from the list.
2718 */
2719static void
2720_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2721{
2722 struct adapter_reply_queue *reply_q, *next;
2723
2724 if (list_empty(&ioc->reply_queue_list))
2725 return;
2726
2727 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2728 list_del(&reply_q->list);
2729 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2730 reply_q);
2731 kfree(reply_q);
2732 }
2733}
2734
2735/**
2736 * _base_request_irq - request irq
2737 * @ioc: per adapter object
2738 * @index: msix index into vector table
2739 *
2740 * Inserting respective reply_queue into the list.
2741 */
2742static int
1d55abc0 2743_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
f92363d1 2744{
1d55abc0 2745 struct pci_dev *pdev = ioc->pdev;
2746 struct adapter_reply_queue *reply_q;
2747 int r;
2748
2749 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2750 if (!reply_q) {
2751 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
2752 ioc->name, (int)sizeof(struct adapter_reply_queue));
2753 return -ENOMEM;
2754 }
2755 reply_q->ioc = ioc;
2756 reply_q->msix_index = index;
14b3114d 2757
2758 atomic_set(&reply_q->busy, 0);
2759 if (ioc->msix_enable)
2760 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
c84b06a4 2761 ioc->driver_name, ioc->id, index);
2762 else
2763 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
c84b06a4 2764 ioc->driver_name, ioc->id);
2765 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2766 IRQF_SHARED, reply_q->name, reply_q);
2767 if (r) {
2768 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1d55abc0 2769 reply_q->name, pci_irq_vector(pdev, index));
da3cec25 2770 kfree(reply_q);
2771 return -EBUSY;
2772 }
2773
2774 INIT_LIST_HEAD(&reply_q->list);
2775 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2776 return 0;
2777}
2778
2779/**
2780 * _base_assign_reply_queues - assigning msix index for each cpu
2781 * @ioc: per adapter object
2782 *
2783 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
2784 *
2785 * It would be nice if we could call irq_set_affinity, however it is not
2786 * an exported symbol
2787 */
2788static void
2789_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2790{
91b265bf 2791 unsigned int cpu, nr_cpus, nr_msix, index = 0;
14b3114d 2792 struct adapter_reply_queue *reply_q;
2793
2794 if (!_base_is_controller_msix_enabled(ioc))
2795 return;
2796
2797 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2798
2799 nr_cpus = num_online_cpus();
2800 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2801 ioc->facts.MaxMSIxVectors);
2802 if (!nr_msix)
2803 return;
f92363d1 2804
2805 if (smp_affinity_enable) {
2806 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2807 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2808 reply_q->msix_index);
2809 if (!mask) {
2810 pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
2811 ioc->name, reply_q->msix_index);
2812 continue;
2813 }
2814
2815 for_each_cpu_and(cpu, mask, cpu_online_mask) {
2816 if (cpu >= ioc->cpu_msix_table_sz)
2817 break;
1d55abc0 2818 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
4a8842de 2819 }
2820 }
2821 return;
2822 }
2823 cpu = cpumask_first(cpu_online_mask);
2824
2825 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2826
2827 unsigned int i, group = nr_cpus / nr_msix;
2828
2829 if (cpu >= nr_cpus)
2830 break;
2831
2832 if (index < nr_cpus % nr_msix)
2833 group++;
2834
2835 for (i = 0 ; i < group ; i++) {
1d55abc0 2836 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
91b265bf 2837 cpu = cpumask_next(cpu, cpu_online_mask);
f92363d1 2838 }
91b265bf 2839 index++;
14b3114d 2840 }
2841}
2842
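/*
 * Worked example for the manual spreading above (counts invented):
 * nr_cpus = 10 and nr_msix = 4 give group sizes 3, 3, 2, 2 - the
 * first nr_cpus % nr_msix (= 2) reply queues take one extra CPU each,
 * so every online CPU ends up with exactly one cpu_msix_table entry.
 */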
2843/**
2844 * _base_disable_msix - disables msix
2845 * @ioc: per adapter object
2846 *
2847 */
2848static void
2849_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
2850{
2851 if (!ioc->msix_enable)
2852 return;
2853 pci_disable_msix(ioc->pdev);
2854 ioc->msix_enable = 0;
2855}
2856
2857/**
2858 * _base_enable_msix - enables msix, falling back to io_apic
2859 * @ioc: per adapter object
2860 *
2861 */
2862static int
2863_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2864{
f92363d1 2865 int r;
bb350661 2866 int i, local_max_msix_vectors;
f92363d1 2867 u8 try_msix = 0;
1d55abc0 2868 unsigned int irq_flags = PCI_IRQ_MSIX;
f92363d1 2869
2870 if (msix_disable == -1 || msix_disable == 0)
2871 try_msix = 1;
2872
2873 if (!try_msix)
2874 goto try_ioapic;
2875
2876 if (_base_check_enable_msix(ioc) != 0)
2877 goto try_ioapic;
2878
2879 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1d55abc0 2880 ioc->msix_vector_count);
f92363d1 2881
2882 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
2883 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
2884 ioc->cpu_count, max_msix_vectors);
2885
9b05c91a 2886 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
06f5f976 2887 local_max_msix_vectors = (reset_devices) ? 1 : 8;
bb350661
SPS
2888 else
2889 local_max_msix_vectors = max_msix_vectors;
9b05c91a 2890
1d55abc0 2891 if (local_max_msix_vectors > 0)
bb350661 2892 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
9c500060 2893 ioc->reply_queue_count);
1d55abc0 2894 else if (local_max_msix_vectors == 0)
9b05c91a 2895 goto try_ioapic;
9c500060 2896
2897 if (ioc->msix_vector_count < ioc->cpu_count)
2898 smp_affinity_enable = 0;
2899
2900 if (smp_affinity_enable)
2901 irq_flags |= PCI_IRQ_AFFINITY;
f92363d1 2902
2903 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
2904 irq_flags);
2905 if (r < 0) {
f92363d1 2906 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1d55abc0 2907 "pci_alloc_irq_vectors failed (r=%d) !!!\n",
f92363d1 2908 ioc->name, r));
2909 goto try_ioapic;
2910 }
2911
2912 ioc->msix_enable = 1;
2913 ioc->reply_queue_count = r;
2914 for (i = 0; i < ioc->reply_queue_count; i++) {
2915 r = _base_request_irq(ioc, i);
2916 if (r) {
2917 _base_free_irq(ioc);
2918 _base_disable_msix(ioc);
2919 goto try_ioapic;
2920 }
2921 }
2922
2923 return 0;
2924
2925/* fall back to io_apic interrupt routing */
2926 try_ioapic:
2927
9b05c91a 2928 ioc->reply_queue_count = 1;
2929 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
2930 if (r < 0) {
2931 dfailprintk(ioc, pr_info(MPT3SAS_FMT
2932 "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
2933 ioc->name, r));
2934 } else
2935 r = _base_request_irq(ioc, 0);
2936
2937 return r;
2938}
2939
2940/**
2941 * mpt3sas_base_unmap_resources - free controller resources
2942 * @ioc: per adapter object
2943 */
8bbb1cf6 2944static void
2945mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2946{
2947 struct pci_dev *pdev = ioc->pdev;
2948
2949 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2950 ioc->name, __func__));
2951
2952 _base_free_irq(ioc);
2953 _base_disable_msix(ioc);
2954
0bb337c9 2955 if (ioc->combined_reply_queue) {
580d4e31 2956 kfree(ioc->replyPostRegisterIndex);
2957 ioc->replyPostRegisterIndex = NULL;
2958 }
2959
2960 if (ioc->chip_phys) {
2961 iounmap(ioc->chip);
2962 ioc->chip_phys = 0;
2963 }
2964
2965 if (pci_is_enabled(pdev)) {
2966 pci_release_selected_regions(ioc->pdev, ioc->bars);
2967 pci_disable_pcie_error_reporting(pdev);
2968 pci_disable_device(pdev);
2969 }
2970}
2971
2972/**
2973 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2974 * @ioc: per adapter object
2975 *
2976 * Returns 0 for success, non-zero for failure.
2977 */
2978int
2979mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2980{
2981 struct pci_dev *pdev = ioc->pdev;
2982 u32 memap_sz;
2983 u32 pio_sz;
2984 int i, r = 0;
2985 u64 pio_chip = 0;
6f9e09fd 2986 phys_addr_t chip_phys = 0;
2987 struct adapter_reply_queue *reply_q;
2988
2989 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2990 ioc->name, __func__));
2991
2992 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2993 if (pci_enable_device_mem(pdev)) {
2994 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2995 ioc->name);
cf9bd21a 2996 ioc->bars = 0;
2997 return -ENODEV;
2998 }
2999
3000
3001 if (pci_request_selected_regions(pdev, ioc->bars,
c84b06a4 3002 ioc->driver_name)) {
3003 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
3004 ioc->name);
cf9bd21a 3005 ioc->bars = 0;
3006 r = -ENODEV;
3007 goto out_fail;
3008 }
3009
3010/* AER (Advanced Error Reporting) hooks */
3011 pci_enable_pcie_error_reporting(pdev);
3012
3013 pci_set_master(pdev);
3014
3015
3016 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3017 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
3018 ioc->name, pci_name(pdev));
3019 r = -ENODEV;
3020 goto out_fail;
3021 }
3022
3023 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3024 (!memap_sz || !pio_sz); i++) {
3025 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3026 if (pio_sz)
3027 continue;
3028 pio_chip = (u64)pci_resource_start(pdev, i);
3029 pio_sz = pci_resource_len(pdev, i);
3030 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3031 if (memap_sz)
3032 continue;
3033 ioc->chip_phys = pci_resource_start(pdev, i);
6f9e09fd 3034 chip_phys = ioc->chip_phys;
3035 memap_sz = pci_resource_len(pdev, i);
3036 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3037 }
3038 }
3039
3040 if (ioc->chip == NULL) {
3041 pr_err(MPT3SAS_FMT "unable to map adapter memory! "
3042 " or resource not found\n", ioc->name);
3043 r = -EINVAL;
3044 goto out_fail;
3045 }
3046
f92363d1 3047 _base_mask_interrupts(ioc);
9b05c91a 3048
98c56ad3 3049 r = _base_get_ioc_facts(ioc);
3050 if (r)
3051 goto out_fail;
3052
3053 if (!ioc->rdpq_array_enable_assigned) {
3054 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3055 ioc->rdpq_array_enable_assigned = 1;
3056 }
3057
3058 r = _base_enable_msix(ioc);
3059 if (r)
3060 goto out_fail;
3061
3062 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3063 * revision HBAs and also only when reply queue count is greater than 8
3064 */
0bb337c9 3065 if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
3066 * Determine the Supplemental Reply Post Host Index Registers'
3067 * address. The Supplemental Reply Post Host Index Registers
3068 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
3069 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
3070 * bytes from the previous one.
3071 */
3072 ioc->replyPostRegisterIndex = kcalloc(
0bb337c9 3073 ioc->combined_reply_index_count,
3074 sizeof(resource_size_t *), GFP_KERNEL);
3075 if (!ioc->replyPostRegisterIndex) {
3076 dfailprintk(ioc, printk(MPT3SAS_FMT
3077 "allocation for reply Post Register Index failed!!!\n",
3078 ioc->name));
3079 r = -ENOMEM;
3080 goto out_fail;
3081 }
3082
0bb337c9 3083 for (i = 0; i < ioc->combined_reply_index_count; i++) {
fb77bb53 3084 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
cf6bf971 3085 ((u8 __force *)&ioc->chip->Doorbell +
3086 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3087 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3088 }
3089 } else
0bb337c9 3090 ioc->combined_reply_queue = 0;
fb77bb53 3091
3092 if (ioc->is_warpdrive) {
3093 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3094 &ioc->chip->ReplyPostHostIndex;
3095
3096 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3097 ioc->reply_post_host_index[i] =
3098 (resource_size_t __iomem *)
3099 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3100 * 4)));
3101 }
3102
3103 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3104 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
3105 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
3106 "IO-APIC enabled"),
3107 pci_irq_vector(ioc->pdev, reply_q->msix_index));
f92363d1 3108
3109 pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
3110 ioc->name, &chip_phys, ioc->chip, memap_sz);
3111 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
3112 ioc->name, (unsigned long long)pio_chip, pio_sz);
3113
3114 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3115 pci_save_state(pdev);
3116 return 0;
3117
3118 out_fail:
580d4e31 3119 mpt3sas_base_unmap_resources(ioc);
3120 return r;
3121}
3122
3123/**
3124 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3125 * @ioc: per adapter object
3126 * @smid: system request message index (smid zero is invalid)
3127 *
3128 * Returns virt pointer to message frame.
3129 */
3130void *
3131mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3132{
3133 return (void *)(ioc->request + (smid * ioc->request_sz));
3134}
3135
3136/**
3137 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3138 * @ioc: per adapter object
3139 * @smid: system request message index
3140 *
3141 * Returns virt pointer to sense buffer.
3142 */
3143void *
3144mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3145{
3146 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3147}
3148
3149/**
3150 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3151 * @ioc: per adapter object
3152 * @smid: system request message index
3153 *
3154 * Returns phys pointer to the low 32bit address of the sense buffer.
3155 */
3156__le32
3157mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3158{
3159 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3160 SCSI_SENSE_BUFFERSIZE));
3161}
3162
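/*
 * Worked example (sense_dma invented): sense buffers are laid out back
 * to back, SCSI_SENSE_BUFFERSIZE (96) bytes apart, so with
 * sense_dma = 0x8000 the buffer for smid 3 starts at
 * 0x8000 + (3 - 1) * 96 = 0x80c0.
 */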
3163/**
3164 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3165 * @ioc: per adapter object
3166 * @smid: system request message index
3167 *
3168 * Returns virt pointer to a PCIe SGL.
3169 */
3170void *
3171mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3172{
dbec4c90 3173 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3174}
3175
3176/**
3177 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3178 * @ioc: per adapter object
3179 * @smid: system request message index
3180 *
3181 * Returns phys pointer to the address of the PCIe buffer.
3182 */
d8335ae2 3183dma_addr_t
3184mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3185{
dbec4c90 3186 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3187}
3188
3189/**
3190 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3191 * @ioc: per adapter object
3192 * @phys_addr: lower 32 physical addr of the reply
3193 *
3194 * Converts 32bit lower physical addr into a virt address.
3195 */
3196void *
3197mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3198{
3199 if (!phys_addr)
3200 return NULL;
3201 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3202}
3203
3204static inline u8
3205_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
3206{
3207 return ioc->cpu_msix_table[raw_smp_processor_id()];
3208}
3209
3210/**
3211 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3212 * @ioc: per adapter object
3213 * @cb_idx: callback index
3214 *
3215 * Returns smid (zero is invalid)
3216 */
3217u16
3218mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3219{
3220 unsigned long flags;
3221 struct request_tracker *request;
3222 u16 smid;
3223
3224 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3225 if (list_empty(&ioc->internal_free_list)) {
3226 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3227 pr_err(MPT3SAS_FMT "%s: smid not available\n",
3228 ioc->name, __func__);
3229 return 0;
3230 }
3231
3232 request = list_entry(ioc->internal_free_list.next,
3233 struct request_tracker, tracker_list);
3234 request->cb_idx = cb_idx;
3235 smid = request->smid;
3236 list_del(&request->tracker_list);
3237 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3238 return smid;
3239}
3240
3241/**
3242 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3243 * @ioc: per adapter object
3244 * @cb_idx: callback index
3245 * @scmd: pointer to scsi command object
3246 *
3247 * Returns smid (zero is invalid)
3248 */
3249u16
3250mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3251 struct scsi_cmnd *scmd)
3252{
3253 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3254 unsigned int tag = scmd->request->tag;
3255 u16 smid;
3256
dbec4c90 3257 smid = tag + 1;
f92363d1 3258 request->cb_idx = cb_idx;
03d1fb3a 3259 request->msix_io = _base_get_msix_index(ioc);
3260 request->smid = smid;
3261 INIT_LIST_HEAD(&request->chain_list);
3262 return smid;
3263}
3264
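/*
 * Note on the mapping above: SCSI-IO smids are simply the block layer
 * tag + 1 (smid 0 is reserved as invalid), e.g. scmd->request->tag 41
 * always yields smid 42, so this path needs no free list or lock,
 * unlike the hi-priority and internal pools below.
 */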
3265/**
3266 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3267 * @ioc: per adapter object
3268 * @cb_idx: callback index
3269 *
3270 * Returns smid (zero is invalid)
3271 */
3272u16
3273mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3274{
3275 unsigned long flags;
3276 struct request_tracker *request;
3277 u16 smid;
3278
3279 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3280 if (list_empty(&ioc->hpr_free_list)) {
3281 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3282 return 0;
3283 }
3284
3285 request = list_entry(ioc->hpr_free_list.next,
3286 struct request_tracker, tracker_list);
3287 request->cb_idx = cb_idx;
3288 smid = request->smid;
3289 list_del(&request->tracker_list);
3290 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3291 return smid;
3292}
3293
3294static void
3295_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3296{
3297 /*
3298 * See _wait_for_commands_to_complete() call with regard to this code.
3299 */
3300 if (ioc->shost_recovery && ioc->pending_io_count) {
3301 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
3302 if (ioc->pending_io_count == 0)
6a2d4618 3303 wake_up(&ioc->reset_wq);
3304 }
3305}
3306
3307void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3308 struct scsiio_tracker *st)
3309{
3310 if (WARN_ON(st->smid == 0))
3311 return;
3312 st->cb_idx = 0xFF;
3313 st->direct_io = 0;
93204b78 3314 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3315}
3316
3317/**
3318 * mpt3sas_base_free_smid - put smid back on free_list
3319 * @ioc: per adapter object
3320 * @smid: system request message index
3321 *
3322 * Return nothing.
3323 */
3324void
3325mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3326{
3327 unsigned long flags;
3328 int i;
f92363d1 3329
f92363d1 3330 if (smid < ioc->hi_priority_smid) {
dbec4c90 3331 struct scsiio_tracker *st;
f92363d1 3332
3333 st = _get_st_from_smid(ioc, smid);
3334 if (!st) {
3335 _base_recovery_check(ioc);
3336 return;
3337 }
3338 mpt3sas_base_clear_st(ioc, st);
6a2d4618 3339 _base_recovery_check(ioc);
f92363d1 3340 return;
3341 }
3342
3343 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3344 if (smid < ioc->internal_smid) {
3345 /* hi-priority */
3346 i = smid - ioc->hi_priority_smid;
3347 ioc->hpr_lookup[i].cb_idx = 0xFF;
3348 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3349 } else if (smid <= ioc->hba_queue_depth) {
3350 /* internal queue */
3351 i = smid - ioc->internal_smid;
3352 ioc->internal_lookup[i].cb_idx = 0xFF;
3353 list_add(&ioc->internal_lookup[i].tracker_list,
3354 &ioc->internal_free_list);
3355 }
3356 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3357}
3358
3359/**
3360 * _base_mpi_ep_writeq - 32 bit write to MMIO
3361 * @b: data payload
3362 * @addr: address in MMIO space
3363 * @writeq_lock: spin lock
3364 *
3365 * This is special handling for MPI EP to take care of 32 bit
3366 * environments where it's not guaranteed to send the entire word
3367 * in one transfer.
3368 */
3369static inline void
3370_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3371 spinlock_t *writeq_lock)
3372{
3373 unsigned long flags;
cf6bf971 3374 __u64 data_out = b;
3375
3376 spin_lock_irqsave(writeq_lock, flags);
3377 writel((u32)(data_out), addr);
3378 writel((u32)(data_out >> 32), (addr + 4));
10ee1f22 3379 mmiowb();
3380 spin_unlock_irqrestore(writeq_lock, flags);
3381}
3382
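/*
 * Worked example for the split write above (descriptor value
 * invented): a descriptor of 0x1122334455667788 is posted as
 * writel(0x55667788) to the low dword followed by writel(0x11223344)
 * to addr + 4, all under writeq_lock so the halves of two descriptors
 * can never interleave.
 */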
3383/**
3384 * _base_writeq - 64 bit write to MMIO
3385 * @ioc: per adapter object
3386 * @b: data payload
3387 * @addr: address in MMIO space
3388 * @writeq_lock: spin lock
3389 *
3390 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3391 * care of 32 bit environments where it's not guaranteed to send the entire word
3392 * in one transfer.
3393 */
3394#if defined(writeq) && defined(CONFIG_64BIT)
3395static inline void
3396_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3397{
cf6bf971 3398 writeq(b, addr);
3399}
3400#else
3401static inline void
3402_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3403{
e5747439 3404 _base_mpi_ep_writeq(b, addr, writeq_lock);
3405}
3406#endif
3407
3408/**
3409 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3410 * @ioc: per adapter object
3411 * @smid: system request message index
3412 * @handle: device handle
3413 *
3414 * Return nothing.
3415 */
3416static void
3417_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3418{
3419 Mpi2RequestDescriptorUnion_t descriptor;
3420 u64 *request = (u64 *)&descriptor;
3421 void *mpi_req_iomem;
3422 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3423
3424 _clone_sg_entries(ioc, (void *) mfp, smid);
cf6bf971 3425 mpi_req_iomem = (void __force *)ioc->chip +
3426 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3427 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3428 ioc->request_sz);
3429 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3430 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3431 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3432 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3433 descriptor.SCSIIO.LMID = 0;
3434 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3435 &ioc->scsi_lookup_lock);
3436}
3437
f92363d1 3438/**
81c16f83 3439 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3440 * @ioc: per adapter object
3441 * @smid: system request message index
3442 * @handle: device handle
3443 *
3444 * Return nothing.
3445 */
3446static void
3447_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3448{
3449 Mpi2RequestDescriptorUnion_t descriptor;
3450 u64 *request = (u64 *)&descriptor;
3451
3452
3453 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3454 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3455 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3456 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3457 descriptor.SCSIIO.LMID = 0;
3458 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3459 &ioc->scsi_lookup_lock);
3460}
3461
3462/**
40114bde 3463 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
3464 * @ioc: per adapter object
3465 * @smid: system request message index
3466 * @handle: device handle
3467 *
3468 * Return nothing.
3469 */
3470void
3471mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3472 u16 handle)
3473{
3474 Mpi2RequestDescriptorUnion_t descriptor;
3475 u64 *request = (u64 *)&descriptor;
3476
3477 descriptor.SCSIIO.RequestFlags =
3478 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3479 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3480 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3481 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3482 descriptor.SCSIIO.LMID = 0;
3483 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3484 &ioc->scsi_lookup_lock);
3485}
3486
3487/**
3488 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
3489 * @ioc: per adapter object
3490 * @smid: system request message index
3491 * @msix_task: same as the MSI-x index of the I/O in case of task abort, else 0.
3492 * Return nothing.
3493 */
3494void
3495mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3496 u16 msix_task)
3497{
3498 Mpi2RequestDescriptorUnion_t descriptor;
3499 void *mpi_req_iomem;
3500 u64 *request;
3501
3502 if (ioc->is_mcpu_endpoint) {
3503 MPI2RequestHeader_t *request_hdr;
3504
3505 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3506
3507 request_hdr = (MPI2RequestHeader_t *)mfp;
3508 /* TBD 256 is offset within sys register. */
3509 mpi_req_iomem = (void __force *)ioc->chip
3510 + MPI_FRAME_START_OFFSET
3511 + (smid * ioc->request_sz);
3512 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3513 ioc->request_sz);
3514 }
3515
3516 request = (u64 *)&descriptor;
3517
3518 descriptor.HighPriority.RequestFlags =
3519 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3520 descriptor.HighPriority.MSIxIndex = msix_task;
3521 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3522 descriptor.HighPriority.LMID = 0;
3523 descriptor.HighPriority.Reserved1 = 0;
3524 if (ioc->is_mcpu_endpoint)
3525 _base_mpi_ep_writeq(*request,
3526 &ioc->chip->RequestDescriptorPostLow,
3527 &ioc->scsi_lookup_lock);
3528 else
3529 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3530 &ioc->scsi_lookup_lock);
3531}
3532
3533/**
3534 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3535 * firmware
3536 * @ioc: per adapter object
3537 * @smid: system request message index
3538 *
3539 * Return nothing.
3540 */
3541void
3542mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3543{
3544 Mpi2RequestDescriptorUnion_t descriptor;
3545 u64 *request = (u64 *)&descriptor;
3546
3547 descriptor.Default.RequestFlags =
3548 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3549 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3550 descriptor.Default.SMID = cpu_to_le16(smid);
3551 descriptor.Default.LMID = 0;
3552 descriptor.Default.DescriptorTypeDependent = 0;
3553 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3554 &ioc->scsi_lookup_lock);
3555}
3556
3557/**
3558 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
3559 * @ioc: per adapter object
3560 * @smid: system request message index
3561 *
3562 * Return nothing.
3563 */
3564void
3565mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3566{
3567 Mpi2RequestDescriptorUnion_t descriptor;
3568 void *mpi_req_iomem;
3569 u64 *request;
3570 MPI2RequestHeader_t *request_hdr;
3571
3572 if (ioc->is_mcpu_endpoint) {
3573 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3574
3575 request_hdr = (MPI2RequestHeader_t *)mfp;
3576
3577 _clone_sg_entries(ioc, (void *) mfp, smid);
3578 /* TBD 256 is offset within sys register */
3579 mpi_req_iomem = (void __force *)ioc->chip +
3580 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3581 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3582 ioc->request_sz);
3583 }
3584 request = (u64 *)&descriptor;
3585 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3586 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3587 descriptor.Default.SMID = cpu_to_le16(smid);
3588 descriptor.Default.LMID = 0;
3589 descriptor.Default.DescriptorTypeDependent = 0;
3590 if (ioc->is_mcpu_endpoint)
3591 _base_mpi_ep_writeq(*request,
3592 &ioc->chip->RequestDescriptorPostLow,
3593 &ioc->scsi_lookup_lock);
3594 else
3595 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3596 &ioc->scsi_lookup_lock);
3597}
3598
3599/**
3600 * _base_display_OEMs_branding - Display branding string
3601 * @ioc: per adapter object
3602 *
3603 * Return nothing.
3604 */
3605static void
3606_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3607{
3608 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
3609 return;
3610
3611 switch (ioc->pdev->subsystem_vendor) {
3612 case PCI_VENDOR_ID_INTEL:
3613 switch (ioc->pdev->device) {
3614 case MPI2_MFGPAGE_DEVID_SAS2008:
3615 switch (ioc->pdev->subsystem_device) {
3616 case MPT2SAS_INTEL_RMS2LL080_SSDID:
3617 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3618 MPT2SAS_INTEL_RMS2LL080_BRANDING);
3619 break;
3620 case MPT2SAS_INTEL_RMS2LL040_SSDID:
3621 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3622 MPT2SAS_INTEL_RMS2LL040_BRANDING);
3623 break;
3624 case MPT2SAS_INTEL_SSD910_SSDID:
3625 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3626 MPT2SAS_INTEL_SSD910_BRANDING);
3627 break;
3628 default:
3629 pr_info(MPT3SAS_FMT
3630 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3631 ioc->name, ioc->pdev->subsystem_device);
3632 break;
3633 }
break;
3634 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3635 switch (ioc->pdev->subsystem_device) {
3636 case MPT2SAS_INTEL_RS25GB008_SSDID:
3637 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3638 MPT2SAS_INTEL_RS25GB008_BRANDING);
3639 break;
3640 case MPT2SAS_INTEL_RMS25JB080_SSDID:
3641 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3642 MPT2SAS_INTEL_RMS25JB080_BRANDING);
3643 break;
3644 case MPT2SAS_INTEL_RMS25JB040_SSDID:
3645 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3646 MPT2SAS_INTEL_RMS25JB040_BRANDING);
3647 break;
3648 case MPT2SAS_INTEL_RMS25KB080_SSDID:
3649 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3650 MPT2SAS_INTEL_RMS25KB080_BRANDING);
3651 break;
3652 case MPT2SAS_INTEL_RMS25KB040_SSDID:
3653 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3654 MPT2SAS_INTEL_RMS25KB040_BRANDING);
3655 break;
3656 case MPT2SAS_INTEL_RMS25LB040_SSDID:
3657 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3658 MPT2SAS_INTEL_RMS25LB040_BRANDING);
3659 break;
3660 case MPT2SAS_INTEL_RMS25LB080_SSDID:
3661 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3662 MPT2SAS_INTEL_RMS25LB080_BRANDING);
3663 break;
3664 default:
3665 pr_info(MPT3SAS_FMT
3666 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3667 ioc->name, ioc->pdev->subsystem_device);
3668 break;
3669 }
break;
3670 case MPI25_MFGPAGE_DEVID_SAS3008:
3671 switch (ioc->pdev->subsystem_device) {
3672 case MPT3SAS_INTEL_RMS3JC080_SSDID:
3673 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3674 MPT3SAS_INTEL_RMS3JC080_BRANDING);
3675 break;
3676
3677 case MPT3SAS_INTEL_RS3GC008_SSDID:
3678 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3679 MPT3SAS_INTEL_RS3GC008_BRANDING);
3680 break;
3681 case MPT3SAS_INTEL_RS3FC044_SSDID:
3682 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3683 MPT3SAS_INTEL_RS3FC044_BRANDING);
3684 break;
3685 case MPT3SAS_INTEL_RS3UC080_SSDID:
3686 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3687 MPT3SAS_INTEL_RS3UC080_BRANDING);
3688 break;
3689 default:
3690 pr_info(MPT3SAS_FMT
3691 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3692 ioc->name, ioc->pdev->subsystem_device);
3693 break;
3694 }
3695 break;
3696 default:
3697 pr_info(MPT3SAS_FMT
3698 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3699 ioc->name, ioc->pdev->subsystem_device);
3700 break;
3701 }
3702 break;
3703 case PCI_VENDOR_ID_DELL:
3704 switch (ioc->pdev->device) {
3705 case MPI2_MFGPAGE_DEVID_SAS2008:
3706 switch (ioc->pdev->subsystem_device) {
3707 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
3708 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3709 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
3710 break;
3711 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
3712 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3713 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
3714 break;
3715 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
3716 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3717 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
3718 break;
3719 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
3720 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3721 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
3722 break;
3723 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
3724 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3725 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
3726 break;
3727 case MPT2SAS_DELL_PERC_H200_SSDID:
3728 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3729 MPT2SAS_DELL_PERC_H200_BRANDING);
3730 break;
3731 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
3732 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3733 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
3734 break;
3735 default:
3736 pr_info(MPT3SAS_FMT
3737 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3738 ioc->name, ioc->pdev->subsystem_device);
3739 break;
3740 }
3741 break;
3742 case MPI25_MFGPAGE_DEVID_SAS3008:
3743 switch (ioc->pdev->subsystem_device) {
3744 case MPT3SAS_DELL_12G_HBA_SSDID:
3745 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3746 MPT3SAS_DELL_12G_HBA_BRANDING);
3747 break;
3748 default:
3749 pr_info(MPT3SAS_FMT
3750 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3751 ioc->name, ioc->pdev->subsystem_device);
3752 break;
3753 }
3754 break;
3755 default:
3756 pr_info(MPT3SAS_FMT
3757 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
3758 ioc->pdev->subsystem_device);
3759 break;
3760 }
3761 break;
3762 case PCI_VENDOR_ID_CISCO:
3763 switch (ioc->pdev->device) {
3764 case MPI25_MFGPAGE_DEVID_SAS3008:
3765 switch (ioc->pdev->subsystem_device) {
3766 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
3767 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3768 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
3769 break;
3770 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
3771 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3772 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
3773 break;
3774 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3775 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3776 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3777 break;
3778 default:
3779 pr_info(MPT3SAS_FMT
3780 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3781 ioc->name, ioc->pdev->subsystem_device);
3782 break;
3783 }
3784 break;
3785 case MPI25_MFGPAGE_DEVID_SAS3108_1:
3786 switch (ioc->pdev->subsystem_device) {
3787 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3788 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3789 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3790 break;
3791 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
3792 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3793 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
3794 );
3795 break;
3796 default:
3797 pr_info(MPT3SAS_FMT
3798 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3799 ioc->name, ioc->pdev->subsystem_device);
3800 break;
3801 }
3802 break;
3803 default:
3804 pr_info(MPT3SAS_FMT
3805 "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3806 ioc->name, ioc->pdev->subsystem_device);
3807 break;
3808 }
3809 break;
3810 case MPT2SAS_HP_3PAR_SSVID:
3811 switch (ioc->pdev->device) {
3812 case MPI2_MFGPAGE_DEVID_SAS2004:
3813 switch (ioc->pdev->subsystem_device) {
3814 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
3815 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3816 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
3817 break;
3818 default:
3819 pr_info(MPT3SAS_FMT
3820 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3821 ioc->name, ioc->pdev->subsystem_device);
3822 break;
3823 }
break;
3824 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3825 switch (ioc->pdev->subsystem_device) {
3826 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
3827 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3828 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
3829 break;
3830 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
3831 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3832 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
3833 break;
3834 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
3835 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3836 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
3837 break;
3838 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
3839 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3840 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
3841 break;
3842 default:
3843 pr_info(MPT3SAS_FMT
3844 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3845 ioc->name, ioc->pdev->subsystem_device);
3846 break;
3847 }
break;
3848 default:
3849 pr_info(MPT3SAS_FMT
3850 "HP SAS HBA: Subsystem ID: 0x%X\n",
3851 ioc->name, ioc->pdev->subsystem_device);
3852 break;
3853 }
break;
3854 default:
3855 break;
3856 }
3857}
3858
3859/**
3860 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
3861 * version from FW Image Header.
3862 * @ioc: per adapter object
3863 *
3864 * Returns 0 for success, non-zero for failure.
3865 */
3866static int
3867_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3868{
3869 Mpi2FWImageHeader_t *FWImgHdr;
3870 Mpi25FWUploadRequest_t *mpi_request;
3871 Mpi2FWUploadReply_t mpi_reply;
3872 int r = 0;
3873 void *fwpkg_data = NULL;
3874 dma_addr_t fwpkg_data_dma;
3875 u16 smid, ioc_status;
3876 size_t data_length;
3877
3878 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3879 __func__));
3880
3881 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3882 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3883 ioc->name, __func__);
3884 return -EAGAIN;
3885 }
3886
3887 data_length = sizeof(Mpi2FWImageHeader_t);
3888 fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
3889 &fwpkg_data_dma);
3890 if (!fwpkg_data) {
3891 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3892 ioc->name, __FILE__, __LINE__, __func__);
3893 return -ENOMEM;
3894 }
3895
3896 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3897 if (!smid) {
3898 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3899 ioc->name, __func__);
3900 r = -EAGAIN;
3901 goto out;
3902 }
3903
3904 ioc->base_cmds.status = MPT3_CMD_PENDING;
3905 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3906 ioc->base_cmds.smid = smid;
3907 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3908 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3909 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3910 mpi_request->ImageSize = cpu_to_le32(data_length);
3911 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3912 data_length);
3913 init_completion(&ioc->base_cmds.done);
3914 mpt3sas_base_put_smid_default(ioc, smid);
3915 /* Wait for 15 seconds */
3916 wait_for_completion_timeout(&ioc->base_cmds.done,
3917 FW_IMG_HDR_READ_TIMEOUT*HZ);
3918 pr_info(MPT3SAS_FMT "%s: complete\n",
3919 ioc->name, __func__);
3920 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3921 pr_err(MPT3SAS_FMT "%s: timeout\n",
3922 ioc->name, __func__);
3923 _debug_dump_mf(mpi_request,
3924 sizeof(Mpi25FWUploadRequest_t)/4);
3925 r = -ETIME;
3926 } else {
3927 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3928 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3929 memcpy(&mpi_reply, ioc->base_cmds.reply,
3930 sizeof(Mpi2FWUploadReply_t));
3931 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3932 MPI2_IOCSTATUS_MASK;
3933 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3934 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3935 if (FWImgHdr->PackageVersion.Word) {
3936 pr_info(MPT3SAS_FMT "FW Package Version"
3937 "(%02d.%02d.%02d.%02d)\n",
3938 ioc->name,
3939 FWImgHdr->PackageVersion.Struct.Major,
3940 FWImgHdr->PackageVersion.Struct.Minor,
3941 FWImgHdr->PackageVersion.Struct.Unit,
3942 FWImgHdr->PackageVersion.Struct.Dev);
3943 }
3944 } else {
3945 _debug_dump_mf(&mpi_reply,
3946 sizeof(Mpi2FWUploadReply_t)/4);
3947 }
3948 }
3949 }
3950 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3951out:
3952 if (fwpkg_data)
3953 pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
3954 fwpkg_data_dma);
3955 return r;
3956}
3957
3958/**
3959 * _base_display_ioc_capabilities - Display IOC's capabilities.
3960 * @ioc: per adapter object
3961 *
3962 * Return nothing.
3963 */
3964static void
3965_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
3966{
3967 int i = 0;
3968 char desc[16];
3969 u32 iounit_pg1_flags;
3970 u32 bios_version;
3971
3972 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
3973 strncpy(desc, ioc->manu_pg0.ChipName, 16);
3974 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
3975 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
3976 ioc->name, desc,
3977 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
3978 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
3979 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
3980 ioc->facts.FWVersion.Word & 0x000000FF,
3981 ioc->pdev->revision,
3982 (bios_version & 0xFF000000) >> 24,
3983 (bios_version & 0x00FF0000) >> 16,
3984 (bios_version & 0x0000FF00) >> 8,
3985 bios_version & 0x000000FF);
3986
3987 _base_display_OEMs_branding(ioc);
3988
3989 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
3990 pr_info("%sNVMe", i ? "," : "");
3991 i++;
3992 }
3993
3994 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
3995
3996 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
3997 pr_info("Initiator");
3998 i++;
3999 }
4000
4001 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4002 pr_info("%sTarget", i ? "," : "");
4003 i++;
4004 }
4005
4006 i = 0;
4007 pr_info("), ");
4008 pr_info("Capabilities=(");
4009
4010 if (!ioc->hide_ir_msg) {
4011 if (ioc->facts.IOCCapabilities &
f92363d1
SR
4012 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4013 pr_info("Raid");
4014 i++;
4015 }
4016 }
4017
4018 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4019 pr_info("%sTLR", i ? "," : "");
4020 i++;
4021 }
4022
4023 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4024 pr_info("%sMulticast", i ? "," : "");
4025 i++;
4026 }
4027
4028 if (ioc->facts.IOCCapabilities &
4029 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4030 pr_info("%sBIDI Target", i ? "," : "");
4031 i++;
4032 }
4033
4034 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4035 pr_info("%sEEDP", i ? "," : "");
4036 i++;
4037 }
4038
4039 if (ioc->facts.IOCCapabilities &
4040 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4041 pr_info("%sSnapshot Buffer", i ? "," : "");
4042 i++;
4043 }
4044
4045 if (ioc->facts.IOCCapabilities &
4046 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4047 pr_info("%sDiag Trace Buffer", i ? "," : "");
4048 i++;
4049 }
4050
4051 if (ioc->facts.IOCCapabilities &
4052 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4053 pr_info("%sDiag Extended Buffer", i ? "," : "");
4054 i++;
4055 }
4056
4057 if (ioc->facts.IOCCapabilities &
4058 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4059 pr_info("%sTask Set Full", i ? "," : "");
4060 i++;
4061 }
4062
4063 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4064 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4065 pr_info("%sNCQ", i ? "," : "");
4066 i++;
4067 }
4068
4069 pr_info(")\n");
4070}
4071
4072/**
4073 * mpt3sas_base_update_missing_delay - change the missing delay timers
4074 * @ioc: per adapter object
4075 * @device_missing_delay: amount of time till device is reported missing
4076 * @io_missing_delay: interval IO is returned when there is a missing device
4077 *
4078 * Return nothing.
4079 *
4080 * Passed on the command line, this function will modify the device missing
4081 * delay, as well as the io missing delay. This should be called at driver
4082 * load time.
4083 */
4084void
4085mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4086 u16 device_missing_delay, u8 io_missing_delay)
4087{
4088 u16 dmd, dmd_new, dmd_original;
4089 u8 io_missing_delay_original;
4090 u16 sz;
4091 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4092 Mpi2ConfigReply_t mpi_reply;
4093 u8 num_phys = 0;
4094 u16 ioc_status;
4095
4096 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4097 if (!num_phys)
4098 return;
4099
4100 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4101 sizeof(Mpi2SasIOUnit1PhyData_t));
4102 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4103 if (!sas_iounit_pg1) {
4104 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4105 ioc->name, __FILE__, __LINE__, __func__);
4106 goto out;
4107 }
4108 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4109 sas_iounit_pg1, sz))) {
4110 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4111 ioc->name, __FILE__, __LINE__, __func__);
4112 goto out;
4113 }
4114 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4115 MPI2_IOCSTATUS_MASK;
4116 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4117 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4118 ioc->name, __FILE__, __LINE__, __func__);
4119 goto out;
4120 }
4121
4122 /* device missing delay */
4123 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4124 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4125 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4126 else
4127 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4128 dmd_original = dmd;
4129 if (device_missing_delay > 0x7F) {
4130 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4131 device_missing_delay;
4132 dmd = dmd / 16;
4133 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4134 } else
4135 dmd = device_missing_delay;
4136 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
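/*
 * Worked example with a hypothetical request: a device_missing_delay
 * of 300 seconds exceeds 0x7F, so it is re-encoded in 16-second
 * units: dmd = 300 / 16 = 18 with
 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, for an effective delay
 * of 18 * 16 = 288 seconds.  Requests above 0x7F0 (2032) seconds are
 * first clamped to 0x7F0.
 */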
4137
4138 /* io missing delay */
4139 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4140 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4141
4142 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4143 sz)) {
4144 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4145 dmd_new = (dmd &
4146 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4147 else
4148 dmd_new =
4149 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4150 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
4151 ioc->name, dmd_original, dmd_new);
4152 pr_info(MPT3SAS_FMT "io_missing_delay: old(%d), new(%d)\n",
4153 ioc->name, io_missing_delay_original,
4154 io_missing_delay);
4155 ioc->device_missing_delay = dmd_new;
4156 ioc->io_missing_delay = io_missing_delay;
4157 }
4158
4159out:
4160 kfree(sas_iounit_pg1);
4161}
4162/**
4163 * _base_static_config_pages - static start of day config pages
4164 * @ioc: per adapter object
4165 *
4166 * Return nothing.
4167 */
4168static void
4169_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4170{
4171 Mpi2ConfigReply_t mpi_reply;
4172 u32 iounit_pg1_flags;
4173
4174 ioc->nvme_abort_timeout = 30;
4175 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4176 if (ioc->ir_firmware)
4177 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4178 &ioc->manu_pg10);
4179
4180 /*
4181 * Ensure correct T10 PI operation if vendor left EEDPTagMode
4182 * flag unset in NVDATA.
4183 */
4184 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4185 if (ioc->manu_pg11.EEDPTagMode == 0) {
4186 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4187 ioc->name);
4188 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4189 ioc->manu_pg11.EEDPTagMode |= 0x1;
4190 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4191 &ioc->manu_pg11);
4192 }
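/*
 * The EEDPTagMode override above treats the field as two bits wide:
 * "&= ~0x3" clears bits 1:0 and "|= 0x1" then selects tag mode 1.
 * Since this path only runs when the NVDATA value is 0, the net
 * effect is 0 -> 1 while the remaining bits of the byte are
 * preserved.
 */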
4193 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4194 ioc->tm_custom_handling = 1;
4195 else {
4196 ioc->tm_custom_handling = 0;
4197 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4198 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4199 else if (ioc->manu_pg11.NVMeAbortTO >
4200 NVME_TASK_ABORT_MAX_TIMEOUT)
4201 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4202 else
4203 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4204 }
4205
4206 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4207 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4208 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4209 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4210 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4211 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4212 _base_display_ioc_capabilities(ioc);
4213
4214 /*
4215 * Enable task_set_full handling in iounit_pg1 when the
4216 * facts capabilities indicate that its supported.
4217 */
4218 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4219 if ((ioc->facts.IOCCapabilities &
4220 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4221 iounit_pg1_flags &=
4222 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4223 else
4224 iounit_pg1_flags |=
4225 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4226 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4227 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4228
4229 if (ioc->iounit_pg8.NumSensors)
4230 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4231}
4232
4233/**
4234 * mpt3sas_free_enclosure_list - release memory
4235 * @ioc: per adapter object
4236 *
4237 * Free memory allocated during enclosure add.
4238 *
4239 * Return nothing.
4240 */
4241void
4242mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4243{
4244 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4245
4246 /* Free enclosure list */
4247 list_for_each_entry_safe(enclosure_dev,
4248 enclosure_dev_next, &ioc->enclosure_list, list) {
4249 list_del(&enclosure_dev->list);
4250 kfree(enclosure_dev);
4251 }
4252}
4253
4254/**
4255 * _base_release_memory_pools - release memory
4256 * @ioc: per adapter object
4257 *
4258 * Free memory allocated from _base_allocate_memory_pools.
4259 *
4260 * Return nothing.
4261 */
4262static void
4263_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4264{
4265 int i = 0;
4266 int j = 0;
4267 struct chain_tracker *ct;
4268 struct reply_post_struct *rps;
4269
4270 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4271 __func__));
4272
4273 if (ioc->request) {
4274 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
4275 ioc->request, ioc->request_dma);
4276 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4277 "request_pool(0x%p): free\n",
4278 ioc->name, ioc->request));
4279 ioc->request = NULL;
4280 }
4281
4282 if (ioc->sense) {
4283 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4284 dma_pool_destroy(ioc->sense_dma_pool);
4285 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4286 "sense_pool(0x%p): free\n",
4287 ioc->name, ioc->sense));
4288 ioc->sense = NULL;
4289 }
4290
4291 if (ioc->reply) {
4292 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4293 dma_pool_destroy(ioc->reply_dma_pool);
4294 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4295 "reply_pool(0x%p): free\n",
4296 ioc->name, ioc->reply));
4297 ioc->reply = NULL;
4298 }
4299
4300 if (ioc->reply_free) {
4301 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4302 ioc->reply_free_dma);
4303 dma_pool_destroy(ioc->reply_free_dma_pool);
4304 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4305 "reply_free_pool(0x%p): free\n",
4306 ioc->name, ioc->reply_free));
4307 ioc->reply_free = NULL;
4308 }
4309
4310 if (ioc->reply_post) {
4311 do {
4312 rps = &ioc->reply_post[i];
4313 if (rps->reply_post_free) {
4314 dma_pool_free(
4315 ioc->reply_post_free_dma_pool,
4316 rps->reply_post_free,
4317 rps->reply_post_free_dma);
4318 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4319 "reply_post_free_pool(0x%p): free\n",
4320 ioc->name, rps->reply_post_free));
4321 rps->reply_post_free = NULL;
4322 }
4323 } while (ioc->rdpq_array_enable &&
4324 (++i < ioc->reply_queue_count));
4325 if (ioc->reply_post_free_array &&
4326 ioc->rdpq_array_enable) {
4327 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4328 ioc->reply_post_free_array,
4329 ioc->reply_post_free_array_dma);
4330 ioc->reply_post_free_array = NULL;
4331 }
4332 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4333 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4334 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4335 }
4336
4337 if (ioc->pcie_sgl_dma_pool) {
4338 for (i = 0; i < ioc->scsiio_depth; i++) {
4339 dma_pool_free(ioc->pcie_sgl_dma_pool,
4340 ioc->pcie_sg_lookup[i].pcie_sgl,
4341 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4342 }
4343 if (ioc->pcie_sgl_dma_pool)
4344 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4345 }
4346
4347 if (ioc->config_page) {
4348 dexitprintk(ioc, pr_info(MPT3SAS_FMT
4349 "config_page(0x%p): free\n", ioc->name,
4350 ioc->config_page));
4351 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
4352 ioc->config_page, ioc->config_page_dma);
4353 }
4354
4355 kfree(ioc->hpr_lookup);
4356 kfree(ioc->internal_lookup);
4357 if (ioc->chain_lookup) {
4358 for (i = 0; i < ioc->scsiio_depth; i++) {
4359 for (j = ioc->chains_per_prp_buffer;
4360 j < ioc->chains_needed_per_io; j++) {
4361 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4362 if (ct && ct->chain_buffer)
4363 dma_pool_free(ioc->chain_dma_pool,
4364 ct->chain_buffer,
4365 ct->chain_buffer_dma);
4366 }
4367 kfree(ioc->chain_lookup[i].chains_per_smid);
4368 }
4369 dma_pool_destroy(ioc->chain_dma_pool);
4370 kfree(ioc->chain_lookup);
4371 ioc->chain_lookup = NULL;
4372 }
4373}
4374
4375/**
4376 * is_MSB_are_same - checks whether all reply queues in a set have
4377 * the same upper 32 bits in their base memory address.
4378 * @reply_pool_start_address: Base address of a reply queue set
4379 * @pool_sz: Size of a single Reply Descriptor Post Queues pool
4380 *
4381 * Returns 1 if the reply queues in a set have the same upper 32 bits
4382 * in their base memory address,
4383 * else 0.
4384 */
4385
4386static int
4387is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4388{
4389 long reply_pool_end_address;
4390
4391 reply_pool_end_address = reply_pool_start_address + pool_sz;
4392
4393 if (upper_32_bits(reply_pool_start_address) ==
4394 upper_32_bits(reply_pool_end_address))
4395 return 1;
4396 else
4397 return 0;
4398}
4399
4400/**
4401 * _base_allocate_memory_pools - allocate start of day memory pools
4402 * @ioc: per adapter object
4403 *
4404 * Returns 0 success, anything else error
4405 */
4406static int
4407_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4408{
4409 struct mpt3sas_facts *facts;
4410 u16 max_sge_elements;
4411 u16 chains_needed_per_io;
4412 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4413 u32 retry_sz;
4414 u16 max_request_credit, nvme_blocks_needed;
4415 unsigned short sg_tablesize;
4416 u16 sge_size;
4417 int i, j;
4418 struct chain_tracker *ct;
4419
4420 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4421 __func__));
4422
4423
4424 retry_sz = 0;
4425 facts = &ioc->facts;
4426
4427 /* command line tunables for max sgl entries */
4428 if (max_sgl_entries != -1)
4429 sg_tablesize = max_sgl_entries;
4430 else {
4431 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4432 sg_tablesize = MPT2SAS_SG_DEPTH;
4433 else
4434 sg_tablesize = MPT3SAS_SG_DEPTH;
4435 }
4436
4437 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
4438 if (reset_devices)
4439 sg_tablesize = min_t(unsigned short, sg_tablesize,
4440 MPT_KDUMP_MIN_PHYS_SEGMENTS);
4441
4442 if (ioc->is_mcpu_endpoint)
4443 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4444 else {
4445 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4446 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4447 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4448 sg_tablesize = min_t(unsigned short, sg_tablesize,
4449 SG_MAX_SEGMENTS);
4450 pr_warn(MPT3SAS_FMT
4451 "sg_tablesize(%u) is bigger than kernel "
4452 "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
4453 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
4454 }
4455 ioc->shost->sg_tablesize = sg_tablesize;
4456 }
4457
4458 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
4459 (facts->RequestCredit / 4));
4460 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4461 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4462 INTERNAL_SCSIIO_CMDS_COUNT)) {
4463 pr_err(MPT3SAS_FMT "IOC doesn't have enough Request "
4464 "Credits, it has just %d credits\n",
4465 ioc->name, facts->RequestCredit);
4466 return -ENOMEM;
4467 }
4468 ioc->internal_depth = 10;
4469 }
4470
4471 ioc->hi_priority_depth = ioc->internal_depth - (5);
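/*
 * Worked example with hypothetical IOC facts: RequestCredit = 600
 * and HighPriorityCredit = 8 give
 * internal_depth = min(8 + 5, 600 / 4) = 13; assuming that clears
 * INTERNAL_CMDS_COUNT, hi_priority_depth becomes 13 - 5 = 8, i.e.
 * the full high-priority credit, with five frames left for internal
 * driver commands.
 */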
4472 /* command line tunables for max controller queue depth */
4473 if (max_queue_depth != -1 && max_queue_depth != 0) {
4474 max_request_credit = min_t(u16, max_queue_depth +
4475 ioc->internal_depth, facts->RequestCredit);
4476 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4477 max_request_credit = MAX_HBA_QUEUE_DEPTH;
4478 } else if (reset_devices)
4479 max_request_credit = min_t(u16, facts->RequestCredit,
4480 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4481 else
4482 max_request_credit = min_t(u16, facts->RequestCredit,
4483 MAX_HBA_QUEUE_DEPTH);
4484
4485 /* Firmware maintains an additional facts->HighPriorityCredit number of
4486 * credits for HiPriority Request messages, so the hba queue depth will be
4487 * the sum of max_request_credit and the high priority queue depth.
4488 */
4489 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
f92363d1
SR
4490
4491 /* request frame size */
4492 ioc->request_sz = facts->IOCRequestFrameSize * 4;
4493
4494 /* reply frame size */
4495 ioc->reply_sz = facts->ReplyFrameSize * 4;
4496
4497 /* chain segment size */
4498 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4499 if (facts->IOCMaxChainSegmentSize)
4500 ioc->chain_segment_sz =
4501 facts->IOCMaxChainSegmentSize *
4502 MAX_CHAIN_ELEMT_SZ;
4503 else
4504 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
4505 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4506 MAX_CHAIN_ELEMT_SZ;
4507 } else
4508 ioc->chain_segment_sz = ioc->request_sz;
4509
4510 /* calculate the max scatter element size */
4511 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4512
4513 retry_allocation:
4514 total_sz = 0;
4515 /* calculate number of sg elements left over in the 1st frame */
4516 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4517 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4518 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4519
4520 /* now do the same for a chain buffer */
4521 max_sge_elements = ioc->chain_segment_sz - sge_size;
4522 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4523
4524 /*
4525 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
4526 */
4527 chains_needed_per_io = ((ioc->shost->sg_tablesize -
4528 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4529 + 1;
4530 if (chains_needed_per_io > facts->MaxChainDepth) {
4531 chains_needed_per_io = facts->MaxChainDepth;
4532 ioc->shost->sg_tablesize = min_t(u16,
4533 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4534 * chains_needed_per_io), ioc->shost->sg_tablesize);
4535 }
4536 ioc->chains_needed_per_io = chains_needed_per_io;
4537
4538 /* reply free queue sizing - taking into account for 64 FW events */
4539 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4540
4541 /* the mCPU endpoint manages a single set of counters for simplicity */
4542 if (ioc->is_mcpu_endpoint)
4543 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4544 else {
4545 /* calculate reply descriptor post queue depth */
4546 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4547 ioc->reply_free_queue_depth + 1;
4548 /* align the reply post queue on the next 16 count boundary */
4549 if (ioc->reply_post_queue_depth % 16)
4550 ioc->reply_post_queue_depth += 16 -
4551 (ioc->reply_post_queue_depth % 16);
4552 }
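/*
 * Example with hypothetical depths: hba_queue_depth = 1000 gives
 * reply_free_queue_depth = 1064, so the sum above is
 * 1000 + 1064 + 1 = 2065 entries; 2065 % 16 = 1, so 15 entries are
 * added to land on the next 16-count boundary:
 * reply_post_queue_depth = 2080.
 */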
4553
4554 if (ioc->reply_post_queue_depth >
4555 facts->MaxReplyDescriptorPostQueueDepth) {
4556 ioc->reply_post_queue_depth =
4557 facts->MaxReplyDescriptorPostQueueDepth -
4558 (facts->MaxReplyDescriptorPostQueueDepth % 16);
4559 ioc->hba_queue_depth =
4560 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
4561 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4562 }
4563
4564 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
4565 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
4566 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
4567 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
4568 ioc->chains_needed_per_io));
4569
4570 /* reply post queue, 16 byte align */
4571 reply_post_free_sz = ioc->reply_post_queue_depth *
4572 sizeof(Mpi2DefaultReplyDescriptor_t);
4573
4574 sz = reply_post_free_sz;
4575 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4576 sz *= ioc->reply_queue_count;
4577
4578 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4579 (ioc->reply_queue_count):1,
4580 sizeof(struct reply_post_struct), GFP_KERNEL);
4581
4582 if (!ioc->reply_post) {
4583 pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
4584 ioc->name);
4585 goto out;
4586 }
4587 ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
4588 &ioc->pdev->dev, sz, 16, 0);
4589 if (!ioc->reply_post_free_dma_pool) {
4590 pr_err(MPT3SAS_FMT
4591 "reply_post_free pool: dma_pool_create failed\n",
4592 ioc->name);
4593 goto out;
4594 }
4595 i = 0;
4596 do {
4597 ioc->reply_post[i].reply_post_free =
4598 dma_pool_alloc(ioc->reply_post_free_dma_pool,
4599 GFP_KERNEL,
4600 &ioc->reply_post[i].reply_post_free_dma);
4601 if (!ioc->reply_post[i].reply_post_free) {
4602 pr_err(MPT3SAS_FMT
4603 "reply_post_free pool: dma_pool_alloc failed\n",
4604 ioc->name);
4605 goto out;
4606 }
4607 memset(ioc->reply_post[i].reply_post_free, 0, sz);
4608 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4609 "reply post free pool (0x%p): depth(%d),"
4610 "element_size(%d), pool_size(%d kB)\n", ioc->name,
4611 ioc->reply_post[i].reply_post_free,
4612 ioc->reply_post_queue_depth, 8, sz/1024));
4613 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4614 "reply_post_free_dma = (0x%llx)\n", ioc->name,
4615 (unsigned long long)
4616 ioc->reply_post[i].reply_post_free_dma));
4617 total_sz += sz;
4618 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4619
4620 if (ioc->dma_mask == 64) {
4621 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
4622 pr_warn(MPT3SAS_FMT
4623 "no suitable consistent DMA mask for %s\n",
4624 ioc->name, pci_name(ioc->pdev));
4625 goto out;
4626 }
4627 }
4628
4629 ioc->scsiio_depth = ioc->hba_queue_depth -
4630 ioc->hi_priority_depth - ioc->internal_depth;
4631
4632 /* set the scsi host can_queue depth
4633 * with some internal commands that could be outstanding
4634 */
4635 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
4636 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4637 "scsi host: can_queue depth (%d)\n",
4638 ioc->name, ioc->shost->can_queue));
4639
4640
4641 /* contiguous pool for request and chains, 16 byte align, one extra
4642 * frame for smid=0
4643 */
4644 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
4645 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
4646
4647 /* hi-priority queue */
4648 sz += (ioc->hi_priority_depth * ioc->request_sz);
4649
4650 /* internal queue */
4651 sz += (ioc->internal_depth * ioc->request_sz);
4652
4653 ioc->request_dma_sz = sz;
4654 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
4655 if (!ioc->request) {
4656 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
4657 "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
4658 "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
4659 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
4660 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
4661 goto out;
4662 retry_sz = 64;
4663 ioc->hba_queue_depth -= retry_sz;
4664 _base_release_memory_pools(ioc);
4665 goto retry_allocation;
4666 }
4667
4668 if (retry_sz)
4669 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
4670 "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
4671 "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
4672 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
4673
4674 /* hi-priority queue */
4675 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
4676 ioc->request_sz);
4677 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
4678 ioc->request_sz);
4679
4680 /* internal queue */
4681 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
4682 ioc->request_sz);
4683 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
4684 ioc->request_sz);
4685
4686 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4687 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4688 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
4689 (ioc->hba_queue_depth * ioc->request_sz)/1024));
4690
4691 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
4692 ioc->name, (unsigned long long) ioc->request_dma));
4693 total_sz += sz;
4694
4695 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
4696 ioc->name, ioc->request, ioc->scsiio_depth));
4697
4698 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
4699 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4700 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
4701 if (!ioc->chain_lookup) {
4702 pr_err(MPT3SAS_FMT "chain_lookup: kzalloc "
4703 "failed\n", ioc->name);
4704 goto out;
4705 }
4706
4707 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4708 for (i = 0; i < ioc->scsiio_depth; i++) {
4709 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4710 if (!ioc->chain_lookup[i].chains_per_smid) {
4711 pr_err(MPT3SAS_FMT "chain_lookup: "
4712 " kzalloc failed\n", ioc->name);
4713 goto out;
4714 }
4715 }
4716
f92363d1
SR
4717 /* initialize hi-priority queue smid's */
4718 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
4719 sizeof(struct request_tracker), GFP_KERNEL);
4720 if (!ioc->hpr_lookup) {
4721 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
4722 ioc->name);
4723 goto out;
4724 }
4725 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
4726 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4727 "hi_priority(0x%p): depth(%d), start smid(%d)\n",
4728 ioc->name, ioc->hi_priority,
4729 ioc->hi_priority_depth, ioc->hi_priority_smid));
4730
4731 /* initialize internal queue smid's */
4732 ioc->internal_lookup = kcalloc(ioc->internal_depth,
4733 sizeof(struct request_tracker), GFP_KERNEL);
4734 if (!ioc->internal_lookup) {
4735 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
4736 ioc->name);
4737 goto out;
4738 }
4739 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
4740 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4741 "internal(0x%p): depth(%d), start smid(%d)\n",
4742 ioc->name, ioc->internal,
4743 ioc->internal_depth, ioc->internal_smid));
4744 /*
4745 * The number of NVMe page sized blocks needed is:
4746 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
4747 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
4748 * that is placed in the main message frame. 8 is the size of each PRP
4749 * entry or PRP list pointer entry. 8 is subtracted from page_size
4750 * because of the PRP list pointer entry at the end of a page, so this
4751 * is not counted as a PRP entry. The 1 added page is a round up.
4752 *
4753 * To avoid allocation failures due to the amount of memory that could
4754 * be required for NVMe PRP's, only each set of NVMe blocks will be
4755 * contiguous, so a new set is allocated for each possible I/O.
4756 */
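/*
 * Worked example assuming a 4K NVMe page and NVME_PRP_SIZE = 8: with
 * sg_tablesize = 128,
 *	nvme_blocks_needed = ((128 * 8) - 1) / (4096 - 8) + 1
 *			   = 1023 / 4088 + 1 = 1,
 * so a single PRP page per I/O suffices; only a sg_tablesize above
 * 511 would require a second page.
 */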
4757 ioc->chains_per_prp_buffer = 0;
4758 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4759 nvme_blocks_needed =
4760 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4761 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4762 nvme_blocks_needed++;
4763
4764 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4765 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4766 if (!ioc->pcie_sg_lookup) {
4767 pr_info(MPT3SAS_FMT
4768 "PCIe SGL lookup: kzalloc failed\n", ioc->name);
4769 goto out;
4770 }
4771 sz = nvme_blocks_needed * ioc->page_size;
4772 ioc->pcie_sgl_dma_pool =
4773 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
4774 if (!ioc->pcie_sgl_dma_pool) {
4775 pr_info(MPT3SAS_FMT
4776 "PCIe SGL pool: dma_pool_create failed\n",
4777 ioc->name);
4778 goto out;
4779 }
4780
4781 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4782 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4783 ioc->chains_needed_per_io);
4784
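/*
 * Continuing the 4K-page example: sz = 1 * 4096, and with the
 * default 128-byte chain_segment_sz a PRP page could hold
 * 4096 / 128 = 32 chain segments; the min() above caps that at
 * chains_needed_per_io so only usable chain slots are counted.
 */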
4785 for (i = 0; i < ioc->scsiio_depth; i++) {
4786 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4787 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4788 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4789 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
4790 pr_info(MPT3SAS_FMT
4791 "PCIe SGL pool: dma_pool_alloc failed\n",
4792 ioc->name);
4793 goto out;
4794 }
4795 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4796 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4797 ct->chain_buffer =
4798 ioc->pcie_sg_lookup[i].pcie_sgl +
4799 (j * ioc->chain_segment_sz);
4800 ct->chain_buffer_dma =
4801 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4802 (j * ioc->chain_segment_sz);
4803 }
4804 }
4805
4806 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
4807 "element_size(%d), pool_size(%d kB)\n", ioc->name,
4808 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
4809 dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
4810 "fit in a PRP page(%d)\n", ioc->name,
4811 ioc->chains_per_prp_buffer));
4812 total_sz += sz * ioc->scsiio_depth;
4813 }
4814
4815 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4816 ioc->chain_segment_sz, 16, 0);
4817 if (!ioc->chain_dma_pool) {
4818 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
4819 ioc->name);
4820 goto out;
4821 }
4822 for (i = 0; i < ioc->scsiio_depth; i++) {
4823 for (j = ioc->chains_per_prp_buffer;
4824 j < ioc->chains_needed_per_io; j++) {
4825 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4826 ct->chain_buffer = dma_pool_alloc(
4827 ioc->chain_dma_pool, GFP_KERNEL,
4828 &ct->chain_buffer_dma);
4829 if (!ct->chain_buffer) {
4830 pr_err(MPT3SAS_FMT "chain_lookup: "
4831 " dma_pool_alloc failed\n", ioc->name);
4832 _base_release_memory_pools(ioc);
4833 goto out;
4834 }
4835 }
4836 total_sz += ioc->chain_segment_sz;
4837 }
4838
4839 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4840 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4841 ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
4842 ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
4843
4844 /* sense buffers, 4 byte align */
4845 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
4846 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4847 4, 0);
4848 if (!ioc->sense_dma_pool) {
4849 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
4850 ioc->name);
4851 goto out;
4852 }
4853 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4854 &ioc->sense_dma);
4855 if (!ioc->sense) {
4856 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
4857 ioc->name);
4858 goto out;
4859 }
4860 /* sense buffers are required to be in the same 4 GB region.
4861 * The function below checks for that.
4862 * On failure, a new pci pool is created with updated alignment;
4863 * the older allocation and pool are destroyed.
4864 * The alignment is chosen such that the next allocation, if
4865 * successful, will always meet the same 4 GB region requirement.
4866 * The actual requirement is not alignment as such, but that the start
4867 * and end DMA addresses have the same upper 32 bits.
4868 */
4869 if (!is_MSB_are_same((long)ioc->sense, sz)) {
4870 //Release Sense pool & Reallocate
4871 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4872 dma_pool_destroy(ioc->sense_dma_pool);
4873 ioc->sense = NULL;
4874
4875 ioc->sense_dma_pool =
4876 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4877 roundup_pow_of_two(sz), 0);
4878 if (!ioc->sense_dma_pool) {
4879 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
4880 ioc->name);
4881 goto out;
4882 }
4883 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4884 &ioc->sense_dma);
4885 if (!ioc->sense) {
4886 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
4887 ioc->name);
4888 goto out;
4889 }
4890 }
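/*
 * Design note on the retry: rounding the alignment up to a power of
 * two no smaller than the pool itself means the pool can never
 * straddle a 4GB boundary, since a naturally aligned power-of-two
 * block always lies within one such window (e.g. a 96000-byte pool
 * gets 131072-byte alignment), so the retried allocation is
 * guaranteed to satisfy is_MSB_are_same().
 */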
4891 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4892 "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
4893 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
4894 SCSI_SENSE_BUFFERSIZE, sz/1024));
4895 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
4896 ioc->name, (unsigned long long)ioc->sense_dma));
4897 total_sz += sz;
4898
4899 /* reply pool, 4 byte align */
4900 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
4901 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4902 4, 0);
4903 if (!ioc->reply_dma_pool) {
4904 pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
4905 ioc->name);
4906 goto out;
4907 }
4908 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
4909 &ioc->reply_dma);
4910 if (!ioc->reply) {
4911 pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
4912 ioc->name);
4913 goto out;
4914 }
4915 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
4916 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
4917 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4918 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4919 ioc->name, ioc->reply,
4920 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
4921 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
4922 ioc->name, (unsigned long long)ioc->reply_dma));
4923 total_sz += sz;
4924
4925 /* reply free queue, 16 byte align */
4926 sz = ioc->reply_free_queue_depth * 4;
4927 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4928 &ioc->pdev->dev, sz, 16, 0);
4929 if (!ioc->reply_free_dma_pool) {
4930 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
4931 ioc->name);
4932 goto out;
4933 }
4934 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
4935 &ioc->reply_free_dma);
4936 if (!ioc->reply_free) {
4937 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
4938 ioc->name);
4939 goto out;
4940 }
4941 memset(ioc->reply_free, 0, sz);
4942 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
4943 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
4944 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
4945 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4946 "reply_free_dma (0x%llx)\n",
4947 ioc->name, (unsigned long long)ioc->reply_free_dma));
4948 total_sz += sz;
4949
4950 if (ioc->rdpq_array_enable) {
4951 reply_post_free_array_sz = ioc->reply_queue_count *
4952 sizeof(Mpi2IOCInitRDPQArrayEntry);
4953 ioc->reply_post_free_array_dma_pool =
4954 dma_pool_create("reply_post_free_array pool",
4955 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4956 if (!ioc->reply_post_free_array_dma_pool) {
4957 dinitprintk(ioc,
4958 pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4959 "dma_pool_create failed\n", ioc->name));
4960 goto out;
4961 }
4962 ioc->reply_post_free_array =
4963 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4964 GFP_KERNEL, &ioc->reply_post_free_array_dma);
4965 if (!ioc->reply_post_free_array) {
4966 dinitprintk(ioc,
4967 pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4968 "dma_pool_alloc failed\n", ioc->name));
4969 goto out;
4970 }
4971 }
4972 ioc->config_page_sz = 512;
4973 ioc->config_page = pci_alloc_consistent(ioc->pdev,
4974 ioc->config_page_sz, &ioc->config_page_dma);
4975 if (!ioc->config_page) {
4976 pr_err(MPT3SAS_FMT
4977 "config page: pci_alloc_consistent failed\n",
4978 ioc->name);
4979 goto out;
4980 }
4981 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4982 "config page(0x%p): size(%d)\n",
4983 ioc->name, ioc->config_page, ioc->config_page_sz));
4984 dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
4985 ioc->name, (unsigned long long)ioc->config_page_dma));
4986 total_sz += ioc->config_page_sz;
4987
4988 pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
4989 ioc->name, total_sz/1024);
4990 pr_info(MPT3SAS_FMT
4991 "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
4992 ioc->name, ioc->shost->can_queue, facts->RequestCredit);
4993 pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
4994 ioc->name, ioc->shost->sg_tablesize);
4995 return 0;
4996
4997 out:
4998 return -ENOMEM;
4999}
5000
5001/**
5002 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
5003 * @ioc: Pointer to MPT_ADAPTER structure
5004 * @cooked: Request raw or cooked IOC state
5005 *
5006 * Returns all IOC Doorbell register bits if cooked==0, else just the
5007 * Doorbell bits in MPI_IOC_STATE_MASK.
5008 */
5009u32
5010mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5011{
5012 u32 s, sc;
5013
5014 s = readl(&ioc->chip->Doorbell);
5015 sc = s & MPI2_IOC_STATE_MASK;
5016 return cooked ? sc : s;
5017}
5018
5019/**
5020 * _base_wait_on_iocstate - waiting on a particular ioc state
5021 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5022 * @timeout: timeout in seconds
5023 *
5024 * Returns 0 for success, non-zero for failure.
5025 */
5026static int
5027_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5028{
5029 u32 count, cntdn;
5030 u32 current_state;
5031
5032 count = 0;
5033 cntdn = 1000 * timeout;
5034 do {
5035 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5036 if (current_state == ioc_state)
5037 return 0;
5038 if (count && current_state == MPI2_IOC_STATE_FAULT)
5039 break;
5040
5041 usleep_range(1000, 1500);
5042 count++;
5043 } while (--cntdn);
5044
5045 return current_state;
5046}
5047
5048/**
5049 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
5050 * a write to the doorbell)
5051 * @ioc: per adapter object
5052 * @timeout: timeout in seconds
5053 *
5054 * Returns 0 for success, non-zero for failure.
5055 *
5056 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5057 */
5058static int
5059_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
5060
5061static int
5062_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5063{
5064 u32 cntdn, count;
5065 u32 int_status;
5066
5067 count = 0;
5068 cntdn = 1000 * timeout;
5069 do {
5070 int_status = readl(&ioc->chip->HostInterruptStatus);
5071 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5072 dhsprintk(ioc, pr_info(MPT3SAS_FMT
5073 "%s: successful count(%d), timeout(%d)\n",
5074 ioc->name, __func__, count, timeout));
5075 return 0;
5076 }
5077
5078 usleep_range(1000, 1500);
5079 count++;
5080 } while (--cntdn);
5081
5082 pr_err(MPT3SAS_FMT
5083 "%s: failed due to timeout count(%d), int_status(%x)!\n",
5084 ioc->name, __func__, count, int_status);
5085 return -EFAULT;
5086}
5087
/**
 * _base_spin_on_doorbell_int - busy-wait for a doorbell interrupt
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Like _base_wait_for_doorbell_int(), but polls every 500 us with udelay()
 * rather than sleeping. Returns 0 for success, non-zero for failure.
 */
static int
_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 int_status;

    count = 0;
    cntdn = 2000 * timeout;
    do {
        int_status = readl(&ioc->chip->HostInterruptStatus);
        if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        }

        udelay(500);
        count++;
    } while (--cntdn);

    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), int_status(%x)!\n",
        ioc->name, __func__, count, int_status);
    return -EFAULT;
}

/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 int_status;
    u32 doorbell;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        int_status = readl(&ioc->chip->HostInterruptStatus);
        if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
            doorbell = readl(&ioc->chip->Doorbell);
            if ((doorbell & MPI2_IOC_STATE_MASK) ==
                MPI2_IOC_STATE_FAULT) {
                mpt3sas_base_fault_info(ioc, doorbell);
                return -EFAULT;
            }
        } else if (int_status == 0xFFFFFFFF)
            goto out;

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

 out:
    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), int_status(%x)!\n",
        ioc->name, __func__, count, int_status);
    return -EFAULT;
}

/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 doorbell_reg;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        doorbell_reg = readl(&ioc->chip->Doorbell);
        if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        }

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
        ioc->name, __func__, count, doorbell_reg);
    return -EFAULT;
}

/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
    u32 ioc_state;
    int r = 0;

    if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
        pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
            ioc->name, __func__);
        return -EFAULT;
    }

    if (!(ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
        return -EFAULT;

    pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

    writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
        &ioc->chip->Doorbell);
    if ((_base_wait_for_doorbell_ack(ioc, 15))) {
        r = -EFAULT;
        goto out;
    }
    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
    if (ioc_state) {
        pr_err(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state);
        r = -EFAULT;
        goto out;
    }
 out:
    pr_info(MPT3SAS_FMT "message unit reset: %s\n",
        ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
    return r;
}

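/*
 * Illustrative sketch only, not part of the driver: the message unit
 * reset is the "small hammer" tried before falling back to a diag
 * reset, along the lines of what _base_make_ioc_ready() does later in
 * this file. The helper name is hypothetical; the 15-second timeout
 * mirrors the value used there.
 */
#if 0
static int example_reset_to_ready(struct MPT3SAS_ADAPTER *ioc)
{
    /* try the small hammer first; fall back to the diag reset */
    if (_base_send_ioc_reset(ioc,
        MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15) == 0)
        return 0;
    return _base_diag_reset(ioc);
}
#endif
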
/**
 * _base_handshake_req_reply_wait - send request through doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
    u32 *request, int reply_bytes, u16 *reply, int timeout)
{
    MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
    int i;
    u8 failed;
    __le32 *mfp;

    /* make sure doorbell is not in use */
    if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
        pr_err(MPT3SAS_FMT
            "doorbell is in use (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }

    /* clear pending doorbell interrupts from previous state changes */
    if (readl(&ioc->chip->HostInterruptStatus) &
        MPI2_HIS_IOC2SYS_DB_STATUS)
        writel(0, &ioc->chip->HostInterruptStatus);

    /* send message to ioc */
    writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
        ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
        &ioc->chip->Doorbell);

    if ((_base_spin_on_doorbell_int(ioc, 5))) {
        pr_err(MPT3SAS_FMT
            "doorbell handshake int failed (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }
    writel(0, &ioc->chip->HostInterruptStatus);

    if ((_base_wait_for_doorbell_ack(ioc, 5))) {
        pr_err(MPT3SAS_FMT
            "doorbell handshake ack failed (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }

    /* send message 32-bits at a time */
    for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
        writel((u32)(request[i]), &ioc->chip->Doorbell);
        if ((_base_wait_for_doorbell_ack(ioc, 5)))
            failed = 1;
    }

    if (failed) {
        pr_err(MPT3SAS_FMT
            "doorbell handshake sending request failed (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }

    /* now wait for the reply */
    if ((_base_wait_for_doorbell_int(ioc, timeout))) {
        pr_err(MPT3SAS_FMT
            "doorbell handshake int failed (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }

    /* read the first two 16-bit words; they give the total reply length */
    reply[0] = (u16)(readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK);
    writel(0, &ioc->chip->HostInterruptStatus);
    if ((_base_wait_for_doorbell_int(ioc, 5))) {
        pr_err(MPT3SAS_FMT
            "doorbell handshake int failed (line=%d)\n",
            ioc->name, __LINE__);
        return -EFAULT;
    }
    reply[1] = (u16)(readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK);
    writel(0, &ioc->chip->HostInterruptStatus);

    for (i = 2; i < default_reply->MsgLength * 2; i++) {
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
            pr_err(MPT3SAS_FMT
                "doorbell handshake int failed (line=%d)\n",
                ioc->name, __LINE__);
            return -EFAULT;
        }
        if (i >= reply_bytes/2) /* overflow case */
            readl(&ioc->chip->Doorbell);
        else
            reply[i] = (u16)(readl(&ioc->chip->Doorbell)
                & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
    }

    _base_wait_for_doorbell_int(ioc, 5);
    if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
        dhsprintk(ioc, pr_info(MPT3SAS_FMT
            "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
    }
    writel(0, &ioc->chip->HostInterruptStatus);

    if (ioc->logging_level & MPT_DEBUG_INIT) {
        mfp = (__le32 *)reply;
        pr_info("\toffset:data\n");
        for (i = 0; i < reply_bytes/4; i++)
            pr_info("\t[0x%02x]:%08x\n", i*4,
                le32_to_cpu(mfp[i]));
    }
    return 0;
}

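/*
 * Worked example of the doorbell framing above (illustrative comment
 * only): for a 64-byte request, request_bytes/4 = 16 dwords, so the
 * initial doorbell write encodes the HANDSHAKE function in the function
 * field and 16 in the ADD_DWORDS field; the payload then follows, one
 * dword per doorbell write, each acknowledged by the IOC before the
 * next is sent.
 */
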
/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform
 * low-level operations such as resets on the PHYs of the IO Unit. It also
 * allows the host to obtain the IOC-assigned device handle for a device,
 * given other identifying information about it, and to remove IOC
 * resources associated with the device.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
    Mpi2SasIoUnitControlReply_t *mpi_reply,
    Mpi2SasIoUnitControlRequest_t *mpi_request)
{
    u16 smid;
    u32 ioc_state;
    u8 issue_reset = 0;
    int rc;
    void *request;
    u16 wait_state_count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mutex_lock(&ioc->base_cmds.mutex);

    if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
        pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    wait_state_count = 0;
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        if (wait_state_count++ == 10) {
            pr_err(MPT3SAS_FMT
                "%s: failed due to ioc not operational\n",
                ioc->name, __func__);
            rc = -EFAULT;
            goto out;
        }
        ssleep(1);
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        pr_info(MPT3SAS_FMT
            "%s: waiting for operational state(count=%d)\n",
            ioc->name, __func__, wait_state_count);
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    rc = 0;
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
    if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
        mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
        ioc->ioc_link_reset_in_progress = 1;
    init_completion(&ioc->base_cmds.done);
    mpt3sas_base_put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done,
        msecs_to_jiffies(10000));
    if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
        mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
        ioc->ioc_link_reset_in_progress)
        ioc->ioc_link_reset_in_progress = 0;
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        issue_reset =
            mpt3sas_base_check_cmd_timeout(ioc,
                ioc->base_cmds.status, mpi_request,
                sizeof(Mpi2SasIoUnitControlRequest_t)/4);
        goto issue_host_reset;
    }
    if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
        memcpy(mpi_reply, ioc->base_cmds.reply,
            sizeof(Mpi2SasIoUnitControlReply_t));
    else
        memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    goto out;

 issue_host_reset:
    if (issue_reset)
        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    rc = -EFAULT;
 out:
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

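/*
 * Illustrative sketch only, not part of the driver: issuing a PHY hard
 * reset through the helper above, along the same lines as the transport
 * layer's phy reset path. The helper name and "phy_number" parameter
 * are hypothetical.
 */
#if 0
static int example_phy_hard_reset(struct MPT3SAS_ADAPTER *ioc, u8 phy_number)
{
    Mpi2SasIoUnitControlRequest_t mpi_request;
    Mpi2SasIoUnitControlReply_t mpi_reply;

    memset(&mpi_request, 0, sizeof(mpi_request));
    mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
    mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
    mpi_request.PhyNum = phy_number;
    return mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request);
}
#endif
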
/**
 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SCSI Enclosure Processor request message causes the IOC to
 * communicate with SES devices to control LED status signals.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
{
    u16 smid;
    u32 ioc_state;
    u8 issue_reset = 0;
    int rc;
    void *request;
    u16 wait_state_count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mutex_lock(&ioc->base_cmds.mutex);

    if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
        pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    wait_state_count = 0;
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        if (wait_state_count++ == 10) {
            pr_err(MPT3SAS_FMT
                "%s: failed due to ioc not operational\n",
                ioc->name, __func__);
            rc = -EFAULT;
            goto out;
        }
        ssleep(1);
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        pr_info(MPT3SAS_FMT
            "%s: waiting for operational state(count=%d)\n",
            ioc->name,
            __func__, wait_state_count);
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    rc = 0;
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
    init_completion(&ioc->base_cmds.done);
    mpt3sas_base_put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done,
        msecs_to_jiffies(10000));
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        issue_reset =
            mpt3sas_base_check_cmd_timeout(ioc,
                ioc->base_cmds.status, mpi_request,
                sizeof(Mpi2SepRequest_t)/4);
        goto issue_host_reset;
    }
    if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
        memcpy(mpi_reply, ioc->base_cmds.reply,
            sizeof(Mpi2SepReply_t));
    else
        memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    goto out;

 issue_host_reset:
    if (issue_reset)
        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    rc = -EFAULT;
 out:
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

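/*
 * Illustrative sketch only, not part of the driver: asking the
 * enclosure processor to raise a slot's "predicted fault" LED for a
 * given device handle, similar in spirit to the PFA-LED path in
 * mpt3sas_scsih.c. The helper name and "handle" parameter are
 * hypothetical; the MPI2_SEP_* constants are assumed from the MPI2
 * headers.
 */
#if 0
static int example_sep_flag_pfa(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
    Mpi2SepRequest_t mpi_request;
    Mpi2SepReply_t mpi_reply;

    memset(&mpi_request, 0, sizeof(mpi_request));
    mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
    mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
    mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
    mpi_request.SlotStatus =
        cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
    mpi_request.DevHandle = cpu_to_le16(handle);
    return mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
        &mpi_request);
}
#endif
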
/**
 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
{
    Mpi2PortFactsRequest_t mpi_request;
    Mpi2PortFactsReply_t mpi_reply;
    struct mpt3sas_port_facts *pfacts;
    int mpi_reply_sz, mpi_request_sz, r;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
    mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
    memset(&mpi_request, 0, mpi_request_sz);
    mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
    mpi_request.PortNumber = port;
    r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
        (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

    if (r != 0) {
        pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
            ioc->name, __func__, r);
        return r;
    }

    pfacts = &ioc->pfacts[port];
    memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
    pfacts->PortNumber = mpi_reply.PortNumber;
    pfacts->VP_ID = mpi_reply.VP_ID;
    pfacts->VF_ID = mpi_reply.VF_ID;
    pfacts->MaxPostedCmdBuffers =
        le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);

    return 0;
}

/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 ioc_state;
    int rc;

    dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->pci_error_recovery) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: host in pci error recovery\n", ioc->name, __func__));
        return -EFAULT;
    }

    ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
    dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
        ioc->name, __func__, ioc_state));

    if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
        (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
        return 0;

    if (ioc_state & MPI2_DOORBELL_USED) {
        dhsprintk(ioc, printk(MPT3SAS_FMT
            "unexpected doorbell active!\n", ioc->name));
        goto issue_diag_reset;
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt3sas_base_fault_info(ioc, ioc_state &
            MPI2_DOORBELL_DATA_MASK);
        goto issue_diag_reset;
    }

    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
    if (ioc_state) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state));
        return -EFAULT;
    }
    /* the wait succeeded; return here so we do not fall through
     * into the diag reset below
     */
    return 0;

 issue_diag_reset:
    rc = _base_diag_reset(ioc);
    return rc;
}

/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2IOCFactsRequest_t mpi_request;
    Mpi2IOCFactsReply_t mpi_reply;
    struct mpt3sas_facts *facts;
    int mpi_reply_sz, mpi_request_sz, r;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    r = _base_wait_for_iocstate(ioc, 10);
    if (r) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: failed getting to correct state\n",
            ioc->name, __func__));
        return r;
    }
    mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
    mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
    memset(&mpi_request, 0, mpi_request_sz);
    mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
    r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
        (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

    if (r != 0) {
        pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
            ioc->name, __func__, r);
        return r;
    }

    facts = &ioc->facts;
    memset(facts, 0, sizeof(struct mpt3sas_facts));
    facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
    facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
    facts->VP_ID = mpi_reply.VP_ID;
    facts->VF_ID = mpi_reply.VF_ID;
    facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
    facts->MaxChainDepth = mpi_reply.MaxChainDepth;
    facts->WhoInit = mpi_reply.WhoInit;
    facts->NumberOfPorts = mpi_reply.NumberOfPorts;
    facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
    facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
    facts->MaxReplyDescriptorPostQueueDepth =
        le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
    facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
    facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
    if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
        ioc->ir_firmware = 1;
    if ((facts->IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
        ioc->rdpq_array_capable = 1;
    facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
    facts->IOCRequestFrameSize =
        le16_to_cpu(mpi_reply.IOCRequestFrameSize);
    if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
        facts->IOCMaxChainSegmentSize =
            le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
    }
    facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
    facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
    ioc->shost->max_id = -1;
    facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
    facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
    facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
    facts->HighPriorityCredit =
        le16_to_cpu(mpi_reply.HighPriorityCredit);
    facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
    facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
    facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

    /*
     * Get the Page Size from IOC Facts. If it's 0, default to 4k.
     */
    ioc->page_size = 1 << facts->CurrentHostPageSize;
    if (ioc->page_size == 1) {
        pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
            "default host page size to 4k\n", ioc->name);
        ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
    }
    dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
        ioc->name, facts->CurrentHostPageSize));

    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "hba queue depth(%d), max chains per io(%d)\n",
        ioc->name, facts->RequestCredit,
        facts->MaxChainDepth));
    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "request frame size(%d), reply frame size(%d)\n", ioc->name,
        facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
    return 0;
}

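/*
 * Worked example of the page-size derivation above (illustrative
 * comment only): CurrentHostPageSize is an exponent, so a reported
 * value of 12 yields ioc->page_size = 1 << 12 = 4096 bytes, while a
 * reported 0 yields 1, which the code above treats as "unset" and
 * replaces with the 4k default.
 */
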
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2IOCInitRequest_t mpi_request;
    Mpi2IOCInitReply_t mpi_reply;
    int i, r = 0;
    ktime_t current_time;
    u16 ioc_status;
    u32 reply_post_free_array_sz = 0;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
    mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
    mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
    mpi_request.VF_ID = 0; /* TODO */
    mpi_request.VP_ID = 0;
    mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
    mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
    mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

    if (_base_is_controller_msix_enabled(ioc))
        mpi_request.HostMSIxVectors = ioc->reply_queue_count;
    mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
    mpi_request.ReplyDescriptorPostQueueDepth =
        cpu_to_le16(ioc->reply_post_queue_depth);
    mpi_request.ReplyFreeQueueDepth =
        cpu_to_le16(ioc->reply_free_queue_depth);

    mpi_request.SenseBufferAddressHigh =
        cpu_to_le32((u64)ioc->sense_dma >> 32);
    mpi_request.SystemReplyAddressHigh =
        cpu_to_le32((u64)ioc->reply_dma >> 32);
    mpi_request.SystemRequestFrameBaseAddress =
        cpu_to_le64((u64)ioc->request_dma);
    mpi_request.ReplyFreeQueueAddress =
        cpu_to_le64((u64)ioc->reply_free_dma);

    if (ioc->rdpq_array_enable) {
        reply_post_free_array_sz = ioc->reply_queue_count *
            sizeof(Mpi2IOCInitRDPQArrayEntry);
        memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
        for (i = 0; i < ioc->reply_queue_count; i++)
            ioc->reply_post_free_array[i].RDPQBaseAddress =
                cpu_to_le64(
                (u64)ioc->reply_post[i].reply_post_free_dma);
        mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
        mpi_request.ReplyDescriptorPostQueueAddress =
            cpu_to_le64((u64)ioc->reply_post_free_array_dma);
    } else {
        mpi_request.ReplyDescriptorPostQueueAddress =
            cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
    }

    /* This time stamp specifies the number of milliseconds
     * since epoch ~ midnight January 1, 1970.
     */
    current_time = ktime_get_real();
    mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

    if (ioc->logging_level & MPT_DEBUG_INIT) {
        __le32 *mfp;
        int i;

        mfp = (__le32 *)&mpi_request;
        pr_info("\toffset:data\n");
        for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
            pr_info("\t[0x%02x]:%08x\n", i*4,
                le32_to_cpu(mfp[i]));
    }

    r = _base_handshake_req_reply_wait(ioc,
        sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
        sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

    if (r != 0) {
        pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
            ioc->name, __func__, r);
        return r;
    }

    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
        mpi_reply.IOCLogInfo) {
        pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
        r = -EIO;
    }

    return r;
}

/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 *        0 meaning the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;
    u16 ioc_status;

    if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
        return 1;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (!mpi_reply)
        return 1;

    if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
        return 1;

    ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
    ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
    ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
    memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
    ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
        ioc->port_enable_failed = 1;

    if (ioc->is_driver_loading) {
        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
            mpt3sas_port_enable_complete(ioc);
            return 1;
        } else {
            ioc->start_scan_failed = ioc_status;
            ioc->start_scan = 0;
            return 1;
        }
    }
    complete(&ioc->port_enable_cmds.done);
    return 1;
}

5905
5906/**
5907 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
5908 * @ioc: per adapter object
f92363d1
SR
5909 *
5910 * Returns 0 for success, non-zero for failure.
5911 */
5912static int
98c56ad3 5913_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
5914{
5915 Mpi2PortEnableRequest_t *mpi_request;
5916 Mpi2PortEnableReply_t *mpi_reply;
f92363d1
SR
5917 int r = 0;
5918 u16 smid;
5919 u16 ioc_status;
5920
5921 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
5922
5923 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5924 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
5925 ioc->name, __func__);
5926 return -EAGAIN;
5927 }
5928
5929 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5930 if (!smid) {
5931 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5932 ioc->name, __func__);
5933 return -EAGAIN;
5934 }
5935
5936 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5937 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5938 ioc->port_enable_cmds.smid = smid;
5939 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5940 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5941
5942 init_completion(&ioc->port_enable_cmds.done);
40114bde 5943 mpt3sas_base_put_smid_default(ioc, smid);
8bbb1cf6 5944 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
f92363d1
SR
5945 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
5946 pr_err(MPT3SAS_FMT "%s: timeout\n",
5947 ioc->name, __func__);
5948 _debug_dump_mf(mpi_request,
5949 sizeof(Mpi2PortEnableRequest_t)/4);
5950 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
5951 r = -EFAULT;
5952 else
5953 r = -ETIME;
5954 goto out;
5955 }
5956
5957 mpi_reply = ioc->port_enable_cmds.reply;
5958 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5959 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5960 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
5961 ioc->name, __func__, ioc_status);
5962 r = -EFAULT;
5963 goto out;
5964 }
5965
5966 out:
5967 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5968 pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
5969 "SUCCESS" : "FAILED"));
5970 return r;
5971}
5972
/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2PortEnableRequest_t *mpi_request;
    u16 smid;

    pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

    if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
        pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->port_enable_cmds.smid = smid;
    memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
    mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

    mpt3sas_base_put_smid_default(ioc, smid);
    return 0;
}

/**
 * _base_determine_wait_on_discovery - discovery disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Returns 1 for wait, 0 for don't wait
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
    /* We wait for discovery to complete if IR firmware is loaded.
     * The sas topology events arrive before PD events, so we need time
     * to turn on the bit in ioc->pd_handles that marks a device as a PD.
     * Also, it may be required to report Volumes ahead of physical
     * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
     */
    if (ioc->ir_firmware)
        return 1;

    /* if no Bios, then we don't need to wait */
    if (!ioc->bios_pg3.BiosVersion)
        return 0;

    /* Bios is present, then we drop down here.
     *
     * If there are any entries in the Bios Page 2, then we wait
     * for discovery to complete.
     */

    /* Current Boot Device */
    if ((ioc->bios_pg2.CurrentBootDeviceForm &
        MPI2_BIOSPAGE2_FORM_MASK) ==
        MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
        /* Request Boot Device */
        (ioc->bios_pg2.ReqBootDeviceForm &
        MPI2_BIOSPAGE2_FORM_MASK) ==
        MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
        /* Alternate Request Boot Device */
        (ioc->bios_pg2.ReqAltBootDeviceForm &
        MPI2_BIOSPAGE2_FORM_MASK) ==
        MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
        return 0;

    return 1;
}

/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
    u32 desired_event;

    if (event >= 128)
        return;

    desired_event = (1 << (event % 32));

    if (event < 32)
        ioc->event_masks[0] &= ~desired_event;
    else if (event < 64)
        ioc->event_masks[1] &= ~desired_event;
    else if (event < 96)
        ioc->event_masks[2] &= ~desired_event;
    else if (event < 128)
        ioc->event_masks[3] &= ~desired_event;
}

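/*
 * Worked example of the mask layout above (illustrative comment only):
 * MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST is 0x1C (28), so it falls in
 * event_masks[0] (28 < 32) and unmasking clears bit 28 of that word;
 * a hypothetical event code of 0x46 (70) would instead clear bit
 * 70 % 32 = 6 in event_masks[2].
 */
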
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2EventNotificationRequest_t *mpi_request;
    u16 smid;
    int r = 0;
    int i;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
        pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        return -EAGAIN;
    }
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
    mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
    mpi_request->VF_ID = 0; /* TODO */
    mpi_request->VP_ID = 0;
    for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
        mpi_request->EventMasks[i] =
            cpu_to_le32(ioc->event_masks[i]);
    init_completion(&ioc->base_cmds.done);
    mpt3sas_base_put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        pr_err(MPT3SAS_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2EventNotificationRequest_t)/4);
        if (ioc->base_cmds.status & MPT3_CMD_RESET)
            r = -EFAULT;
        else
            r = -ETIME;
    } else
        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
            ioc->name, __func__));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    return r;
}

/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: event type bitmask requested by the application
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
    int i, j;
    u32 event_mask, desired_event;
    u8 send_update_to_fw;

    for (i = 0, send_update_to_fw = 0; i <
        MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
        event_mask = ~event_type[i];
        desired_event = 1;
        for (j = 0; j < 32; j++) {
            if (!(event_mask & desired_event) &&
                (ioc->event_masks[i] & desired_event)) {
                ioc->event_masks[i] &= ~desired_event;
                send_update_to_fw = 1;
            }
            desired_event = (desired_event << 1);
        }
    }

    if (!send_update_to_fw)
        return;

    mutex_lock(&ioc->base_cmds.mutex);
    _base_event_notification(ioc);
    mutex_unlock(&ioc->base_cmds.mutex);
}

/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
    u32 host_diagnostic;
    u32 ioc_state;
    u32 count;
    u32 hcb_size;

    pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

    drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
        ioc->name));

    count = 0;
    do {
        /* Write magic sequence to WriteSequence register
         * Loop until in diagnostic mode
         */
        drsprintk(ioc, pr_info(MPT3SAS_FMT
            "write magic sequence\n", ioc->name));
        writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
        writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

        /* wait 100 msec */
        msleep(100);

        if (count++ > 20)
            goto out;

        host_diagnostic = readl(&ioc->chip->HostDiagnostic);
        drsprintk(ioc, pr_info(MPT3SAS_FMT
            "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
            ioc->name, count, host_diagnostic));

    } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

    hcb_size = readl(&ioc->chip->HCBSize);

    drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
        ioc->name));
    writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
        &ioc->chip->HostDiagnostic);

    /* This delay allows the chip PCIe hardware time to finish reset tasks */
    msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

    /* Approximately 300 second max wait */
    for (count = 0; count < (300000000 /
        MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

        host_diagnostic = readl(&ioc->chip->HostDiagnostic);

        if (host_diagnostic == 0xFFFFFFFF)
            goto out;
        if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
            break;

        msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
    }

    if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

        drsprintk(ioc, pr_info(MPT3SAS_FMT
        "restart the adapter assuming the HCB Address points to good F/W\n",
            ioc->name));
        host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
        host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
        writel(host_diagnostic, &ioc->chip->HostDiagnostic);

        drsprintk(ioc, pr_info(MPT3SAS_FMT
            "re-enable the HCDW\n", ioc->name));
        writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
            &ioc->chip->HCBSize);
    }

    drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
        ioc->name));
    writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
        &ioc->chip->HostDiagnostic);

    drsprintk(ioc, pr_info(MPT3SAS_FMT
        "disable writes to the diagnostic register\n", ioc->name));
    writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

    drsprintk(ioc, pr_info(MPT3SAS_FMT
        "Wait for FW to go to the READY state\n", ioc->name));
    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
    if (ioc_state) {
        pr_err(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state);
        goto out;
    }

    pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
    return 0;

 out:
    pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
    return -EFAULT;
}

/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
    u32 ioc_state;
    int rc;
    int count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->pci_error_recovery)
        return 0;

    ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
    dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
        ioc->name, __func__, ioc_state));

    /* if in RESET state, it should move to READY state shortly */
    count = 0;
    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
        while ((ioc_state & MPI2_IOC_STATE_MASK) !=
            MPI2_IOC_STATE_READY) {
            if (count++ == 10) {
                pr_err(MPT3SAS_FMT
                    "%s: failed going to ready state (ioc_state=0x%x)\n",
                    ioc->name, __func__, ioc_state);
                return -EFAULT;
            }
            ssleep(1);
            ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
        }
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
        return 0;

    if (ioc_state & MPI2_DOORBELL_USED) {
        dhsprintk(ioc, pr_info(MPT3SAS_FMT
            "unexpected doorbell active!\n",
            ioc->name));
        goto issue_diag_reset;
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt3sas_base_fault_info(ioc, ioc_state &
            MPI2_DOORBELL_DATA_MASK);
        goto issue_diag_reset;
    }

    if (type == FORCE_BIG_HAMMER)
        goto issue_diag_reset;

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
        if (!(_base_send_ioc_reset(ioc,
            MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
            return 0;
        }

 issue_diag_reset:
    rc = _base_diag_reset(ioc);
    return rc;
}

/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
    int r, i, index;
    unsigned long flags;
    u32 reply_address;
    u16 smid;
    struct _tr_list *delayed_tr, *delayed_tr_next;
    struct _sc_list *delayed_sc, *delayed_sc_next;
    struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
    u8 hide_flag;
    struct adapter_reply_queue *reply_q;
    Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    /* clean the delayed target reset list */
    list_for_each_entry_safe(delayed_tr, delayed_tr_next,
        &ioc->delayed_tr_list, list) {
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
    }

    list_for_each_entry_safe(delayed_tr, delayed_tr_next,
        &ioc->delayed_tr_volume_list, list) {
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
    }

    list_for_each_entry_safe(delayed_sc, delayed_sc_next,
        &ioc->delayed_sc_list, list) {
        list_del(&delayed_sc->list);
        kfree(delayed_sc);
    }

    list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
        &ioc->delayed_event_ack_list, list) {
        list_del(&delayed_event_ack->list);
        kfree(delayed_event_ack);
    }

    spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

    /* hi-priority queue */
    INIT_LIST_HEAD(&ioc->hpr_free_list);
    smid = ioc->hi_priority_smid;
    for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
        ioc->hpr_lookup[i].cb_idx = 0xFF;
        ioc->hpr_lookup[i].smid = smid;
        list_add_tail(&ioc->hpr_lookup[i].tracker_list,
            &ioc->hpr_free_list);
    }

    /* internal queue */
    INIT_LIST_HEAD(&ioc->internal_free_list);
    smid = ioc->internal_smid;
    for (i = 0; i < ioc->internal_depth; i++, smid++) {
        ioc->internal_lookup[i].cb_idx = 0xFF;
        ioc->internal_lookup[i].smid = smid;
        list_add_tail(&ioc->internal_lookup[i].tracker_list,
            &ioc->internal_free_list);
    }

    spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

    /* initialize Reply Free Queue */
    for (i = 0, reply_address = (u32)ioc->reply_dma ;
        i < ioc->reply_free_queue_depth ; i++, reply_address +=
        ioc->reply_sz) {
        ioc->reply_free[i] = cpu_to_le32(reply_address);
        if (ioc->is_mcpu_endpoint)
            _base_clone_reply_to_sys_mem(ioc,
                reply_address, i);
    }

    /* initialize reply queues */
    if (ioc->is_driver_loading)
        _base_assign_reply_queues(ioc);

    /* initialize Reply Post Free Queue */
    index = 0;
    reply_post_free_contig = ioc->reply_post[0].reply_post_free;
    list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
        /*
         * If RDPQ is enabled, switch to the next allocation.
         * Otherwise advance within the contiguous region.
         */
        if (ioc->rdpq_array_enable) {
            reply_q->reply_post_free =
                ioc->reply_post[index++].reply_post_free;
        } else {
            reply_q->reply_post_free = reply_post_free_contig;
            reply_post_free_contig += ioc->reply_post_queue_depth;
        }

        reply_q->reply_post_host_index = 0;
        for (i = 0; i < ioc->reply_post_queue_depth; i++)
            reply_q->reply_post_free[i].Words =
                cpu_to_le64(ULLONG_MAX);
        if (!_base_is_controller_msix_enabled(ioc))
            goto skip_init_reply_post_free_queue;
    }
 skip_init_reply_post_free_queue:

    r = _base_send_ioc_init(ioc);
    if (r)
        return r;

    /* initialize reply free host index */
    ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
    writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

    /* initialize reply post host index */
    list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
        if (ioc->combined_reply_queue)
            writel((reply_q->msix_index & 7)<<
                MPI2_RPHI_MSIX_INDEX_SHIFT,
                ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
        else
            writel(reply_q->msix_index <<
                MPI2_RPHI_MSIX_INDEX_SHIFT,
                &ioc->chip->ReplyPostHostIndex);

        if (!_base_is_controller_msix_enabled(ioc))
            goto skip_init_reply_post_host_index;
    }

 skip_init_reply_post_host_index:

    _base_unmask_interrupts(ioc);

    if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
        r = _base_display_fwpkg_version(ioc);
        if (r)
            return r;
    }

    _base_static_config_pages(ioc);
    r = _base_event_notification(ioc);
    if (r)
        return r;

    if (ioc->is_driver_loading) {

        if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
            == 0x80) {
            hide_flag = (u8) (
                le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
                MFG_PAGE10_HIDE_SSDS_MASK);
            if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
                ioc->mfg_pg10_hide_flag = hide_flag;
        }

        ioc->wait_for_discovery_to_complete =
            _base_determine_wait_on_discovery(ioc);

        return r; /* scan_start and scan_finished support */
    }

    r = _base_send_port_enable(ioc);
    if (r)
        return r;

    return r;
}

/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
    dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    /* synchronizing freeing resource with pci_access_mutex lock */
    mutex_lock(&ioc->pci_access_mutex);
    if (ioc->chip_phys && ioc->chip) {
        _base_mask_interrupts(ioc);
        ioc->shost_recovery = 1;
        _base_make_ioc_ready(ioc, SOFT_RESET);
        ioc->shost_recovery = 0;
    }

    mpt3sas_base_unmap_resources(ioc);
    mutex_unlock(&ioc->pci_access_mutex);
    return;
}

6563/**
6564 * mpt3sas_base_attach - attach controller instance
6565 * @ioc: per adapter object
6566 *
6567 * Returns 0 for success, non-zero for failure.
6568 */
6569int
6570mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6571{
6572 int r, i;
6573 int cpu_id, last_cpu_id = 0;
6574
6575 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6576 __func__));
6577
6578 /* setup cpu_msix_table */
6579 ioc->cpu_count = num_online_cpus();
6580 for_each_online_cpu(cpu_id)
6581 last_cpu_id = cpu_id;
6582 ioc->cpu_msix_table_sz = last_cpu_id + 1;
6583 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
6584 ioc->reply_queue_count = 1;
6585 if (!ioc->cpu_msix_table) {
6586 dfailprintk(ioc, pr_info(MPT3SAS_FMT
6587 "allocation for cpu_msix_table failed!!!\n",
6588 ioc->name));
6589 r = -ENOMEM;
6590 goto out_free_resources;
6591 }
6592
7786ab6a
SR
6593 if (ioc->is_warpdrive) {
6594 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
6595 sizeof(resource_size_t *), GFP_KERNEL);
6596 if (!ioc->reply_post_host_index) {
6597 dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
1d55abc0
HR
6598 "for reply_post_host_index failed!!!\n",
6599 ioc->name));
7786ab6a
SR
6600 r = -ENOMEM;
6601 goto out_free_resources;
6602 }
6603 }
6604
9b05c91a
SR
6605 ioc->rdpq_array_enable_assigned = 0;
6606 ioc->dma_mask = 0;
f92363d1
SR
6607 r = mpt3sas_base_map_resources(ioc);
6608 if (r)
6609 goto out_free_resources;
6610
f92363d1 6611 pci_set_drvdata(ioc->pdev, ioc->shost);
98c56ad3 6612 r = _base_get_ioc_facts(ioc);
f92363d1
SR
6613 if (r)
6614 goto out_free_resources;
6615
471ef9d4
SR
6616 switch (ioc->hba_mpi_version_belonged) {
6617 case MPI2_VERSION:
6618 ioc->build_sg_scmd = &_base_build_sg_scmd;
6619 ioc->build_sg = &_base_build_sg;
6620 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
6621 break;
6622 case MPI25_VERSION:
b130b0d5 6623 case MPI26_VERSION:
471ef9d4
SR
6624 /*
6625 * In SAS3.0,
6626 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
6627 * Target Status - all require the IEEE formated scatter gather
6628 * elements.
6629 */
6630 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6631 ioc->build_sg = &_base_build_sg_ieee;
aff39e61 6632 ioc->build_nvme_prp = &_base_build_nvme_prp;
471ef9d4
SR
6633 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6634 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
81c16f83 6635
471ef9d4
SR
6636 break;
6637 }
f92363d1 6638
40114bde
SP
6639 if (ioc->is_mcpu_endpoint)
6640 ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
6641 else
6642 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
81c16f83 6643
f92363d1
SR
6644 /*
6645 * These function pointers for other requests that don't
6646 * the require IEEE scatter gather elements.
6647 *
6648 * For example Configuration Pages and SAS IOUNIT Control don't.
6649 */
6650 ioc->build_sg_mpi = &_base_build_sg;
6651 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6652
98c56ad3 6653 r = _base_make_ioc_ready(ioc, SOFT_RESET);
f92363d1
SR
6654 if (r)
6655 goto out_free_resources;
6656
6657 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6658 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6659 if (!ioc->pfacts) {
6660 r = -ENOMEM;
6661 goto out_free_resources;
6662 }
6663
6664 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
98c56ad3 6665 r = _base_get_port_facts(ioc, i);
f92363d1
SR
6666 if (r)
6667 goto out_free_resources;
6668 }
6669
98c56ad3 6670 r = _base_allocate_memory_pools(ioc);
f92363d1
SR
6671 if (r)
6672 goto out_free_resources;
6673
6674 init_waitqueue_head(&ioc->reset_wq);
6675
6676 /* allocate memory pd handle bitmask list */
6677 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6678 if (ioc->facts.MaxDevHandle % 8)
6679 ioc->pd_handles_sz++;
6680 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6681 GFP_KERNEL);
6682 if (!ioc->pd_handles) {
6683 r = -ENOMEM;
6684 goto out_free_resources;
6685 }
6686 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6687 GFP_KERNEL);
6688 if (!ioc->blocking_handles) {
6689 r = -ENOMEM;
6690 goto out_free_resources;
6691 }
6692
c696f7b8
SPS
6693 /* allocate memory for pending OS device add list */
6694 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6695 if (ioc->facts.MaxDevHandle % 8)
6696 ioc->pend_os_device_add_sz++;
6697 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6698 GFP_KERNEL);
6699 if (!ioc->pend_os_device_add)
6700 goto out_free_resources;
6701
6702 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6703 ioc->device_remove_in_progress =
6704 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
6705 if (!ioc->device_remove_in_progress)
6706 goto out_free_resources;
6707
f92363d1
SR
6708 ioc->fwfault_debug = mpt3sas_fwfault_debug;
6709
6710 /* base internal command bits */
6711 mutex_init(&ioc->base_cmds.mutex);
6712 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6713 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6714
6715 /* port_enable command bits */
6716 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6717 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6718
6719 /* transport internal command bits */
6720 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6721 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6722 mutex_init(&ioc->transport_cmds.mutex);
6723
6724 /* scsih internal command bits */
6725 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6726 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6727 mutex_init(&ioc->scsih_cmds.mutex);
6728
6729 /* task management internal command bits */
6730 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6731 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6732 mutex_init(&ioc->tm_cmds.mutex);
6733
6734 /* config page internal command bits */
6735 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6736 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6737 mutex_init(&ioc->config_cmds.mutex);
6738
6739 /* ctl module internal command bits */
6740 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6741 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6742 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6743 mutex_init(&ioc->ctl_cmds.mutex);
6744
a5dd7efd
CJ
6745 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6746 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6747 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6748 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
f92363d1
SR
6749 r = -ENOMEM;
6750 goto out_free_resources;
6751 }
6752
6753 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6754 ioc->event_masks[i] = -1;
6755
6756 /* here we enable the events we care about */
6757 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
6758 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
6759 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
6760 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
6761 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
6762 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
6763 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
6764 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
6765 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
6766 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
2d8ce8c9 6767 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
b99b1993 6768 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
95540b8e 6769 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4318c734
SPS
6770 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6771 if (ioc->is_gen35_ioc) {
6772 _base_unmask_events(ioc,
6773 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6774 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6775 _base_unmask_events(ioc,
6776 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6777 }
6778 }
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	ioc->non_operational_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
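
/*
 * Illustrative sketch (not part of the driver): how a probe path is
 * expected to pair the attach routine that ends above with
 * mpt3sas_base_detach() below.  example_probe_ioc() is a hypothetical
 * helper; in the real driver this pairing lives in the scsih PCI
 * probe/remove callbacks.
 */
static int __maybe_unused example_probe_ioc(struct MPT3SAS_ADAPTER *ioc)
{
	int rc;

	rc = mpt3sas_base_attach(ioc);
	if (rc)
		return rc;	/* attach unwinds its own allocations on failure */

	/* ... register the Scsi_Host, start device scanning, etc. ... */

	return 0;
}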

/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
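
/*
 * Illustrative sketch (hypothetical helper, not driver code): the PCI
 * .remove callback is the expected caller of the detach above, after
 * the upper layers (scsih/transport) have been torn down.
 */
static void __maybe_unused example_remove_ioc(struct MPT3SAS_ADAPTER *ioc)
{
	/* the Scsi_Host must already be unregistered at this point */
	mpt3sas_base_detach(ioc);
}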

/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
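
/*
 * Illustrative sketch (hypothetical, not driver code): every module-level
 * reset callback wired in above follows the same three-phase contract.
 * On MPT3_IOC_AFTER_RESET it must mark any pending internal command as
 * reset-terminated and complete the waiter so nothing blocks across the
 * reset, mirroring what the base handler does for the transport, base,
 * port_enable and config commands.
 */
static void __maybe_unused example_module_reset_handler(
	struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	if (reset_phase != MPT3_IOC_AFTER_RESET)
		return;
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
}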

/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: Pointer to MPT3SAS_ADAPTER structure
 *
 * Waits up to 10 seconds for all pending commands to complete
 * prior to putting the controller into reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
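
/*
 * Illustrative sketch (hypothetical helper): the other half of the
 * handshake above.  The I/O completion path is expected to decrement
 * pending_io_count and wake reset_wq once it reaches zero; in the real
 * driver that bookkeeping lives in the command completion code, not in
 * a helper like this.
 */
static void __maybe_unused example_io_completed(struct MPT3SAS_ADAPTER *ioc)
{
	if (ioc->pending_io_count && !--ioc->pending_io_count)
		wake_up(&ioc->reset_wq);
}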

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT3SAS_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. "
		    "Please reboot the system and ensure that the correct "
		    "firmware version is running\n", ioc->name);
	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}
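
/*
 * Illustrative sketch (hypothetical caller): how a fault-polling watchdog
 * might invoke the hard reset above when the IOC reports a fault.
 * FORCE_BIG_HAMMER requests a full diagnostic reset, as opposed to
 * SOFT_RESET's message-unit reset.
 */
static void __maybe_unused example_fault_recovery(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}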