drivers/scsi/megaraid/megaraid_sas_fp.c
9c915a8c 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
ae59057b 4 * Copyright (c) 2009-2012 LSI Corporation.
9c915a8c 5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * FILE: megaraid_sas_fp.c
21 *
22 * Authors: LSI Corporation
23 * Sumant Patro
24 * Varad Talamacki
25 * Manoj Jose
26 *
27 * Send feedback to: <megaraidlinux@lsi.com>
28 *
29 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
30 * ATTN: Linuxraid
31 */
32
33#include <linux/kernel.h>
34#include <linux/types.h>
35#include <linux/pci.h>
36#include <linux/list.h>
37#include <linux/moduleparam.h>
38#include <linux/module.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/delay.h>
9c915a8c 42#include <linux/uio.h>
43#include <linux/uaccess.h>
44#include <linux/fs.h>
45#include <linux/compat.h>
46#include <linux/blkdev.h>
47#include <linux/poll.h>
48
49#include <scsi/scsi.h>
50#include <scsi/scsi_cmnd.h>
51#include <scsi/scsi_device.h>
52#include <scsi/scsi_host.h>
53
54#include "megaraid_sas_fusion.h"
36807e67 55#include "megaraid_sas.h"
9c915a8c 56#include <asm/div64.h>
57
58#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
59#define MR_LD_STATE_OPTIMAL 3
60#define FALSE 0
61#define TRUE 1
62
bc93d425  63#define SPAN_DEBUG 0
64#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
 65#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
66#define SPAN_INVALID 0xff
67
9c915a8c 68/* Prototypes */
bc93d425  69void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
70 struct LD_LOAD_BALANCE_INFO *lbInfo);
71
72static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
73 PLD_SPAN_INFO ldSpanInfo);
74static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
75 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
76 struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
77static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
78 u64 strip, struct MR_FW_RAID_MAP_ALL *map);
9c915a8c 79
80u32 mega_mod64(u64 dividend, u32 divisor)
81{
82 u64 d;
83 u32 remainder;
84
85 if (!divisor)
 86 printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
87 d = dividend;
88 remainder = do_div(d, divisor);
89 return remainder;
90}
91
92/**
93 * @param dividend : Dividend
94 * @param divisor : Divisor
95 *
96 * @return quotient
97 **/
98u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
99{
100 u32 remainder;
101 u64 d;
102
103 if (!divisor)
 104 printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
105
106 d = dividend;
107 remainder = do_div(d, divisor);
108
109 return d;
110}
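/*
 * Note: both helpers above funnel through do_div() because a plain 64-bit
 * "/" or "%" is not available to the kernel on 32-bit architectures.
 * Quick sanity check with illustrative values: mega_div64_32(1000, 64)
 * yields 15 and mega_mod64(1000, 64) yields 40, i.e. the quotient and
 * remainder of the same division (15 * 64 + 40 == 1000).
 */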
111
112struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
113{
114 return &map->raidMap.ldSpanMap[ld].ldRaid;
115}
116
117static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
118 struct MR_FW_RAID_MAP_ALL
119 *map)
120{
121 return &map->raidMap.ldSpanMap[ld].spanBlock[0];
122}
123
124static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
125{
126 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
127}
128
129static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
130{
131 return map->raidMap.arMapInfo[ar].pd[arm];
132}
133
134static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
135{
136 return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
137}
138
139static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
140{
141 return map->raidMap.devHndlInfo[pd].curDevHdl;
142}
143
144u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
145{
146 return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
147}
148
149u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
150{
151 return map->raidMap.ldTgtIdToLd[ldTgtId];
152}
153
154static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
155 struct MR_FW_RAID_MAP_ALL *map)
156{
157 return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
158}
159
160/*
161 * This function will validate Map info data provided by FW
162 */
bc93d425 163u8 MR_ValidateMapInfo(struct megasas_instance *instance)
9c915a8c 164{
bc93d425  165 struct fusion_context *fusion = instance->ctrl_context;
166 struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
167 struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
168 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
9c915a8c 169 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
170
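 /*
  * The expected size computed below assumes the firmware map ends in a
  * ldSpanMap[] array declared with a single element: take the base
  * structure size, subtract that one embedded MR_LD_SPAN_MAP, and add
  * one span map per logical drive reported in ldCount.
  */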
171 if (pFwRaidMap->totalSize !=
172 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
173 (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
 174 printk(KERN_ERR "megasas: map info structure size 0x%x does not match the LD count\n",
175 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
176 sizeof(struct MR_LD_SPAN_MAP)) +
177 (sizeof(struct MR_LD_SPAN_MAP) *
178 pFwRaidMap->ldCount)));
179 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
180 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
181 pFwRaidMap->totalSize);
182 return 0;
183 }
184
bc93d425  185 if (instance->UnevenSpanSupport)
186 mr_update_span_set(map, ldSpanInfo);
187
9c915a8c 188 mr_update_load_balance_params(map, lbInfo);
189
190 return 1;
191}
192
193u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
bc93d425 194 struct MR_FW_RAID_MAP_ALL *map)
9c915a8c 195{
196 struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
197 struct MR_QUAD_ELEMENT *quad;
198 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
199 u32 span, j;
200
201 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
202
203 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
204 quad = &pSpanBlock->block_span_info.quad[j];
205
bc93d425  206 if (quad->diff == 0)
207 return SPAN_INVALID;
9c915a8c 208 if (quad->logStart <= row && row <= quad->logEnd &&
209 (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
210 if (span_blk != NULL) {
211 u64 blk, debugBlk;
212 blk =
213 mega_div64_32(
214 (row-quad->logStart),
215 quad->diff);
216 debugBlk = blk;
217
218 blk = (blk + quad->offsetInSpan) <<
219 raid->stripeShift;
220 *span_blk = blk;
221 }
222 return span;
223 }
224 }
225 }
bc93d425  226 return SPAN_INVALID;
227}
228
229/*
230******************************************************************************
231*
232* Function to print info about span set created in driver from FW raid map
233*
234* Inputs :
235* map - LD map
236* ldSpanInfo - ldSpanInfo per HBA instance
237*/
238#if SPAN_DEBUG
239static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
240{
241
242 u8 span;
243 u32 element;
244 struct MR_LD_RAID *raid;
245 LD_SPAN_SET *span_set;
246 struct MR_QUAD_ELEMENT *quad;
247 int ldCount;
248 u16 ld;
249
250 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
251 ld = MR_TargetIdToLdGet(ldCount, map);
252 if (ld >= MAX_LOGICAL_DRIVES)
253 continue;
254 raid = MR_LdRaidGet(ld, map);
255 dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
256 ld, raid->spanDepth);
257 for (span = 0; span < raid->spanDepth; span++)
258 dev_dbg(&instance->pdev->dev, "Span=%x,"
259 " number of quads=%x\n", span,
260 map->raidMap.ldSpanMap[ld].spanBlock[span].
261 block_span_info.noElements);
262 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
263 span_set = &(ldSpanInfo[ld].span_set[element]);
264 if (span_set->span_row_data_width == 0)
265 break;
266
267 dev_dbg(&instance->pdev->dev, "Span Set %x:"
268 "width=%x, diff=%x\n", element,
269 (unsigned int)span_set->span_row_data_width,
270 (unsigned int)span_set->diff);
271 dev_dbg(&instance->pdev->dev, "logical LBA"
272 "start=0x%08lx, end=0x%08lx\n",
273 (long unsigned int)span_set->log_start_lba,
274 (long unsigned int)span_set->log_end_lba);
275 dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
276 " end=0x%08lx\n",
277 (long unsigned int)span_set->span_row_start,
278 (long unsigned int)span_set->span_row_end);
279 dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
280 " end=0x%08lx\n",
281 (long unsigned int)span_set->data_row_start,
282 (long unsigned int)span_set->data_row_end);
283 dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
284 " end=0x%08lx\n",
285 (long unsigned int)span_set->data_strip_start,
286 (long unsigned int)span_set->data_strip_end);
287
288 for (span = 0; span < raid->spanDepth; span++) {
289 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
290 block_span_info.noElements >=
291 element + 1) {
292 quad = &map->raidMap.ldSpanMap[ld].
293 spanBlock[span].block_span_info.
294 quad[element];
295 dev_dbg(&instance->pdev->dev, "Span=%x,"
296 "Quad=%x, diff=%x\n", span,
297 element, quad->diff);
298 dev_dbg(&instance->pdev->dev,
299 "offset_in_span=0x%08lx\n",
300 (long unsigned int)quad->offsetInSpan);
301 dev_dbg(&instance->pdev->dev,
302 "logical start=0x%08lx, end=0x%08lx\n",
303 (long unsigned int)quad->logStart,
304 (long unsigned int)quad->logEnd);
305 }
306 }
307 }
308 }
309 return 0;
310}
311#endif
312
313/*
314******************************************************************************
315*
316* This routine calculates the Span block for given row using spanset.
317*
318* Inputs :
319* instance - HBA instance
320* ld - Logical drive number
321* row - Row number
322* map - LD map
323*
324* Outputs :
325*
326* span - Span number
327* block - Absolute Block number in the physical disk
 328* div_error - Divide error code.
329*/
330
331u32 mr_spanset_get_span_block(struct megasas_instance *instance,
332 u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
333{
334 struct fusion_context *fusion = instance->ctrl_context;
335 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
336 LD_SPAN_SET *span_set;
337 struct MR_QUAD_ELEMENT *quad;
338 u32 span, info;
339 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
340
341 for (info = 0; info < MAX_QUAD_DEPTH; info++) {
342 span_set = &(ldSpanInfo[ld].span_set[info]);
343
344 if (span_set->span_row_data_width == 0)
345 break;
346
347 if (row > span_set->data_row_end)
348 continue;
349
350 for (span = 0; span < raid->spanDepth; span++)
351 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
352 block_span_info.noElements >= info+1) {
353 quad = &map->raidMap.ldSpanMap[ld].
354 spanBlock[span].
355 block_span_info.quad[info];
356 if (quad->diff == 0)
357 return SPAN_INVALID;
358 if (quad->logStart <= row &&
359 row <= quad->logEnd &&
360 (mega_mod64(row - quad->logStart,
361 quad->diff)) == 0) {
362 if (span_blk != NULL) {
363 u64 blk;
364 blk = mega_div64_32
365 ((row - quad->logStart),
366 quad->diff);
367 blk = (blk + quad->offsetInSpan)
368 << raid->stripeShift;
369 *span_blk = blk;
370 }
371 return span;
372 }
373 }
374 }
375 return SPAN_INVALID;
376}
377
378/*
379******************************************************************************
380*
381* This routine calculates the row for given strip using spanset.
382*
383* Inputs :
384* instance - HBA instance
385* ld - Logical drive number
386* Strip - Strip
387* map - LD map
388*
389* Outputs :
390*
391* row - row associated with strip
392*/
393
394static u64 get_row_from_strip(struct megasas_instance *instance,
395 u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
396{
397 struct fusion_context *fusion = instance->ctrl_context;
398 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
399 LD_SPAN_SET *span_set;
400 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
401 u32 info, strip_offset, span, span_offset;
402 u64 span_set_Strip, span_set_Row, retval;
403
404 for (info = 0; info < MAX_QUAD_DEPTH; info++) {
405 span_set = &(ldSpanInfo[ld].span_set[info]);
406
407 if (span_set->span_row_data_width == 0)
408 break;
409 if (strip > span_set->data_strip_end)
410 continue;
411
412 span_set_Strip = strip - span_set->data_strip_start;
413 strip_offset = mega_mod64(span_set_Strip,
414 span_set->span_row_data_width);
415 span_set_Row = mega_div64_32(span_set_Strip,
416 span_set->span_row_data_width) * span_set->diff;
417 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
418 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
419 block_span_info.noElements >= info+1) {
420 if (strip_offset >=
421 span_set->strip_offset[span])
422 span_offset++;
423 else
424 break;
425 }
426#if SPAN_DEBUG
427 dev_info(&instance->pdev->dev, "Strip 0x%llx,"
428 "span_set_Strip 0x%llx, span_set_Row 0x%llx"
429 "data width 0x%llx span offset 0x%x\n", strip,
430 (unsigned long long)span_set_Strip,
431 (unsigned long long)span_set_Row,
432 (unsigned long long)span_set->span_row_data_width,
433 span_offset);
434 dev_info(&instance->pdev->dev, "For strip 0x%llx"
435 "row is 0x%llx\n", strip,
436 (unsigned long long) span_set->data_row_start +
437 (unsigned long long) span_set_Row + (span_offset - 1));
438#endif
439 retval = (span_set->data_row_start + span_set_Row +
440 (span_offset - 1));
441 return retval;
442 }
443 return -1LLU;
444}
445
446
447/*
448******************************************************************************
449*
450* This routine calculates the Start Strip for given row using spanset.
451*
452* Inputs :
453* instance - HBA instance
454* ld - Logical drive number
455* row - Row number
456* map - LD map
457*
458* Outputs :
459*
460* Strip - Start strip associated with row
461*/
462
463static u64 get_strip_from_row(struct megasas_instance *instance,
464 u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
465{
466 struct fusion_context *fusion = instance->ctrl_context;
467 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
468 LD_SPAN_SET *span_set;
469 struct MR_QUAD_ELEMENT *quad;
470 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
471 u32 span, info;
472 u64 strip;
473
474 for (info = 0; info < MAX_QUAD_DEPTH; info++) {
475 span_set = &(ldSpanInfo[ld].span_set[info]);
476
477 if (span_set->span_row_data_width == 0)
478 break;
479 if (row > span_set->data_row_end)
480 continue;
481
482 for (span = 0; span < raid->spanDepth; span++)
483 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
484 block_span_info.noElements >= info+1) {
485 quad = &map->raidMap.ldSpanMap[ld].
486 spanBlock[span].block_span_info.quad[info];
487 if (quad->logStart <= row &&
488 row <= quad->logEnd &&
489 mega_mod64((row - quad->logStart),
490 quad->diff) == 0) {
491 strip = mega_div64_32
492 (((row - span_set->data_row_start)
493 - quad->logStart),
494 quad->diff);
495 strip *= span_set->span_row_data_width;
496 strip += span_set->data_strip_start;
497 strip += span_set->strip_offset[span];
498 return strip;
499 }
500 }
501 }
 502 dev_err(&instance->pdev->dev, "get_strip_from_row "
503 "returns invalid strip for ld=%x, row=%lx\n",
504 ld, (long unsigned int)row);
505 return -1;
506}
507
508/*
509******************************************************************************
510*
511* This routine calculates the Physical Arm for given strip using spanset.
512*
513* Inputs :
514* instance - HBA instance
515* ld - Logical drive number
516* strip - Strip
517* map - LD map
518*
519* Outputs :
520*
521* Phys Arm - Phys Arm associated with strip
522*/
523
524static u32 get_arm_from_strip(struct megasas_instance *instance,
525 u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
526{
527 struct fusion_context *fusion = instance->ctrl_context;
528 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
529 LD_SPAN_SET *span_set;
530 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
531 u32 info, strip_offset, span, span_offset, retval;
532
533 for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
534 span_set = &(ldSpanInfo[ld].span_set[info]);
535
536 if (span_set->span_row_data_width == 0)
537 break;
538 if (strip > span_set->data_strip_end)
539 continue;
540
541 strip_offset = (uint)mega_mod64
542 ((strip - span_set->data_strip_start),
543 span_set->span_row_data_width);
544
545 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
546 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
547 block_span_info.noElements >= info+1) {
548 if (strip_offset >=
549 span_set->strip_offset[span])
550 span_offset =
551 span_set->strip_offset[span];
552 else
553 break;
554 }
555#if SPAN_DEBUG
556 dev_info(&instance->pdev->dev, "get_arm_from_strip:"
557 "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
558 (long unsigned int)strip, (strip_offset - span_offset));
559#endif
560 retval = (strip_offset - span_offset);
561 return retval;
562 }
563
 564 dev_err(&instance->pdev->dev, "get_arm_from_strip "
565 "returns invalid arm for ld=%x strip=%lx\n",
566 ld, (long unsigned int)strip);
567
568 return -1;
569}
570
571/* This Function will return Phys arm */
572u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
573 struct MR_FW_RAID_MAP_ALL *map)
574{
575 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
576 /* Need to check correct default value */
577 u32 arm = 0;
578
579 switch (raid->level) {
580 case 0:
581 case 5:
582 case 6:
583 arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
584 break;
585 case 1:
586 /* start with logical arm */
587 arm = get_arm_from_strip(instance, ld, stripe, map);
fec3c1b4 588 if (arm != -1U)
bc93d425  589 arm *= 2;
590 break;
591 }
592
593 return arm;
594}
595
596
597/*
598******************************************************************************
599*
600* This routine calculates the arm, span and block for the specified stripe and
601* reference in stripe using spanset
602*
603* Inputs :
604*
605* ld - Logical drive number
606* stripRow - Stripe number
607* stripRef - Reference in stripe
608*
609* Outputs :
610*
611* span - Span number
612* block - Absolute Block number in the physical disk
613*/
614static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
615 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
616 struct RAID_CONTEXT *pRAID_Context,
617 struct MR_FW_RAID_MAP_ALL *map)
618{
619 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
620 u32 pd, arRef;
621 u8 physArm, span;
622 u64 row;
623 u8 retval = TRUE;
624 u8 do_invader = 0;
625 u64 *pdBlock = &io_info->pdBlock;
626 u16 *pDevHandle = &io_info->devHandle;
627 u32 logArm, rowMod, armQ, arm;
628
629 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
630 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
631 do_invader = 1;
632
633 /*Get row and span from io_info for Uneven Span IO.*/
634 row = io_info->start_row;
635 span = io_info->start_span;
636
637
638 if (raid->level == 6) {
639 logArm = get_arm_from_strip(instance, ld, stripRow, map);
fec3c1b4 640 if (logArm == -1U)
bc93d425  641 return FALSE;
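 /*
  * RAID-6 arm rotation, mirroring the math in MR_GetPhyParams() below:
  * the Q drive rotates per row (index SPAN_ROW_SIZE - 1 - rowMod) and
  * the data arms logically follow it, wrapping around the span row.
  * Illustrative example, assuming SPAN_ROW_SIZE == 4 and rowMod == 1:
  * armQ == 2, so logArm 0 maps to physical arm 3 and logArm 1 wraps
  * around to physical arm 0.
  */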
642 rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
643 armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
644 arm = armQ + 1 + logArm;
645 if (arm >= SPAN_ROW_SIZE(map, ld, span))
646 arm -= SPAN_ROW_SIZE(map, ld, span);
647 physArm = (u8)arm;
648 } else
649 /* Calculate the arm */
650 physArm = get_arm(instance, ld, span, stripRow, map);
651 if (physArm == 0xFF)
652 return FALSE;
653
654 arRef = MR_LdSpanArrayGet(ld, span, map);
655 pd = MR_ArPdGet(arRef, physArm, map);
656
657 if (pd != MR_PD_INVALID)
658 *pDevHandle = MR_PdDevHandleGet(pd, map);
659 else {
660 *pDevHandle = MR_PD_INVALID;
661 if ((raid->level >= 5) &&
662 (!do_invader || (do_invader &&
663 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
664 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
665 else if (raid->level == 1) {
666 pd = MR_ArPdGet(arRef, physArm + 1, map);
667 if (pd != MR_PD_INVALID)
668 *pDevHandle = MR_PdDevHandleGet(pd, map);
669 }
670 }
671
672 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
673 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
674 physArm;
675 return retval;
9c915a8c 676}
677
678/*
679******************************************************************************
680*
681* This routine calculates the arm, span and block for the specified stripe and
682* reference in stripe.
683*
684* Inputs :
685*
686* ld - Logical drive number
687* stripRow - Stripe number
688* stripRef - Reference in stripe
689*
690* Outputs :
691*
692* span - Span number
693* block - Absolute Block number in the physical disk
694*/
36807e67 695u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
bc93d425  696 u16 stripRef, struct IO_REQUEST_INFO *io_info,
697 struct RAID_CONTEXT *pRAID_Context,
698 struct MR_FW_RAID_MAP_ALL *map)
9c915a8c 699{
700 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
701 u32 pd, arRef;
702 u8 physArm, span;
703 u64 row;
704 u8 retval = TRUE;
21d3c710 705 u8 do_invader = 0;
bc93d425  706 u64 *pdBlock = &io_info->pdBlock;
707 u16 *pDevHandle = &io_info->devHandle;
21d3c710  708
709 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
710 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
711 do_invader = 1;
9c915a8c 712
713 row = mega_div64_32(stripRow, raid->rowDataSize);
714
715 if (raid->level == 6) {
716 /* logical arm within row */
717 u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
718 u32 rowMod, armQ, arm;
719
720 if (raid->rowSize == 0)
721 return FALSE;
722 /* get logical row mod */
723 rowMod = mega_mod64(row, raid->rowSize);
724 armQ = raid->rowSize-1-rowMod; /* index of Q drive */
725 arm = armQ+1+logArm; /* data always logically follows Q */
726 if (arm >= raid->rowSize) /* handle wrap condition */
727 arm -= raid->rowSize;
728 physArm = (u8)arm;
729 } else {
730 if (raid->modFactor == 0)
731 return FALSE;
732 physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
733 raid->modFactor),
734 map);
735 }
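 /*
  * Illustrative example (assumed geometry): with rowDataSize == 3,
  * strip 7 falls in row 2 (7 / 3), and for non-RAID-6 levels its arm
  * comes from the dataArmMap lookup on (7 mod modFactor), so
  * consecutive strips are spread across the arms of the row.
  */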
736
737 if (raid->spanDepth == 1) {
738 span = 0;
739 *pdBlock = row << raid->stripeShift;
740 } else {
bc93d425  741 span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
742 if (span == SPAN_INVALID)
9c915a8c 743 return FALSE;
744 }
745
746 /* Get the array on which this span is present */
747 arRef = MR_LdSpanArrayGet(ld, span, map);
748 pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
749
750 if (pd != MR_PD_INVALID)
751 /* Get dev handle from Pd. */
752 *pDevHandle = MR_PdDevHandleGet(pd, map);
753 else {
754 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
36807e67 755 if ((raid->level >= 5) &&
21d3c710  756 (!do_invader || (do_invader &&
757 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
9c915a8c 758 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
759 else if (raid->level == 1) {
760 /* Get alternate Pd. */
761 pd = MR_ArPdGet(arRef, physArm + 1, map);
762 if (pd != MR_PD_INVALID)
763 /* Get dev handle from Pd */
764 *pDevHandle = MR_PdDevHandleGet(pd, map);
765 }
9c915a8c 766 }
767
768 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
769 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
770 physArm;
771 return retval;
772}
773
774/*
775******************************************************************************
776*
777* MR_BuildRaidContext function
778*
779* This function will initiate command processing. The start/end row and strip
 780* information is calculated, then the region lock parameters are set up in the
 781* RAID context. It returns TRUE when the context is built and FALSE otherwise.
782*/
783u8
36807e67 784MR_BuildRaidContext(struct megasas_instance *instance,
785 struct IO_REQUEST_INFO *io_info,
9c915a8c 786 struct RAID_CONTEXT *pRAID_Context,
787 struct MR_FW_RAID_MAP_ALL *map)
788{
789 struct MR_LD_RAID *raid;
790 u32 ld, stripSize, stripe_mask;
791 u64 endLba, endStrip, endRow, start_row, start_strip;
792 u64 regStart;
793 u32 regSize;
794 u8 num_strips, numRows;
795 u16 ref_in_start_stripe, ref_in_end_stripe;
796 u64 ldStartBlock;
797 u32 numBlocks, ldTgtId;
798 u8 isRead;
799 u8 retval = 0;
bc93d425  800 u8 startlba_span = SPAN_INVALID;
801 u64 *pdBlock = &io_info->pdBlock;
9c915a8c 802
803 ldStartBlock = io_info->ldStartBlock;
804 numBlocks = io_info->numBlocks;
805 ldTgtId = io_info->ldTgtId;
806 isRead = io_info->isRead;
bc93d425  807 io_info->IoforUnevenSpan = 0;
808 io_info->start_span = SPAN_INVALID;
9c915a8c 809
810 ld = MR_TargetIdToLdGet(ldTgtId, map);
811 raid = MR_LdRaidGet(ld, map);
812
bc93d425  813 /*
814 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
815 * return FALSE
816 */
817 if (raid->rowDataSize == 0) {
818 if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
819 return FALSE;
820 else if (instance->UnevenSpanSupport) {
821 io_info->IoforUnevenSpan = 1;
822 } else {
823 dev_info(&instance->pdev->dev,
824 "raid->rowDataSize is 0, but has SPAN[0]"
825 "rowDataSize = 0x%0x,"
826 "but there is _NO_ UnevenSpanSupport\n",
827 MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
828 return FALSE;
829 }
830 }
831
9c915a8c 832 stripSize = 1 << raid->stripeShift;
833 stripe_mask = stripSize-1;
bc93d425  834
835
9c915a8c 836 /*
837 * calculate starting row and stripe, and number of strips and rows
838 */
839 start_strip = ldStartBlock >> raid->stripeShift;
840 ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
841 endLba = ldStartBlock + numBlocks - 1;
842 ref_in_end_stripe = (u16)(endLba & stripe_mask);
843 endStrip = endLba >> raid->stripeShift;
844 num_strips = (u8)(endStrip - start_strip + 1); /* End strip */
bc93d425  845
846 if (io_info->IoforUnevenSpan) {
847 start_row = get_row_from_strip(instance, ld, start_strip, map);
848 endRow = get_row_from_strip(instance, ld, endStrip, map);
849 if (start_row == -1ULL || endRow == -1ULL) {
 850 dev_info(&instance->pdev->dev, "return from %s %d. "
851 "Send IO w/o region lock.\n",
852 __func__, __LINE__);
853 return FALSE;
854 }
855
856 if (raid->spanDepth == 1) {
857 startlba_span = 0;
858 *pdBlock = start_row << raid->stripeShift;
859 } else
860 startlba_span = (u8)mr_spanset_get_span_block(instance,
861 ld, start_row, pdBlock, map);
862 if (startlba_span == SPAN_INVALID) {
 863 dev_info(&instance->pdev->dev, "return from %s %d "
 864 "for row 0x%llx, start strip 0x%llx "
 865 "end strip 0x%llx\n", __func__, __LINE__,
866 (unsigned long long)start_row,
867 (unsigned long long)start_strip,
868 (unsigned long long)endStrip);
869 return FALSE;
870 }
871 io_info->start_span = startlba_span;
872 io_info->start_row = start_row;
873#if SPAN_DEBUG
874 dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
875 "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
876 " span 0x%x\n", __func__, __LINE__,
877 (unsigned long long)start_row,
878 (unsigned long long)start_strip,
879 (unsigned long long)endStrip, startlba_span);
880 dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
881 "Start span 0x%x\n", (unsigned long long)start_row,
882 (unsigned long long)endRow, startlba_span);
883#endif
884 } else {
885 start_row = mega_div64_32(start_strip, raid->rowDataSize);
886 endRow = mega_div64_32(endStrip, raid->rowDataSize);
887 }
888 numRows = (u8)(endRow - start_row + 1);
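 /*
  * Worked example with assumed values (even-span case): stripeShift == 7
  * (128-block strips) and rowDataSize == 3; an IO at LD block 500 for
  * 200 blocks gives start_strip = 3, endStrip = 5, num_strips = 3,
  * start_row = endRow = 1, numRows = 1.
  */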
9c915a8c 889
890 /*
891 * calculate region info.
892 */
893
894 /* assume region is at the start of the first row */
895 regStart = start_row << raid->stripeShift;
896 /* assume this IO needs the full row - we'll adjust if not true */
897 regSize = stripSize;
898
c1529fa2 899 /* Check if we can send this I/O via FastPath */
900 if (raid->capability.fpCapable) {
901 if (isRead)
902 io_info->fpOkForIo = (raid->capability.fpReadCapable &&
903 ((num_strips == 1) ||
904 raid->capability.
905 fpReadAcrossStripe));
906 else
907 io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
908 ((num_strips == 1) ||
909 raid->capability.
910 fpWriteAcrossStripe));
911 } else
9c915a8c 912 io_info->fpOkForIo = FALSE;
9c915a8c 913
914 if (numRows == 1) {
915 /* single-strip IOs can always lock only the data needed */
916 if (num_strips == 1) {
917 regStart += ref_in_start_stripe;
918 regSize = numBlocks;
919 }
 920 /* multi-strip IOs always need the full stripe locked */
bc93d425  921 } else if (io_info->IoforUnevenSpan == 0) {
922 /*
923 * For Even span region lock optimization.
924 * If the start strip is the last in the start row
925 */
9c915a8c 926 if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
9c915a8c 927 regStart += ref_in_start_stripe;
9c915a8c 928 /* initialize count to sectors from startref to end
929 of strip */
bc93d425 930 regSize = stripSize - ref_in_start_stripe;
9c915a8c 931 }
932
bc93d425 933 /* add complete rows in the middle of the transfer */
9c915a8c 934 if (numRows > 2)
9c915a8c 935 regSize += (numRows-2) << raid->stripeShift;
936
bc93d425 937 /* if IO ends within first strip of last row*/
9c915a8c 938 if (endStrip == endRow*raid->rowDataSize)
939 regSize += ref_in_end_stripe+1;
940 else
941 regSize += stripSize;
bc93d425  942 } else {
943 /*
944 * For Uneven span region lock optimization.
945 * If the start strip is the last in the start row
946 */
947 if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
948 SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
949 regStart += ref_in_start_stripe;
950 /* initialize count to sectors from
951 * startRef to end of strip
952 */
953 regSize = stripSize - ref_in_start_stripe;
954 }
956
957 if (numRows > 2)
958 /* Add complete rows in the middle of the transfer*/
959 regSize += (numRows-2) << raid->stripeShift;
960
961 /* if IO ends within first strip of last row */
962 if (endStrip == get_strip_from_row(instance, ld, endRow, map))
963 regSize += ref_in_end_stripe + 1;
964 else
965 regSize += stripSize;
9c915a8c 966 }
967
968 pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
21d3c710  969 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
970 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
36807e67 971 pRAID_Context->regLockFlags = (isRead) ?
972 raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
973 else
974 pRAID_Context->regLockFlags = (isRead) ?
975 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
9c915a8c 976 pRAID_Context->VirtualDiskTgtId = raid->targetId;
977 pRAID_Context->regLockRowLBA = regStart;
978 pRAID_Context->regLockLength = regSize;
979 pRAID_Context->configSeqNum = raid->seqNum;
980
981 /*Get Phy Params only if FP capable, or else leave it to MR firmware
982 to do the calculation.*/
983 if (io_info->fpOkForIo) {
bc93d425  984 retval = io_info->IoforUnevenSpan ?
985 mr_spanset_get_phy_params(instance, ld,
986 start_strip, ref_in_start_stripe,
987 io_info, pRAID_Context, map) :
988 MR_GetPhyParams(instance, ld, start_strip,
989 ref_in_start_stripe, io_info,
990 pRAID_Context, map);
991 /* If IO on an invalid Pd, then FP is not possible.*/
9c915a8c 992 if (io_info->devHandle == MR_PD_INVALID)
993 io_info->fpOkForIo = FALSE;
994 return retval;
995 } else if (isRead) {
996 uint stripIdx;
997 for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
bc93d425  998 retval = io_info->IoforUnevenSpan ?
999 mr_spanset_get_phy_params(instance, ld,
1000 start_strip + stripIdx,
1001 ref_in_start_stripe, io_info,
1002 pRAID_Context, map) :
1003 MR_GetPhyParams(instance, ld,
1004 start_strip + stripIdx, ref_in_start_stripe,
1005 io_info, pRAID_Context, map);
1006 if (!retval)
9c915a8c 1007 return TRUE;
1008 }
1009 }
bc93d425  1010
1011#if SPAN_DEBUG
1012 /* Just for testing what arm we get for strip.*/
1013 if (io_info->IoforUnevenSpan)
1014 get_arm_from_strip(instance, ld, start_strip, map);
1015#endif
9c915a8c 1016 return TRUE;
1017}
1018
bc93d425  1019/*
1020******************************************************************************
1021*
 1022* This routine prepares span set info from the valid RAID map and stores it
 1023* into the local copy of ldSpanInfo in the per-instance data structure.
1024*
1025* Inputs :
1026* map - LD map
1027* ldSpanInfo - ldSpanInfo per HBA instance
1028*
1029*/
1030void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1031 PLD_SPAN_INFO ldSpanInfo)
1032{
1033 u8 span, count;
1034 u32 element, span_row_width;
1035 u64 span_row;
1036 struct MR_LD_RAID *raid;
1037 LD_SPAN_SET *span_set, *span_set_prev;
1038 struct MR_QUAD_ELEMENT *quad;
1039 int ldCount;
1040 u16 ld;
1041
1042
1043 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
1044 ld = MR_TargetIdToLdGet(ldCount, map);
1045 if (ld >= MAX_LOGICAL_DRIVES)
1046 continue;
1047 raid = MR_LdRaidGet(ld, map);
1048 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
1049 for (span = 0; span < raid->spanDepth; span++) {
1050 if (map->raidMap.ldSpanMap[ld].spanBlock[span].
1051 block_span_info.noElements <
1052 element + 1)
1053 continue;
1054 span_set = &(ldSpanInfo[ld].span_set[element]);
1055 quad = &map->raidMap.ldSpanMap[ld].
1056 spanBlock[span].block_span_info.
1057 quad[element];
1058
1059 span_set->diff = quad->diff;
1060
1061 for (count = 0, span_row_width = 0;
1062 count < raid->spanDepth; count++) {
1063 if (map->raidMap.ldSpanMap[ld].
1064 spanBlock[count].
1065 block_span_info.
1066 noElements >= element + 1) {
1067 span_set->strip_offset[count] =
1068 span_row_width;
1069 span_row_width +=
1070 MR_LdSpanPtrGet
1071 (ld, count, map)->spanRowDataSize;
1072 printk(KERN_INFO "megasas:"
1073 "span %x rowDataSize %x\n",
1074 count, MR_LdSpanPtrGet
1075 (ld, count, map)->spanRowDataSize);
1076 }
1077 }
1078
1079 span_set->span_row_data_width = span_row_width;
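 /*
  * Illustrative example (hypothetical sizes): two spans with
  * spanRowDataSize 3 and 4 give strip_offset = {0, 3} and
  * span_row_data_width = 7, so strips 0-2 of a span row land on span 0
  * and strips 3-6 on span 1 (see get_arm_from_strip()).
  */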
1080 span_row = mega_div64_32(((quad->logEnd -
1081 quad->logStart) + quad->diff),
1082 quad->diff);
1083
1084 if (element == 0) {
1085 span_set->log_start_lba = 0;
1086 span_set->log_end_lba =
1087 ((span_row << raid->stripeShift)
1088 * span_row_width) - 1;
1089
1090 span_set->span_row_start = 0;
1091 span_set->span_row_end = span_row - 1;
1092
1093 span_set->data_strip_start = 0;
1094 span_set->data_strip_end =
1095 (span_row * span_row_width) - 1;
1096
1097 span_set->data_row_start = 0;
1098 span_set->data_row_end =
1099 (span_row * quad->diff) - 1;
1100 } else {
1101 span_set_prev = &(ldSpanInfo[ld].
1102 span_set[element - 1]);
1103 span_set->log_start_lba =
1104 span_set_prev->log_end_lba + 1;
1105 span_set->log_end_lba =
1106 span_set->log_start_lba +
1107 ((span_row << raid->stripeShift)
1108 * span_row_width) - 1;
1109
1110 span_set->span_row_start =
1111 span_set_prev->span_row_end + 1;
1112 span_set->span_row_end =
1113 span_set->span_row_start + span_row - 1;
1114
1115 span_set->data_strip_start =
1116 span_set_prev->data_strip_end + 1;
1117 span_set->data_strip_end =
1118 span_set->data_strip_start +
1119 (span_row * span_row_width) - 1;
1120
1121 span_set->data_row_start =
1122 span_set_prev->data_row_end + 1;
1123 span_set->data_row_end =
1124 span_set->data_row_start +
1125 (span_row * quad->diff) - 1;
1126 }
1127 break;
1128 }
1129 if (span == raid->spanDepth)
1130 break;
1131 }
1132 }
1133#if SPAN_DEBUG
1134 getSpanInfo(map, ldSpanInfo);
1135#endif
1136
1137}
1138
9c915a8c 1139void
1140mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
1141 struct LD_LOAD_BALANCE_INFO *lbInfo)
1142{
1143 int ldCount;
1144 u16 ld;
1145 struct MR_LD_RAID *raid;
1146
1147 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
1148 ld = MR_TargetIdToLdGet(ldCount, map);
1149 if (ld >= MAX_LOGICAL_DRIVES) {
1150 lbInfo[ldCount].loadBalanceFlag = 0;
1151 continue;
1152 }
1153
1154 raid = MR_LdRaidGet(ld, map);
1155
1156 /* Two drive Optimal RAID 1 */
1157 if ((raid->level == 1) && (raid->rowSize == 2) &&
1158 (raid->spanDepth == 1) && raid->ldState ==
1159 MR_LD_STATE_OPTIMAL) {
1160 u32 pd, arRef;
1161
1162 lbInfo[ldCount].loadBalanceFlag = 1;
1163
1164 /* Get the array on which this span is present */
1165 arRef = MR_LdSpanArrayGet(ld, 0, map);
1166
1167 /* Get the Pd */
1168 pd = MR_ArPdGet(arRef, 0, map);
1169 /* Get dev handle from Pd */
1170 lbInfo[ldCount].raid1DevHandle[0] =
1171 MR_PdDevHandleGet(pd, map);
1172 /* Get the Pd */
1173 pd = MR_ArPdGet(arRef, 1, map);
1174
1175 /* Get the dev handle from Pd */
1176 lbInfo[ldCount].raid1DevHandle[1] =
1177 MR_PdDevHandleGet(pd, map);
1178 } else
1179 lbInfo[ldCount].loadBalanceFlag = 0;
1180 }
1181}
1182
1183u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
1184 u32 count)
1185{
1186 u16 pend0, pend1;
1187 u64 diff0, diff1;
1188 u8 bestArm;
1189
1190 /* get the pending cmds for the data and mirror arms */
1191 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
1192 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
1193
1194 /* Determine the disk whose head is nearer to the req. block */
1195 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
1196 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
1197 bestArm = (diff0 <= diff1 ? 0 : 1);
1198
70b47b88  1199 /* Make balance count from 16 to 4 to keep driver in sync with Firmware */
1200 if ((bestArm == arm && pend0 > pend1 + 4) ||
1201 (bestArm != arm && pend1 > pend0 + 4))
9c915a8c 1202 bestArm ^= 1;
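 /*
  * Illustrative example (hypothetical queue depths): if the previous arm
  * and the nearest-head arm are both arm 0, but arm 0 has 9 pending
  * commands against arm 1's 2, the "pend0 > pend1 + 4" test above flips
  * the choice to arm 1, trading seek locality for queue depth.
  */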
1203
1204 /* Update the last accessed block on the correct pd */
1205 lbInfo->last_accessed_block[bestArm] = block + count - 1;
1206
1207 return bestArm;
1208}
1209
1210u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
1211 struct IO_REQUEST_INFO *io_info)
1212{
1213 u8 arm, old_arm;
1214 u16 devHandle;
1215
1216 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
1217
1218 /* get best new arm */
1219 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
1220 io_info->numBlocks);
1221 devHandle = lbInfo->raid1DevHandle[arm];
1222 atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
1223
1224 return devHandle;
1225}