[SCSI] hpsa: combine hpsa_scsi_detect and hpsa_register_scsi
drivers/scsi/hpsa.c (linux-2.6-block.git)
/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array", &SA5_access},
	{0x3351103C, "Smart Array", &SA5_access},
	{0x3352103C, "Smart Array", &SA5_access},
	{0x3353103C, "Smart Array", &SA5_access},
	{0x3354103C, "Smart Array", &SA5_access},
	{0x3355103C, "Smart Array", &SA5_access},
	{0x3356103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
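
/*
 * Worked example (illustrative, not from the original source): CISS
 * logical volumes report an addressing mode of 01b in the top two bits
 * of byte 3, so a typical logical drive has scsi3addr[3] == 0x40 and
 * (0x40 & 0xC0) == 0x40 makes the test above true; a bare physical
 * device reports 0x00 there and fails it.
 */
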
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = "hpsa",
	.proc_name = "hpsa",
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}
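
/*
 * Illustrative sketch of the ring protocol above (an assumption drawn
 * from how the reply pool is initialized elsewhere in this driver, not
 * part of the original comments): the controller toggles the low
 * "cycle" bit of each tag it writes on every pass through the ring,
 * and h->reply_pool_wraparound mirrors that toggle on the host side.
 * An entry is therefore "new" only while its low bit matches
 * reply_pool_wraparound; once the head wraps and the flag flips,
 * stale entries from the previous pass no longer match and the
 * function returns FIFO_EMPTY until fresh completions arrive.
 */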

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
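
/*
 * Worked example (illustrative): for a command whose header lists 4 SG
 * entries and whose h->blockFetchTable[4] holds 2, the tag becomes
 * busaddr | 1 | (2 << 1) == busaddr | 0x5: bit 0 selects the pull
 * model and bits 3-1 carry the block fetch register number, as the
 * comment above describes.
 */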

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_DEVICES; i++) {
		if (!test_bit(i, lun_taken)) {
			/* *bus = 1; */
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit number, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}
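
/*
 * Worked example of the byte-4 rule above (illustrative, hypothetical
 * addresses): suppose a multi-lun device reports two 8-byte LUN
 * addresses differing only in byte 4, one with scsi3addr[4] == 0 and
 * one with scsi3addr[4] == 2.  The first is not a non-zero lun, so it
 * gets a fresh target from hpsa_find_target_lun(); the second matches
 * it in the masked memcmp() loop, inherits that bus/target, and is
 * assigned lun == scsi3addr[4] == 2.
 */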

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host will
	 * do it later the first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * Future cmds to this device will get a selection
			 * timeout as if the device were gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
		PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}
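
/*
 * Worked example of the chaining above (illustrative; the value of
 * h->max_cmd_sg_entries is an assumption): with max_cmd_sg_entries of
 * 32 and a command using 39 data descriptors, SGTotal is 40 (data
 * plus the chain entry), the first 31 descriptors stay in c->SG[],
 * and the last on-board slot becomes a chain pointer whose Len covers
 * the (40 - 32) == 8 descriptors spilled into the chain block.
 */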

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); /* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}
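
/*
 * Example of the 32/32 split above (illustrative): a mapped bus
 * address of 0x0000001234567890ULL stores 0x34567890 in
 * SG[0].Addr.lower and 0x00000012 in SG[0].Addr.upper, since the
 * controller's SG descriptor holds the 64-bit address as two u32s.
 */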

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
			ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
			ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		if (unlikely(is_scsi_rev_5(h))) {
			/* p1210m, logical drives lun assignments
			 * match SCSI REPORT LUNS data.
			 */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			*bus = 0;
			*target = 0;
			*lun = (lunid & 0x3fff) + 1;
		} else {
			/* not p1210m... */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			if (is_msa2xxx(h, device)) {
				/* msa2xxx way, put logicals on bus 1
				 * and match the target/lun numbers the
				 * box reports.
				 */
				*bus = 1;
				*target = (lunid >> 16) & 0x3fff;
				*lun = lunid & 0x00ff;
			} else {
				/* Traditional smart array way. */
				*bus = 0;
				*lun = 0;
				*target = lunid & 0x3fff;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}
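
/*
 * Worked example (illustrative, hypothetical lunids): an MSA2xxx
 * logical volume reporting lunid 0x00010002 lands at bus 1,
 * target ((lunid >> 16) & 0x3fff) == 1, lun (lunid & 0x00ff) == 2,
 * while a traditional Smart Array logical drive with lunid 0x00000003
 * lands at bus 0, target 3, lun 0.
 */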

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	int bus, int target, int lun, unsigned long lunzerobits[],
	int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}

1737 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1738 * logdev. The number of luns in physdev and logdev are returned in
1739 * *nphysicals and *nlogicals, respectively.
1740 * Returns 0 on success, -1 otherwise.
1741 */
1742static int hpsa_gather_lun_info(struct ctlr_info *h,
1743 int reportlunsize,
1744 struct ReportLUNdata *physdev, u32 *nphysicals,
1745 struct ReportLUNdata *logdev, u32 *nlogicals)
1746{
1747 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1748 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1749 return -1;
1750 }
1751	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1752 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1753 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1754 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1755 *nphysicals - HPSA_MAX_PHYS_LUN);
1756 *nphysicals = HPSA_MAX_PHYS_LUN;
1757 }
1758 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1759 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1760 return -1;
1761 }
1762	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1763 /* Reject Logicals in excess of our max capability. */
1764 if (*nlogicals > HPSA_MAX_LUN) {
1765 dev_warn(&h->pdev->dev,
1766 "maximum logical LUNs (%d) exceeded. "
1767 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1768 *nlogicals - HPSA_MAX_LUN);
1769 *nlogicals = HPSA_MAX_LUN;
1770 }
1771 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1772 dev_warn(&h->pdev->dev,
1773 "maximum logical + physical LUNs (%d) exceeded. "
1774 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1775 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1776 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1777 }
1778 return 0;
1779}
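
/*
 * Illustrative note: LUNListLength is a big-endian byte count and each
 * entry in the returned LUN array is 8 bytes wide, hence the "/ 8"
 * above; e.g. a LUNListLength of 0x18 (24 bytes) means the controller
 * reported 3 LUNs.
 */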
1780
1781u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1782 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1783 struct ReportLUNdata *logdev_list)
1784{
1785 /* Helper function, figure out where the LUN ID info is coming from
1786 * given index i, lists of physical and logical devices, where in
1787 * the list the raid controller is supposed to appear (first or last)
1788 */
1789
1790 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1791 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1792
1793 if (i == raid_ctlr_position)
1794 return RAID_CTLR_LUNID;
1795
1796 if (i < logicals_start)
1797 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1798
1799 if (i < last_device)
1800 return &logdev_list->LUN[i - nphysicals -
1801 (raid_ctlr_position == 0)][0];
1802 BUG();
1803 return NULL;
1804}
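
/*
 * Illustrative index layout for the helper above: with nphysicals = 2,
 * nlogicals = 3 and raid_ctlr_position = 0 (the SCSI rev 5 case),
 * i = 0 maps to RAID_CTLR_LUNID, i = 1..2 map to physdev_list->LUN[0..1]
 * and i = 3..5 map to logdev_list->LUN[0..2]. When the controller is
 * reported last instead, the physical devices start at i = 0.
 */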
1805
1806static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1807{
1808 /* the idea here is we could get notified
1809 * that some devices have changed, so we do a report
1810 * physical luns and report logical luns cmd, and adjust
1811 * our list of devices accordingly.
1812 *
1813 * The scsi3addr's of devices won't change so long as the
1814 * adapter is not reset. That means we can rescan and
1815 * tell which devices we already know about, vs. new
1816 * devices, vs. disappearing devices.
1817 */
1818 struct ReportLUNdata *physdev_list = NULL;
1819 struct ReportLUNdata *logdev_list = NULL;
1820 u32 nphysicals = 0;
1821 u32 nlogicals = 0;
1822 u32 ndev_allocated = 0;
1823 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1824 int ncurrent = 0;
1825 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1826 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1827 int bus, target, lun;
1828	int raid_ctlr_position;
1829 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1830
1831	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
1832 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1833 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1834 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1835
1836	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1837 dev_err(&h->pdev->dev, "out of memory\n");
1838 goto out;
1839 }
1840 memset(lunzerobits, 0, sizeof(lunzerobits));
1841
1842 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1843 logdev_list, &nlogicals))
1844 goto out;
1845
1846	/* We might see up to 32 MSA2xxx enclosures, actually only 8 of them,
1847	 * but each seen 4 times through different paths. The plus 1
1848 * is for the RAID controller.
1849 */
1850 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1851
1852 /* Allocate the per device structures */
1853 for (i = 0; i < ndevs_to_allocate; i++) {
1854 if (i >= HPSA_MAX_DEVICES) {
1855 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
1856 " %d devices ignored.\n", HPSA_MAX_DEVICES,
1857 ndevs_to_allocate - HPSA_MAX_DEVICES);
1858 break;
1859 }
1860
1861 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1862 if (!currentsd[i]) {
1863 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1864 __FILE__, __LINE__);
1865 goto out;
1866 }
1867 ndev_allocated++;
1868 }
1869
1870 if (unlikely(is_scsi_rev_5(h)))
1871 raid_ctlr_position = 0;
1872 else
1873 raid_ctlr_position = nphysicals + nlogicals;
1874
1875 /* adjust our table of devices */
1876 nmsa2xxx_enclosures = 0;
1877 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1878	u8 *lunaddrbytes, is_OBDR = 0;
1879
1880 /* Figure out where the LUN ID info is coming from */
1881 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1882 i, nphysicals, nlogicals, physdev_list, logdev_list);
1883	/* skip masked physical devices. */
1884 if (lunaddrbytes[3] & 0xC0 &&
1885 i < nphysicals + (raid_ctlr_position == 0))
1886 continue;
1887
1888 /* Get device type, vendor, model, device id */
1889 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1890 &is_OBDR))
1891 continue; /* skip it if we can't talk to it. */
1892 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1893 tmpdevice);
1894 this_device = currentsd[ncurrent];
1895
1896 /*
1897 * For the msa2xxx boxes, we have to insert a LUN 0 which
1898 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1899 * is nonetheless an enclosure device there. We have to
1900	 * present it, otherwise linux won't find anything if
1901 * there is no lun 0.
1902 */
1903 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1904 lunaddrbytes, bus, target, lun, lunzerobits,
1905 &nmsa2xxx_enclosures)) {
1906 ncurrent++;
1907 this_device = currentsd[ncurrent];
1908 }
1909
1910 *this_device = *tmpdevice;
1911 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1912
1913 switch (this_device->devtype) {
1914	case TYPE_ROM:
1915 /* We don't *really* support actual CD-ROM devices,
1916 * just "One Button Disaster Recovery" tape drive
1917 * which temporarily pretends to be a CD-ROM drive.
1918 * So we check that the device is really an OBDR tape
1919 * device by checking for "$DR-10" in bytes 43-48 of
1920 * the inquiry data.
1921 */
1922 if (is_OBDR)
1923 ncurrent++;
1924 break;
1925 case TYPE_DISK:
1926 if (i < nphysicals)
1927 break;
1928 ncurrent++;
1929 break;
1930 case TYPE_TAPE:
1931 case TYPE_MEDIUM_CHANGER:
1932 ncurrent++;
1933 break;
1934 case TYPE_RAID:
1935 /* Only present the Smartarray HBA as a RAID controller.
1936 * If it's a RAID controller other than the HBA itself
1937 * (an external RAID controller, MSA500 or similar)
1938 * don't present it.
1939 */
1940 if (!is_hba_lunid(lunaddrbytes))
1941 break;
1942 ncurrent++;
1943 break;
1944 default:
1945 break;
1946 }
1947	if (ncurrent >= HPSA_MAX_DEVICES)
1948 break;
1949 }
1950 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1951out:
1952 kfree(tmpdevice);
1953 for (i = 0; i < ndev_allocated; i++)
1954 kfree(currentsd[i]);
1955 kfree(currentsd);
1956 kfree(physdev_list);
1957 kfree(logdev_list);
1958}
1959
1960/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1961 * dma mapping and fills in the scatter gather entries of the
1962 * hpsa command, cp.
1963 */
1964static int hpsa_scatter_gather(struct ctlr_info *h,
1965 struct CommandList *cp,
1966 struct scsi_cmnd *cmd)
1967{
1968 unsigned int len;
1969 struct scatterlist *sg;
1970	u64 addr64;
1971 int use_sg, i, sg_index, chained;
1972 struct SGDescriptor *curr_sg;
1973
1974	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1975
1976 use_sg = scsi_dma_map(cmd);
1977 if (use_sg < 0)
1978 return use_sg;
1979
1980 if (!use_sg)
1981 goto sglist_finished;
1982
1983 curr_sg = cp->SG;
1984 chained = 0;
1985 sg_index = 0;
1986	scsi_for_each_sg(cmd, sg, use_sg, i) {
1987 if (i == h->max_cmd_sg_entries - 1 &&
1988 use_sg > h->max_cmd_sg_entries) {
1989 chained = 1;
1990 curr_sg = h->cmd_sg_list[cp->cmdindex];
1991 sg_index = 0;
1992 }
1993	addr64 = (u64) sg_dma_address(sg);
1994	len = sg_dma_len(sg);
1995 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1996 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1997 curr_sg->Len = len;
1998 curr_sg->Ext = 0; /* we are not chaining */
1999 curr_sg++;
2000 }
2001
2002 if (use_sg + chained > h->maxSG)
2003 h->maxSG = use_sg + chained;
2004
2005 if (chained) {
2006 cp->Header.SGList = h->max_cmd_sg_entries;
2007 cp->Header.SGTotal = (u16) (use_sg + 1);
2008 hpsa_map_sg_chain_block(h, cp);
2009 return 0;
2010 }
2011
2012sglist_finished:
2013
2014 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
2015 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
2016 return 0;
2017}
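
/*
 * Illustrative numbers for the chaining above: with
 * h->max_cmd_sg_entries = 32 and use_sg = 40, the first 31 descriptors
 * land in cp->SG[], the remaining 9 go to the chain block at
 * h->cmd_sg_list[cp->cmdindex], and the header ends up with
 * SGList = 32 (the last on-card slot becomes the chain descriptor in
 * hpsa_map_sg_chain_block()) and SGTotal = use_sg + 1 = 41.
 */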
2018
2019
2020static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2021 void (*done)(struct scsi_cmnd *))
2022{
2023 struct ctlr_info *h;
2024 struct hpsa_scsi_dev_t *dev;
2025 unsigned char scsi3addr[8];
2026 struct CommandList *c;
2027 unsigned long flags;
2028
2029 /* Get the ptr to our adapter structure out of cmd->host. */
2030 h = sdev_to_hba(cmd->device);
2031 dev = cmd->device->hostdata;
2032 if (!dev) {
2033 cmd->result = DID_NO_CONNECT << 16;
2034 done(cmd);
2035 return 0;
2036 }
2037 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2038
2039	spin_lock_irqsave(&h->lock, flags);
2040 if (unlikely(h->lockup_detected)) {
2041 spin_unlock_irqrestore(&h->lock, flags);
2042 cmd->result = DID_ERROR << 16;
2043 done(cmd);
2044 return 0;
2045 }
2046 /* Need a lock as this is being allocated from the pool */
2047 c = cmd_alloc(h);
2048 spin_unlock_irqrestore(&h->lock, flags);
2049 if (c == NULL) { /* trouble... */
2050 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2051 return SCSI_MLQUEUE_HOST_BUSY;
2052 }
2053
2054 /* Fill in the command list header */
2055
2056 cmd->scsi_done = done; /* save this for use by completion code */
2057
2058 /* save c in case we have to abort it */
2059 cmd->host_scribble = (unsigned char *) c;
2060
2061 c->cmd_type = CMD_SCSI;
2062 c->scsi_cmd = cmd;
2063 c->Header.ReplyQueue = 0; /* unused in simple mode */
2064 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2065 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2066 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
2067
2068 /* Fill in the request block... */
2069
2070 c->Request.Timeout = 0;
2071 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2072 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2073 c->Request.CDBLen = cmd->cmd_len;
2074 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2075 c->Request.Type.Type = TYPE_CMD;
2076 c->Request.Type.Attribute = ATTR_SIMPLE;
2077 switch (cmd->sc_data_direction) {
2078 case DMA_TO_DEVICE:
2079 c->Request.Type.Direction = XFER_WRITE;
2080 break;
2081 case DMA_FROM_DEVICE:
2082 c->Request.Type.Direction = XFER_READ;
2083 break;
2084 case DMA_NONE:
2085 c->Request.Type.Direction = XFER_NONE;
2086 break;
2087 case DMA_BIDIRECTIONAL:
2088 /* This can happen if a buggy application does a scsi passthru
2089 * and sets both inlen and outlen to non-zero. ( see
2090 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2091 */
2092
2093 c->Request.Type.Direction = XFER_RSVD;
2094 /* This is technically wrong, and hpsa controllers should
2095 * reject it with CMD_INVALID, which is the most correct
2096 * response, but non-fibre backends appear to let it
2097 * slide by, and give the same results as if this field
2098 * were set correctly. Either way is acceptable for
2099 * our purposes here.
2100 */
2101
2102 break;
2103
2104 default:
2105 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2106 cmd->sc_data_direction);
2107 BUG();
2108 break;
2109 }
2110
2111	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2112 cmd_free(h, c);
2113 return SCSI_MLQUEUE_HOST_BUSY;
2114 }
2115 enqueue_cmd_and_start_io(h, c);
2116 /* the cmd'll come back via intr handler in complete_scsi_command() */
2117 return 0;
2118}
2119
2120static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2121
2122static void hpsa_scan_start(struct Scsi_Host *sh)
2123{
2124 struct ctlr_info *h = shost_to_hba(sh);
2125 unsigned long flags;
2126
2127 /* wait until any scan already in progress is finished. */
2128 while (1) {
2129 spin_lock_irqsave(&h->scan_lock, flags);
2130 if (h->scan_finished)
2131 break;
2132 spin_unlock_irqrestore(&h->scan_lock, flags);
2133 wait_event(h->scan_wait_queue, h->scan_finished);
2134 /* Note: We don't need to worry about a race between this
2135 * thread and driver unload because the midlayer will
2136 * have incremented the reference count, so unload won't
2137 * happen if we're in here.
2138 */
2139 }
2140 h->scan_finished = 0; /* mark scan as in progress */
2141 spin_unlock_irqrestore(&h->scan_lock, flags);
2142
2143 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2144
2145 spin_lock_irqsave(&h->scan_lock, flags);
2146 h->scan_finished = 1; /* mark scan as finished. */
2147 wake_up_all(&h->scan_wait_queue);
2148 spin_unlock_irqrestore(&h->scan_lock, flags);
2149}
2150
2151static int hpsa_scan_finished(struct Scsi_Host *sh,
2152 unsigned long elapsed_time)
2153{
2154 struct ctlr_info *h = shost_to_hba(sh);
2155 unsigned long flags;
2156 int finished;
2157
2158 spin_lock_irqsave(&h->scan_lock, flags);
2159 finished = h->scan_finished;
2160 spin_unlock_irqrestore(&h->scan_lock, flags);
2161 return finished;
2162}
2163
2164static int hpsa_change_queue_depth(struct scsi_device *sdev,
2165 int qdepth, int reason)
2166{
2167 struct ctlr_info *h = sdev_to_hba(sdev);
2168
2169 if (reason != SCSI_QDEPTH_DEFAULT)
2170 return -ENOTSUPP;
2171
2172 if (qdepth < 1)
2173 qdepth = 1;
2174 else
2175 if (qdepth > h->nr_cmds)
2176 qdepth = h->nr_cmds;
2177 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2178 return sdev->queue_depth;
2179}
2180
2181static void hpsa_unregister_scsi(struct ctlr_info *h)
2182{
2183 /* we are being forcibly unloaded, and may not refuse. */
2184 scsi_remove_host(h->scsi_host);
2185 scsi_host_put(h->scsi_host);
2186 h->scsi_host = NULL;
2187}
2188
2189static int hpsa_register_scsi(struct ctlr_info *h)
2190{
2191 struct Scsi_Host *sh;
2192 int error;
2193
2194 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2195 if (sh == NULL)
2196 goto fail;
2197
2198 sh->io_port = 0;
2199 sh->n_io_port = 0;
2200 sh->this_id = -1;
2201 sh->max_channel = 3;
2202 sh->max_cmd_len = MAX_COMMAND_SIZE;
2203 sh->max_lun = HPSA_MAX_LUN;
2204 sh->max_id = HPSA_MAX_LUN;
2205 sh->can_queue = h->nr_cmds;
2206 sh->cmd_per_lun = h->nr_cmds;
2207 sh->sg_tablesize = h->maxsgentries;
2208 h->scsi_host = sh;
2209 sh->hostdata[0] = (unsigned long) h;
2210 sh->irq = h->intr[h->intr_mode];
2211 sh->unique_id = sh->irq;
2212 error = scsi_add_host(sh, &h->pdev->dev);
2213 if (error)
2214 goto fail_host_put;
2215 scsi_scan_host(sh);
2216 return 0;
2217
2218 fail_host_put:
2219 dev_err(&h->pdev->dev, "%s: scsi_add_host"
2220 " failed for controller %d\n", __func__, h->ctlr);
2221 scsi_host_put(sh);
2222 return error;
2223 fail:
2224 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
2225 " failed for controller %d\n", __func__, h->ctlr);
2226 return -ENOMEM;
2227}
2228
2229static int wait_for_device_to_become_ready(struct ctlr_info *h,
2230 unsigned char lunaddr[])
2231{
2232 int rc = 0;
2233 int count = 0;
2234 int waittime = 1; /* seconds */
2235 struct CommandList *c;
2236
2237 c = cmd_special_alloc(h);
2238 if (!c) {
2239 dev_warn(&h->pdev->dev, "out of memory in "
2240 "wait_for_device_to_become_ready.\n");
2241 return IO_ERROR;
2242 }
2243
2244 /* Send test unit ready until device ready, or give up. */
2245 while (count < HPSA_TUR_RETRY_LIMIT) {
2246
2247	/* Wait for a bit. Do this first, because if we send
2248 * the TUR right away, the reset will just abort it.
2249 */
2250 msleep(1000 * waittime);
2251 count++;
2252
2253 /* Increase wait time with each try, up to a point. */
2254 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2255 waittime = waittime * 2;
2256
2257 /* Send the Test Unit Ready */
2258 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2259 hpsa_scsi_do_simple_cmd_core(h, c);
2260 /* no unmap needed here because no data xfer. */
2261
2262 if (c->err_info->CommandStatus == CMD_SUCCESS)
2263 break;
2264
2265 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2266 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2267 (c->err_info->SenseInfo[2] == NO_SENSE ||
2268 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2269 break;
2270
2271 dev_warn(&h->pdev->dev, "waiting %d secs "
2272 "for device to become ready.\n", waittime);
2273 rc = 1; /* device not ready. */
2274 }
2275
2276 if (rc)
2277 dev_warn(&h->pdev->dev, "giving up on device.\n");
2278 else
2279 dev_warn(&h->pdev->dev, "device is ready.\n");
2280
2281 cmd_special_free(h, c);
2282 return rc;
2283}
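
/*
 * Illustrative note: the loop above is a capped exponential backoff --
 * it sleeps 1s, 2s, 4s, ... between TURs, clamping the interval at
 * HPSA_MAX_WAIT_INTERVAL_SECS and bounding the total attempts by
 * HPSA_TUR_RETRY_LIMIT.
 */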
2284
2285/* Need at least one of these error handlers to keep ../scsi/hosts.c from
2286 * complaining. Doing a host- or bus-reset can't do anything good here.
2287 */
2288static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2289{
2290 int rc;
2291 struct ctlr_info *h;
2292 struct hpsa_scsi_dev_t *dev;
2293
2294 /* find the controller to which the command to be aborted was sent */
2295 h = sdev_to_hba(scsicmd->device);
2296 if (h == NULL) /* paranoia */
2297 return FAILED;
2298 dev = scsicmd->device->hostdata;
2299 if (!dev) {
2300 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2301 "device lookup failed.\n");
2302 return FAILED;
2303 }
2304 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2305 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2306 /* send a reset to the SCSI LUN which the command was sent to */
2307 rc = hpsa_send_reset(h, dev->scsi3addr);
2308 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2309 return SUCCESS;
2310
2311 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2312 return FAILED;
2313}
2314
2315/*
2316 * For operations that cannot sleep, a command block is allocated at init,
2317 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2318 * which ones are free or in use. Lock must be held when calling this.
2319 * cmd_free() is the complement.
2320 */
2321static struct CommandList *cmd_alloc(struct ctlr_info *h)
2322{
2323 struct CommandList *c;
2324 int i;
2325 union u64bit temp64;
2326 dma_addr_t cmd_dma_handle, err_dma_handle;
2327
2328 do {
2329 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2330 if (i == h->nr_cmds)
2331 return NULL;
2332 } while (test_and_set_bit
2333 (i & (BITS_PER_LONG - 1),
2334 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2335 c = h->cmd_pool + i;
2336 memset(c, 0, sizeof(*c));
2337 cmd_dma_handle = h->cmd_pool_dhandle
2338 + i * sizeof(*c);
2339 c->err_info = h->errinfo_pool + i;
2340 memset(c->err_info, 0, sizeof(*c->err_info));
2341 err_dma_handle = h->errinfo_pool_dhandle
2342 + i * sizeof(*c->err_info);
2343 h->nr_allocs++;
2344
2345 c->cmdindex = i;
2346
2347	INIT_LIST_HEAD(&c->list);
2348 c->busaddr = (u32) cmd_dma_handle;
2349 temp64.val = (u64) err_dma_handle;
2350 c->ErrDesc.Addr.lower = temp64.val32.lower;
2351 c->ErrDesc.Addr.upper = temp64.val32.upper;
2352 c->ErrDesc.Len = sizeof(*c->err_info);
2353
2354 c->h = h;
2355 return c;
2356}
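
/*
 * Illustrative note: because the pool is one contiguous DMA-consistent
 * allocation, slot i's addresses fall out by arithmetic --
 * c = h->cmd_pool + i and cmd_dma_handle = h->cmd_pool_dhandle +
 * i * sizeof(*c), with the paired err_info slot computed the same way.
 */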
2357
2358 /* For operations that may sleep waiting for kmalloc,
2359 * this routine can be called. Lock need not be held to call
2360 * cmd_special_alloc. cmd_special_free() is the complement.
2361 */
2362static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2363{
2364 struct CommandList *c;
2365 union u64bit temp64;
2366 dma_addr_t cmd_dma_handle, err_dma_handle;
2367
2368 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2369 if (c == NULL)
2370 return NULL;
2371 memset(c, 0, sizeof(*c));
2372
2373 c->cmdindex = -1;
2374
2375 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2376 &err_dma_handle);
2377
2378 if (c->err_info == NULL) {
2379 pci_free_consistent(h->pdev,
2380 sizeof(*c), c, cmd_dma_handle);
2381 return NULL;
2382 }
2383 memset(c->err_info, 0, sizeof(*c->err_info));
2384
2385	INIT_LIST_HEAD(&c->list);
2386 c->busaddr = (u32) cmd_dma_handle;
2387 temp64.val = (u64) err_dma_handle;
2388 c->ErrDesc.Addr.lower = temp64.val32.lower;
2389 c->ErrDesc.Addr.upper = temp64.val32.upper;
2390 c->ErrDesc.Len = sizeof(*c->err_info);
2391
2392 c->h = h;
2393 return c;
2394}
2395
2396static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2397{
2398 int i;
2399
2400 i = c - h->cmd_pool;
2401 clear_bit(i & (BITS_PER_LONG - 1),
2402 h->cmd_pool_bits + (i / BITS_PER_LONG));
2403 h->nr_frees++;
2404}
2405
2406static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2407{
2408 union u64bit temp64;
2409
2410 temp64.val32.lower = c->ErrDesc.Addr.lower;
2411 temp64.val32.upper = c->ErrDesc.Addr.upper;
2412 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2413 c->err_info, (dma_addr_t) temp64.val);
2414 pci_free_consistent(h->pdev, sizeof(*c),
2415	c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2416}
2417
2418#ifdef CONFIG_COMPAT
2419
2420static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2421{
2422 IOCTL32_Command_struct __user *arg32 =
2423 (IOCTL32_Command_struct __user *) arg;
2424 IOCTL_Command_struct arg64;
2425 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2426 int err;
2427 u32 cp;
2428
2429	memset(&arg64, 0, sizeof(arg64));
2430 err = 0;
2431 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2432 sizeof(arg64.LUN_info));
2433 err |= copy_from_user(&arg64.Request, &arg32->Request,
2434 sizeof(arg64.Request));
2435 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2436 sizeof(arg64.error_info));
2437 err |= get_user(arg64.buf_size, &arg32->buf_size);
2438 err |= get_user(cp, &arg32->buf);
2439 arg64.buf = compat_ptr(cp);
2440 err |= copy_to_user(p, &arg64, sizeof(arg64));
2441
2442 if (err)
2443 return -EFAULT;
2444
2445	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2446 if (err)
2447 return err;
2448 err |= copy_in_user(&arg32->error_info, &p->error_info,
2449 sizeof(arg32->error_info));
2450 if (err)
2451 return -EFAULT;
2452 return err;
2453}
2454
2455static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2456 int cmd, void *arg)
2457{
2458 BIG_IOCTL32_Command_struct __user *arg32 =
2459 (BIG_IOCTL32_Command_struct __user *) arg;
2460 BIG_IOCTL_Command_struct arg64;
2461 BIG_IOCTL_Command_struct __user *p =
2462 compat_alloc_user_space(sizeof(arg64));
2463 int err;
2464 u32 cp;
2465
2466	memset(&arg64, 0, sizeof(arg64));
2467 err = 0;
2468 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2469 sizeof(arg64.LUN_info));
2470 err |= copy_from_user(&arg64.Request, &arg32->Request,
2471 sizeof(arg64.Request));
2472 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2473 sizeof(arg64.error_info));
2474 err |= get_user(arg64.buf_size, &arg32->buf_size);
2475 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2476 err |= get_user(cp, &arg32->buf);
2477 arg64.buf = compat_ptr(cp);
2478 err |= copy_to_user(p, &arg64, sizeof(arg64));
2479
2480 if (err)
2481 return -EFAULT;
2482
2483	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2484 if (err)
2485 return err;
2486 err |= copy_in_user(&arg32->error_info, &p->error_info,
2487 sizeof(arg32->error_info));
2488 if (err)
2489 return -EFAULT;
2490 return err;
2491}
2492
2493static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2494{
2495 switch (cmd) {
2496 case CCISS_GETPCIINFO:
2497 case CCISS_GETINTINFO:
2498 case CCISS_SETINTINFO:
2499 case CCISS_GETNODENAME:
2500 case CCISS_SETNODENAME:
2501 case CCISS_GETHEARTBEAT:
2502 case CCISS_GETBUSTYPES:
2503 case CCISS_GETFIRMVER:
2504 case CCISS_GETDRIVVER:
2505 case CCISS_REVALIDVOLS:
2506 case CCISS_DEREGDISK:
2507 case CCISS_REGNEWDISK:
2508 case CCISS_REGNEWD:
2509 case CCISS_RESCANDISK:
2510 case CCISS_GETLUNINFO:
2511 return hpsa_ioctl(dev, cmd, arg);
2512
2513 case CCISS_PASSTHRU32:
2514 return hpsa_ioctl32_passthru(dev, cmd, arg);
2515 case CCISS_BIG_PASSTHRU32:
2516 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2517
2518 default:
2519 return -ENOIOCTLCMD;
2520 }
2521}
2522#endif
2523
2524static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2525{
2526 struct hpsa_pci_info pciinfo;
2527
2528 if (!argp)
2529 return -EINVAL;
2530 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2531 pciinfo.bus = h->pdev->bus->number;
2532 pciinfo.dev_fn = h->pdev->devfn;
2533 pciinfo.board_id = h->board_id;
2534 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2535 return -EFAULT;
2536 return 0;
2537}
2538
2539static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2540{
2541 DriverVer_type DriverVer;
2542 unsigned char vmaj, vmin, vsubmin;
2543 int rc;
2544
2545 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2546 &vmaj, &vmin, &vsubmin);
2547 if (rc != 3) {
2548 dev_info(&h->pdev->dev, "driver version string '%s' "
2549 "unrecognized.", HPSA_DRIVER_VERSION);
2550 vmaj = 0;
2551 vmin = 0;
2552 vsubmin = 0;
2553 }
2554 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2555 if (!argp)
2556 return -EINVAL;
2557 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2558 return -EFAULT;
2559 return 0;
2560}
2561
2562static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2563{
2564 IOCTL_Command_struct iocommand;
2565 struct CommandList *c;
2566 char *buff = NULL;
2567 union u64bit temp64;
2568
2569 if (!argp)
2570 return -EINVAL;
2571 if (!capable(CAP_SYS_RAWIO))
2572 return -EPERM;
2573 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2574 return -EFAULT;
2575 if ((iocommand.buf_size < 1) &&
2576 (iocommand.Request.Type.Direction != XFER_NONE)) {
2577 return -EINVAL;
2578 }
2579 if (iocommand.buf_size > 0) {
2580 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2581 if (buff == NULL)
2582 return -EFAULT;
2583 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2584 /* Copy the data into the buffer we created */
2585 if (copy_from_user(buff, iocommand.buf,
2586 iocommand.buf_size)) {
2587 kfree(buff);
2588 return -EFAULT;
2589 }
2590 } else {
2591 memset(buff, 0, iocommand.buf_size);
2592	}
2593	}
2594 c = cmd_special_alloc(h);
2595 if (c == NULL) {
2596 kfree(buff);
2597 return -ENOMEM;
2598 }
2599 /* Fill in the command type */
2600 c->cmd_type = CMD_IOCTL_PEND;
2601 /* Fill in Command Header */
2602 c->Header.ReplyQueue = 0; /* unused in simple mode */
2603 if (iocommand.buf_size > 0) { /* buffer to fill */
2604 c->Header.SGList = 1;
2605 c->Header.SGTotal = 1;
2606 } else { /* no buffers to fill */
2607 c->Header.SGList = 0;
2608 c->Header.SGTotal = 0;
2609 }
2610 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2611	/* use the kernel address of the cmd block for tag */
2612 c->Header.Tag.lower = c->busaddr;
2613
2614 /* Fill in Request block */
2615 memcpy(&c->Request, &iocommand.Request,
2616 sizeof(c->Request));
2617
2618 /* Fill in the scatter gather information */
2619 if (iocommand.buf_size > 0) {
2620 temp64.val = pci_map_single(h->pdev, buff,
2621 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2622 c->SG[0].Addr.lower = temp64.val32.lower;
2623 c->SG[0].Addr.upper = temp64.val32.upper;
2624 c->SG[0].Len = iocommand.buf_size;
2625 c->SG[0].Ext = 0; /* we are not chaining*/
2626 }
2627	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2628 if (iocommand.buf_size > 0)
2629 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2630 check_ioctl_unit_attention(h, c);
2631
2632 /* Copy the error information out */
2633 memcpy(&iocommand.error_info, c->err_info,
2634 sizeof(iocommand.error_info));
2635 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2636 kfree(buff);
2637 cmd_special_free(h, c);
2638 return -EFAULT;
2639 }
2640 if (iocommand.Request.Type.Direction == XFER_READ &&
2641 iocommand.buf_size > 0) {
2642 /* Copy the data out of the buffer we created */
2643 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2644 kfree(buff);
2645 cmd_special_free(h, c);
2646 return -EFAULT;
2647 }
2648 }
2649 kfree(buff);
2650 cmd_special_free(h, c);
2651 return 0;
2652}
2653
2654static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2655{
2656 BIG_IOCTL_Command_struct *ioc;
2657 struct CommandList *c;
2658 unsigned char **buff = NULL;
2659 int *buff_size = NULL;
2660 union u64bit temp64;
2661 BYTE sg_used = 0;
2662 int status = 0;
2663 int i;
2664 u32 left;
2665 u32 sz;
2666 BYTE __user *data_ptr;
2667
2668 if (!argp)
2669 return -EINVAL;
2670 if (!capable(CAP_SYS_RAWIO))
2671 return -EPERM;
2672 ioc = (BIG_IOCTL_Command_struct *)
2673 kmalloc(sizeof(*ioc), GFP_KERNEL);
2674 if (!ioc) {
2675 status = -ENOMEM;
2676 goto cleanup1;
2677 }
2678 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2679 status = -EFAULT;
2680 goto cleanup1;
2681 }
2682 if ((ioc->buf_size < 1) &&
2683 (ioc->Request.Type.Direction != XFER_NONE)) {
2684 status = -EINVAL;
2685 goto cleanup1;
2686 }
2687 /* Check kmalloc limits using all SGs */
2688 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2689 status = -EINVAL;
2690 goto cleanup1;
2691 }
2692	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
2693 status = -EINVAL;
2694 goto cleanup1;
2695 }
2696	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
2697 if (!buff) {
2698 status = -ENOMEM;
2699 goto cleanup1;
2700 }
2701	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
2702 if (!buff_size) {
2703 status = -ENOMEM;
2704 goto cleanup1;
2705 }
2706 left = ioc->buf_size;
2707 data_ptr = ioc->buf;
2708 while (left) {
2709 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2710 buff_size[sg_used] = sz;
2711 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2712 if (buff[sg_used] == NULL) {
2713 status = -ENOMEM;
2714 goto cleanup1;
2715 }
2716 if (ioc->Request.Type.Direction == XFER_WRITE) {
2717 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2718 status = -ENOMEM;
2719 goto cleanup1;
2720 }
2721 } else
2722 memset(buff[sg_used], 0, sz);
2723 left -= sz;
2724 data_ptr += sz;
2725 sg_used++;
2726 }
2727 c = cmd_special_alloc(h);
2728 if (c == NULL) {
2729 status = -ENOMEM;
2730 goto cleanup1;
2731 }
2732 c->cmd_type = CMD_IOCTL_PEND;
2733 c->Header.ReplyQueue = 0;
2734	c->Header.SGList = c->Header.SGTotal = sg_used;
2735 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2736 c->Header.Tag.lower = c->busaddr;
2737 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2738 if (ioc->buf_size > 0) {
2739 int i;
2740 for (i = 0; i < sg_used; i++) {
2741 temp64.val = pci_map_single(h->pdev, buff[i],
2742 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2743 c->SG[i].Addr.lower = temp64.val32.lower;
2744 c->SG[i].Addr.upper = temp64.val32.upper;
2745 c->SG[i].Len = buff_size[i];
2746 /* we are not chaining */
2747 c->SG[i].Ext = 0;
2748 }
2749 }
2750	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2751 if (sg_used)
2752 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2753 check_ioctl_unit_attention(h, c);
2754 /* Copy the error information out */
2755 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2756 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2757 cmd_special_free(h, c);
2758 status = -EFAULT;
2759 goto cleanup1;
2760 }
2761	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2762 /* Copy the data out of the buffer we created */
2763 BYTE __user *ptr = ioc->buf;
2764 for (i = 0; i < sg_used; i++) {
2765 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2766 cmd_special_free(h, c);
2767 status = -EFAULT;
2768 goto cleanup1;
2769 }
2770 ptr += buff_size[i];
2771 }
2772 }
2773 cmd_special_free(h, c);
2774 status = 0;
2775cleanup1:
2776 if (buff) {
2777 for (i = 0; i < sg_used; i++)
2778 kfree(buff[i]);
2779 kfree(buff);
2780 }
2781 kfree(buff_size);
2782 kfree(ioc);
2783 return status;
2784}
2785
2786static void check_ioctl_unit_attention(struct ctlr_info *h,
2787 struct CommandList *c)
2788{
2789 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2790 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2791 (void) check_for_unit_attention(h, c);
2792}
2793/*
2794 * ioctl
2795 */
2796static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2797{
2798 struct ctlr_info *h;
2799 void __user *argp = (void __user *)arg;
2800
2801 h = sdev_to_hba(dev);
2802
2803 switch (cmd) {
2804 case CCISS_DEREGDISK:
2805 case CCISS_REGNEWDISK:
2806 case CCISS_REGNEWD:
2807	hpsa_scan_start(h->scsi_host);
2808 return 0;
2809 case CCISS_GETPCIINFO:
2810 return hpsa_getpciinfo_ioctl(h, argp);
2811 case CCISS_GETDRIVVER:
2812 return hpsa_getdrivver_ioctl(h, argp);
2813 case CCISS_PASSTHRU:
2814 return hpsa_passthru_ioctl(h, argp);
2815 case CCISS_BIG_PASSTHRU:
2816 return hpsa_big_passthru_ioctl(h, argp);
2817 default:
2818 return -ENOTTY;
2819 }
2820}
2821
2822static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2823 unsigned char *scsi3addr, u8 reset_type)
2824{
2825 struct CommandList *c;
2826
2827 c = cmd_alloc(h);
2828 if (!c)
2829 return -ENOMEM;
2830 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2831 RAID_CTLR_LUNID, TYPE_MSG);
2832 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2833 c->waiting = NULL;
2834 enqueue_cmd_and_start_io(h, c);
2835 /* Don't wait for completion, the reset won't complete. Don't free
2836 * the command either. This is the last command we will send before
2837 * re-initializing everything, so it doesn't matter and won't leak.
2838 */
2839 return 0;
2840}
2841
2842static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2843 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2844 int cmd_type)
2845{
2846 int pci_dir = XFER_NONE;
2847
2848 c->cmd_type = CMD_IOCTL_PEND;
2849 c->Header.ReplyQueue = 0;
2850 if (buff != NULL && size > 0) {
2851 c->Header.SGList = 1;
2852 c->Header.SGTotal = 1;
2853 } else {
2854 c->Header.SGList = 0;
2855 c->Header.SGTotal = 0;
2856 }
2857 c->Header.Tag.lower = c->busaddr;
2858 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2859
2860 c->Request.Type.Type = cmd_type;
2861 if (cmd_type == TYPE_CMD) {
2862 switch (cmd) {
2863 case HPSA_INQUIRY:
2864 /* are we trying to read a vital product page */
2865 if (page_code != 0) {
2866 c->Request.CDB[1] = 0x01;
2867 c->Request.CDB[2] = page_code;
2868 }
2869 c->Request.CDBLen = 6;
2870 c->Request.Type.Attribute = ATTR_SIMPLE;
2871 c->Request.Type.Direction = XFER_READ;
2872 c->Request.Timeout = 0;
2873 c->Request.CDB[0] = HPSA_INQUIRY;
2874 c->Request.CDB[4] = size & 0xFF;
2875 break;
2876 case HPSA_REPORT_LOG:
2877 case HPSA_REPORT_PHYS:
2878	/* Talking to the controller, so it's a physical command:
2879 mode = 00 target = 0. Nothing to write.
2880 */
2881 c->Request.CDBLen = 12;
2882 c->Request.Type.Attribute = ATTR_SIMPLE;
2883 c->Request.Type.Direction = XFER_READ;
2884 c->Request.Timeout = 0;
2885 c->Request.CDB[0] = cmd;
2886 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2887 c->Request.CDB[7] = (size >> 16) & 0xFF;
2888 c->Request.CDB[8] = (size >> 8) & 0xFF;
2889 c->Request.CDB[9] = size & 0xFF;
2890 break;
2891 case HPSA_CACHE_FLUSH:
2892 c->Request.CDBLen = 12;
2893 c->Request.Type.Attribute = ATTR_SIMPLE;
2894 c->Request.Type.Direction = XFER_WRITE;
2895 c->Request.Timeout = 0;
2896 c->Request.CDB[0] = BMIC_WRITE;
2897 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2898 c->Request.CDB[7] = (size >> 8) & 0xFF;
2899 c->Request.CDB[8] = size & 0xFF;
2900 break;
2901 case TEST_UNIT_READY:
2902 c->Request.CDBLen = 6;
2903 c->Request.Type.Attribute = ATTR_SIMPLE;
2904 c->Request.Type.Direction = XFER_NONE;
2905 c->Request.Timeout = 0;
2906 break;
2907 default:
2908	dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2909 BUG();
2910 return;
2911 }
2912 } else if (cmd_type == TYPE_MSG) {
2913 switch (cmd) {
2914
2915 case HPSA_DEVICE_RESET_MSG:
2916 c->Request.CDBLen = 16;
2917 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2918 c->Request.Type.Attribute = ATTR_SIMPLE;
2919 c->Request.Type.Direction = XFER_NONE;
2920 c->Request.Timeout = 0; /* Don't time out */
2921 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2922 c->Request.CDB[0] = cmd;
2923 c->Request.CDB[1] = 0x03; /* Reset target above */
2924 /* If bytes 4-7 are zero, it means reset the */
2925 /* LunID device */
2926 c->Request.CDB[4] = 0x00;
2927 c->Request.CDB[5] = 0x00;
2928 c->Request.CDB[6] = 0x00;
2929 c->Request.CDB[7] = 0x00;
2930 break;
2931
2932 default:
2933 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2934 cmd);
2935 BUG();
2936 }
2937 } else {
2938 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2939 BUG();
2940 }
2941
2942 switch (c->Request.Type.Direction) {
2943 case XFER_READ:
2944 pci_dir = PCI_DMA_FROMDEVICE;
2945 break;
2946 case XFER_WRITE:
2947 pci_dir = PCI_DMA_TODEVICE;
2948 break;
2949 case XFER_NONE:
2950 pci_dir = PCI_DMA_NONE;
2951 break;
2952 default:
2953 pci_dir = PCI_DMA_BIDIRECTIONAL;
2954 }
2955
2956 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2957
2958 return;
2959}
2960
2961/*
2962 * Map (physical) PCI mem into (virtual) kernel space
2963 */
2964static void __iomem *remap_pci_mem(ulong base, ulong size)
2965{
2966 ulong page_base = ((ulong) base) & PAGE_MASK;
2967 ulong page_offs = ((ulong) base) - page_base;
2968 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2969
2970 return page_remapped ? (page_remapped + page_offs) : NULL;
2971}
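
/*
 * Worked example (illustrative, assuming 4KiB pages): base = 0xfd001234
 * and size = 0x100 give page_base = 0xfd001000 and page_offs = 0x234,
 * so we ioremap 0x334 bytes starting at the page boundary and return
 * page_remapped + 0x234.
 */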
2972
2973/* Takes cmds off the submission queue and sends them to the hardware,
2974 * then puts them on the queue of cmds waiting for completion.
2975 */
2976static void start_io(struct ctlr_info *h)
2977{
2978 struct CommandList *c;
2979
2980 while (!list_empty(&h->reqQ)) {
2981 c = list_entry(h->reqQ.next, struct CommandList, list);
2982 /* can't do anything if fifo is full */
2983 if ((h->access.fifo_full(h))) {
2984 dev_warn(&h->pdev->dev, "fifo full\n");
2985 break;
2986 }
2987
2988 /* Get the first entry from the Request Q */
2989 removeQ(c);
2990 h->Qdepth--;
2991
2992 /* Tell the controller execute command */
2993 h->access.submit_command(h, c);
2994
2995 /* Put job onto the completed Q */
2996 addQ(&h->cmpQ, c);
2997 }
2998}
2999
3000static inline unsigned long get_next_completion(struct ctlr_info *h)
3001{
3002 return h->access.command_completed(h);
3003}
3004
3005static inline bool interrupt_pending(struct ctlr_info *h)
3006{
3007 return h->access.intr_pending(h);
3008}
3009
3010static inline long interrupt_not_for_us(struct ctlr_info *h)
3011{
3012 return (h->access.intr_pending(h) == 0) ||
3013 (h->interrupts_enabled == 0);
3014}
3015
3016static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3017 u32 raw_tag)
3018{
3019 if (unlikely(tag_index >= h->nr_cmds)) {
3020 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3021 return 1;
3022 }
3023 return 0;
3024}
3025
3026static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
3027{
3028 removeQ(c);
3029 if (likely(c->cmd_type == CMD_SCSI))
3030	complete_scsi_command(c);
3031 else if (c->cmd_type == CMD_IOCTL_PEND)
3032 complete(c->waiting);
3033}
3034
3035static inline u32 hpsa_tag_contains_index(u32 tag)
3036{
3037 return tag & DIRECT_LOOKUP_BIT;
3038}
3039
3040static inline u32 hpsa_tag_to_index(u32 tag)
3041{
3042 return tag >> DIRECT_LOOKUP_SHIFT;
3043}
3044
3045
3046static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3047{
3048#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3049#define HPSA_SIMPLE_ERROR_BITS 0x03
3050	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3051 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3052 return tag & ~HPSA_PERF_ERROR_BITS;
3053}
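
/*
 * Illustrative note: in performant mode the low DIRECT_LOOKUP_SHIFT
 * bits of a returned tag carry status/error flags, so
 * HPSA_PERF_ERROR_BITS == (1 << DIRECT_LOOKUP_SHIFT) - 1 masks them off
 * before the tag is compared or decoded; simple mode only uses the low
 * two bits (0x03) that way.
 */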
3054
3055/* process completion of an indexed ("direct lookup") command */
3056static inline u32 process_indexed_cmd(struct ctlr_info *h,
3057 u32 raw_tag)
3058{
3059 u32 tag_index;
3060 struct CommandList *c;
3061
3062 tag_index = hpsa_tag_to_index(raw_tag);
3063 if (bad_tag(h, tag_index, raw_tag))
3064 return next_command(h);
3065 c = h->cmd_pool + tag_index;
3066 finish_cmd(c, raw_tag);
3067 return next_command(h);
3068}
3069
3070/* process completion of a non-indexed command */
3071static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
3072 u32 raw_tag)
3073{
3074 u32 tag;
3075 struct CommandList *c = NULL;
3076
3077	tag = hpsa_tag_discard_error_bits(h, raw_tag);
3078	list_for_each_entry(c, &h->cmpQ, list) {
3079 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3080 finish_cmd(c, raw_tag);
3081 return next_command(h);
3082 }
3083 }
3084 bad_tag(h, h->nr_cmds + 1, raw_tag);
3085 return next_command(h);
3086}
3087
64670ac8
SC
3088/* Some controllers, like p400, will give us one interrupt
3089 * after a soft reset, even if we turned interrupts off.
3090 * Only need to check for this in the hpsa_xxx_discard_completions
3091 * functions.
3092 */
3093static int ignore_bogus_interrupt(struct ctlr_info *h)
3094{
3095 if (likely(!reset_devices))
3096 return 0;
3097
3098 if (likely(h->interrupts_enabled))
3099 return 0;
3100
3101 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3102 "(known firmware bug.) Ignoring.\n");
3103
3104 return 1;
3105}
3106
3107static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3108{
3109 struct ctlr_info *h = dev_id;
3110 unsigned long flags;
3111 u32 raw_tag;
3112
3113 if (ignore_bogus_interrupt(h))
3114 return IRQ_NONE;
3115
3116 if (interrupt_not_for_us(h))
3117 return IRQ_NONE;
3118 spin_lock_irqsave(&h->lock, flags);
3119	h->last_intr_timestamp = get_jiffies_64();
3120 while (interrupt_pending(h)) {
3121 raw_tag = get_next_completion(h);
3122 while (raw_tag != FIFO_EMPTY)
3123 raw_tag = next_command(h);
3124 }
3125 spin_unlock_irqrestore(&h->lock, flags);
3126 return IRQ_HANDLED;
3127}
3128
3129static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3130{
3131 struct ctlr_info *h = dev_id;
3132 unsigned long flags;
3133 u32 raw_tag;
3134
3135 if (ignore_bogus_interrupt(h))
3136 return IRQ_NONE;
3137
3138 spin_lock_irqsave(&h->lock, flags);
3139	h->last_intr_timestamp = get_jiffies_64();
3140 raw_tag = get_next_completion(h);
3141 while (raw_tag != FIFO_EMPTY)
3142 raw_tag = next_command(h);
3143 spin_unlock_irqrestore(&h->lock, flags);
3144 return IRQ_HANDLED;
3145}
3146
3147static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
3148{
3149 struct ctlr_info *h = dev_id;
3150	unsigned long flags;
3151	u32 raw_tag;
3152
3153 if (interrupt_not_for_us(h))
3154 return IRQ_NONE;
3155	spin_lock_irqsave(&h->lock, flags);
3156	h->last_intr_timestamp = get_jiffies_64();
3157 while (interrupt_pending(h)) {
3158 raw_tag = get_next_completion(h);
3159 while (raw_tag != FIFO_EMPTY) {
3160 if (hpsa_tag_contains_index(raw_tag))
3161 raw_tag = process_indexed_cmd(h, raw_tag);
3162 else
3163 raw_tag = process_nonindexed_cmd(h, raw_tag);
3164 }
3165 }
3166 spin_unlock_irqrestore(&h->lock, flags);
3167 return IRQ_HANDLED;
3168}
3169
3170static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
3171{
3172 struct ctlr_info *h = dev_id;
3173 unsigned long flags;
3174 u32 raw_tag;
3175
3176	spin_lock_irqsave(&h->lock, flags);
3177	h->last_intr_timestamp = get_jiffies_64();
3178 raw_tag = get_next_completion(h);
3179 while (raw_tag != FIFO_EMPTY) {
3180 if (hpsa_tag_contains_index(raw_tag))
3181 raw_tag = process_indexed_cmd(h, raw_tag);
3182 else
3183 raw_tag = process_nonindexed_cmd(h, raw_tag);
3184 }
3185 spin_unlock_irqrestore(&h->lock, flags);
3186 return IRQ_HANDLED;
3187}
3188
3189/* Send a message CDB to the firmware. Careful, this only works
3190 * in simple mode, not performant mode due to the tag lookup.
3191 * We only ever use this immediately after a controller reset.
3192 */
3193static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3194 unsigned char type)
3195{
3196 struct Command {
3197 struct CommandListHeader CommandHeader;
3198 struct RequestBlock Request;
3199 struct ErrDescriptor ErrorDescriptor;
3200 };
3201 struct Command *cmd;
3202 static const size_t cmd_sz = sizeof(*cmd) +
3203 sizeof(cmd->ErrorDescriptor);
3204 dma_addr_t paddr64;
3205 uint32_t paddr32, tag;
3206 void __iomem *vaddr;
3207 int i, err;
3208
3209 vaddr = pci_ioremap_bar(pdev, 0);
3210 if (vaddr == NULL)
3211 return -ENOMEM;
3212
3213 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3214 * CCISS commands, so they must be allocated from the lower 4GiB of
3215 * memory.
3216 */
3217 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3218 if (err) {
3219 iounmap(vaddr);
3220 return -ENOMEM;
3221 }
3222
3223 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3224 if (cmd == NULL) {
3225 iounmap(vaddr);
3226 return -ENOMEM;
3227 }
3228
3229 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3230 * although there's no guarantee, we assume that the address is at
3231 * least 4-byte aligned (most likely, it's page-aligned).
3232 */
3233 paddr32 = paddr64;
3234
3235 cmd->CommandHeader.ReplyQueue = 0;
3236 cmd->CommandHeader.SGList = 0;
3237 cmd->CommandHeader.SGTotal = 0;
3238 cmd->CommandHeader.Tag.lower = paddr32;
3239 cmd->CommandHeader.Tag.upper = 0;
3240 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3241
3242 cmd->Request.CDBLen = 16;
3243 cmd->Request.Type.Type = TYPE_MSG;
3244 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3245 cmd->Request.Type.Direction = XFER_NONE;
3246 cmd->Request.Timeout = 0; /* Don't time out */
3247 cmd->Request.CDB[0] = opcode;
3248 cmd->Request.CDB[1] = type;
3249 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3250 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3251 cmd->ErrorDescriptor.Addr.upper = 0;
3252 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3253
3254 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3255
3256 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3257 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3258	if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3259 break;
3260 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3261 }
3262
3263 iounmap(vaddr);
3264
3265 /* we leak the DMA buffer here ... no choice since the controller could
3266 * still complete the command.
3267 */
3268 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3269 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3270 opcode, type);
3271 return -ETIMEDOUT;
3272 }
3273
3274 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3275
3276 if (tag & HPSA_ERROR_BIT) {
3277 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3278 opcode, type);
3279 return -EIO;
3280 }
3281
3282 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3283 opcode, type);
3284 return 0;
3285}
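
/*
 * Illustrative note: this is a polled handshake. The command's 32-bit
 * physical address doubles as its tag, so after posting to
 * SA5_REQUEST_PORT_OFFSET we repeatedly read SA5_REPLY_PORT_OFFSET
 * until the returned tag (error bits masked) equals paddr32, giving up
 * after HPSA_MSG_SEND_RETRY_LIMIT polls.
 */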
3286
3287#define hpsa_noop(p) hpsa_message(p, 3, 0)
3288
3289static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3290	void * __iomem vaddr, u32 use_doorbell)
3291{
3292 u16 pmcsr;
3293 int pos;
3294
3295 if (use_doorbell) {
3296 /* For everything after the P600, the PCI power state method
3297 * of resetting the controller doesn't work, so we have this
3298 * other way using the doorbell register.
3299 */
3300 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3301	writel(use_doorbell, vaddr + SA5_DOORBELL);
3302 } else { /* Try to do it the PCI power state way */
3303
3304 /* Quoting from the Open CISS Specification: "The Power
3305 * Management Control/Status Register (CSR) controls the power
3306 * state of the device. The normal operating state is D0,
3307 * CSR=00h. The software off state is D3, CSR=03h. To reset
3308 * the controller, place the interface device in D3 then to D0,
3309 * this causes a secondary PCI reset which will reset the
3310 * controller." */
3311
3312 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3313 if (pos == 0) {
3314 dev_err(&pdev->dev,
3315 "hpsa_reset_controller: "
3316 "PCI PM not supported\n");
3317 return -ENODEV;
3318 }
3319 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3320 /* enter the D3hot power management state */
3321 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3322 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3323 pmcsr |= PCI_D3hot;
3324 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3325
3326 msleep(500);
3327
3328 /* enter the D0 power management state */
3329 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3330 pmcsr |= PCI_D0;
3331 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3332
3333 /*
3334 * The P600 requires a small delay when changing states.
3335 * Otherwise we may think the board did not reset and we bail.
3336	 * This is for kdump only and is particular to the P600.
3337 */
3338 msleep(500);
3339 }
3340 return 0;
3341}
3342
3343static __devinit void init_driver_version(char *driver_version, int len)
3344{
3345 memset(driver_version, 0, len);
3346 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3347}
3348
3349static __devinit int write_driver_ver_to_cfgtable(
3350 struct CfgTable __iomem *cfgtable)
3351{
3352 char *driver_version;
3353 int i, size = sizeof(cfgtable->driver_version);
3354
3355 driver_version = kmalloc(size, GFP_KERNEL);
3356 if (!driver_version)
3357 return -ENOMEM;
3358
3359 init_driver_version(driver_version, size);
3360 for (i = 0; i < size; i++)
3361 writeb(driver_version[i], &cfgtable->driver_version[i]);
3362 kfree(driver_version);
3363 return 0;
3364}
3365
3366static __devinit void read_driver_ver_from_cfgtable(
3367 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3368{
3369 int i;
3370
3371 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3372 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3373}
3374
3375static __devinit int controller_reset_failed(
3376 struct CfgTable __iomem *cfgtable)
3377{
3378
3379 char *driver_ver, *old_driver_ver;
3380 int rc, size = sizeof(cfgtable->driver_version);
3381
3382 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3383 if (!old_driver_ver)
3384 return -ENOMEM;
3385 driver_ver = old_driver_ver + size;
3386
3387 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3388 * should have been changed, otherwise we know the reset failed.
3389 */
3390 init_driver_version(old_driver_ver, size);
3391 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3392 rc = !memcmp(driver_ver, old_driver_ver, size);
3393 kfree(old_driver_ver);
3394 return rc;
3395}
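
/*
 * Illustrative note: the reset detection above is a handshake -- the
 * driver writes "hpsa <version>" into the cfgtable's driver_version
 * field before resetting; a successful reset reinitializes the
 * cfgtable, changing those bytes, so reading them back unchanged means
 * the reset did not take.
 */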
3396/* This does a hard reset of the controller using PCI power management
3397 * states or the doorbell register.
3398 */
3399static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3400{
3401 u64 cfg_offset;
3402 u32 cfg_base_addr;
3403 u64 cfg_base_addr_index;
3404 void __iomem *vaddr;
3405 unsigned long paddr;
3406	u32 misc_fw_support;
3407	int rc;
3408	struct CfgTable __iomem *cfgtable;
3409	u32 use_doorbell;
3410	u32 board_id;
3411	u16 command_register;
3412
3413 /* For controllers as old as the P600, this is very nearly
3414 * the same thing as
3415 *
3416 * pci_save_state(pci_dev);
3417 * pci_set_power_state(pci_dev, PCI_D3hot);
3418 * pci_set_power_state(pci_dev, PCI_D0);
3419 * pci_restore_state(pci_dev);
3420 *
3421 * For controllers newer than the P600, the pci power state
3422 * method of resetting doesn't work so we have another way
3423 * using the doorbell register.
3424	 */
3425
3426	rc = hpsa_lookup_board_id(pdev, &board_id);
3427	if (rc < 0 || !ctlr_is_resettable(board_id)) {
3428 dev_warn(&pdev->dev, "Not resetting device.\n");
3429 return -ENODEV;
3430 }
3431
3432 /* if controller is soft- but not hard resettable... */
3433 if (!ctlr_is_hard_resettable(board_id))
3434 return -ENOTSUPP; /* try soft reset later. */
3435
3436 /* Save the PCI command register */
3437 pci_read_config_word(pdev, 4, &command_register);
3438 /* Turn the board off. This is so that later pci_restore_state()
3439 * won't turn the board on before the rest of config space is ready.
3440 */
3441 pci_disable_device(pdev);
3442 pci_save_state(pdev);
3443
3444 /* find the first memory BAR, so we can find the cfg table */
3445 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3446 if (rc)
3447 return rc;
3448 vaddr = remap_pci_mem(paddr, 0x250);
3449 if (!vaddr)
3450 return -ENOMEM;
3451
3452 /* find cfgtable in order to check if reset via doorbell is supported */
3453 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3454 &cfg_base_addr_index, &cfg_offset);
3455 if (rc)
3456 goto unmap_vaddr;
3457 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3458 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3459 if (!cfgtable) {
3460 rc = -ENOMEM;
3461 goto unmap_vaddr;
3462 }
3463 rc = write_driver_ver_to_cfgtable(cfgtable);
3464 if (rc)
3465 goto unmap_vaddr;
3466
3467 /* If reset via doorbell register is supported, use that.
3468 * There are two such methods. Favor the newest method.
3469 */
3470	misc_fw_support = readl(&cfgtable->misc_fw_support);
3471 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3472 if (use_doorbell) {
3473 use_doorbell = DOORBELL_CTLR_RESET2;
3474 } else {
3475 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3476 if (use_doorbell) {
3477 dev_warn(&pdev->dev, "Soft reset not supported. "
3478 "Firmware update is required.\n");
3479	rc = -ENOTSUPP; /* try soft reset */
3480 goto unmap_cfgtable;
3481 }
3482 }
3483
3484 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3485 if (rc)
3486 goto unmap_cfgtable;
3487
3488 pci_restore_state(pdev);
3489 rc = pci_enable_device(pdev);
3490 if (rc) {
3491 dev_warn(&pdev->dev, "failed to enable device.\n");
3492 goto unmap_cfgtable;
3493	}
3494	pci_write_config_word(pdev, 4, command_register);
3495
3496 /* Some devices (notably the HP Smart Array 5i Controller)
3497 need a little pause here */
3498 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3499
3500	/* Wait for board to become not ready, then ready. */
3501	dev_info(&pdev->dev, "Waiting for board to reset.\n");
3502	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3503	if (rc) {
3504	dev_warn(&pdev->dev,
3505 "failed waiting for board to reset."
3506 " Will try soft reset.\n");
3507 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3508 goto unmap_cfgtable;
3509 }
fe5389c8
SC
3510 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3511 if (rc) {
3512 dev_warn(&pdev->dev,
64670ac8
SC
3513 "failed waiting for board to become ready "
3514 "after hard reset\n");
fe5389c8
SC
3515 goto unmap_cfgtable;
3516 }

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}

/*
 * We cannot read the structure directly; for portability we must use
 * the I/O accessor functions instead.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}

static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					"base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
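
/* Worked example (illustrative): if resource 0 is a 64-bit memory BAR,
 * it occupies config space offsets 0x10-0x17, so a pci_bar_addr of
 * PCI_BASE_ADDRESS_0 + 8 corresponds to resource index 1.  The running
 * offset always points at resource i + 1 when it is compared, which is
 * why a match returns i + 1.
 */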

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */

static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
		{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
		if (!err) {
			h->intr[0] = hpsa_msix_entries[0].vector;
			h->intr[1] = hpsa_msix_entries[1].vector;
			h->intr[2] = hpsa_msix_entries[2].vector;
			h->intr[3] = hpsa_msix_entries[3].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
				"available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
				err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
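
/* The board_id packs the 16-bit PCI subsystem device ID into the upper
 * half and the 16-bit subsystem vendor ID into the lower half.  For
 * example (illustrative), subsystem vendor 0x103C with subsystem
 * device 0x3241 yields board_id 0x3241103C; the P600 quirk further
 * down keys off exactly this encoding (0x3225103C).
 */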

static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
	u16 command;

	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}

static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
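
/* The worst-case polling time is bounded by iterations *
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS.  The scratchpad register is
 * the firmware's readiness mailbox, which is why both the hard reset
 * path above and the soft reset path below reuse this one helper to
 * watch the board go not-ready and then ready again.
 */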

static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save DMA-able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}
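
/* Worked example (illustrative): a controller reporting
 * MaxScatterGatherElements = 1024 ends up with max_cmd_sg_entries = 32,
 * chainsize = 1024 - 32 + 1 = 993, and maxsgentries = 1023, since one
 * of the 1024 entries is given up to hold the chain pointer.
 */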

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}
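
/* The sequence above is the transport-change handshake used throughout
 * this driver: write the requested method into
 * HostWrite.TransportRequest, ring the doorbell with CFGTBL_ChangeReq,
 * wait for the controller to clear the doorbell bit, then verify the
 * result in TransportActive.  hpsa_enter_performant_mode() below
 * follows the same sequence.
 */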

static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (hpsa_board_disabled(h->pdev)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "hpsa");
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(h->pdev);
	return err;
}

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
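
/* Sizing note: the command pool is nr_cmds CommandList structures plus
 * nr_cmds ErrorInfo structures, both in DMA-coherent memory, plus
 * cmd_pool_bits, a bitmap with one bit per command slot that tracks
 * which slots are in use.
 */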

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc;

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[h->intr_mode], msixhandler,
				0, h->devname, h);
	else
		rc = request_irq(h->intr[h->intr_mode], intxhandler,
				IRQF_SHARED, h->devname, h);
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
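
/* The MSI/MSI-X vector is exclusive to this controller, so its handler
 * is registered without IRQF_SHARED; legacy INTx lines may be shared
 * with other devices, hence IRQF_SHARED for the INTx handler.
 */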

static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c, c->Header.Tag.lower);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal.  We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
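
/* Timing recap: the detector thread below samples each controller every
 * HEARTBEAT_SAMPLE_INTERVAL (10 seconds).  A lockup is declared only if
 * no interrupt has been seen within HEARTBEAT_CHECK_MINIMUM_INTERVAL
 * (5 seconds), the previous heartbeat check is at least that old, and
 * the firmware's HeartBeat counter has not advanced since that check.
 */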

static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}

static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, "hpsa");
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}

static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlr's to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		kthread_stop(hpsa_lockup_detector);
		spin_lock_irqsave(&lockup_detector_lock, flags);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irq(h->intr[h->intr_mode], h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 1;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command,
	 * so that all data in the battery-backed cache is written out
	 * to the disks before we power down.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif				/* CONFIG_PCI_MSI */
}

static void __devexit hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = "hpsa",
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < NUM_BUCKETS; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
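
/* Worked example (illustrative): with the bft[] table used below,
 * {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}, a command with
 * 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 sixteen-byte
 * blocks; the smallest bucket >= 7 is 8, at index 2, so
 * bucket_map[3] == 2.
 */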

static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
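	/* Example of how the table is consumed (illustrative): a command
	 * with 2 SG entries is sized at 6 blocks, so calc_bucket_map()
	 * assigns it bucket index 1; those 3 bits travel in the low bits
	 * of the command address, and the controller then fetches only
	 * 6 * 16 = 96 bytes for that command.
	 */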

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
		SG_ENTRIES_IN_CMD, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}

static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
		&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
		sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 *  This is it.  Register the PCI driver information for the cards we
 *  control; the OS will call our registered routines when it finds one
 *  of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);