5 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6 * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
8 * (C) Copyright 2013 IBM Corporation
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/reboot.h>
31 #include <linux/slab.h>
32 #include <linux/bitops.h>
34 #include <linux/genhd.h>
35 #include <linux/idr.h>
37 #include "rsxx_priv.h"
/* Module identification metadata exported to modinfo. */
42 MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
43 MODULE_AUTHOR("IBM <support@ramsan.com>");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRIVER_VERSION);
/*
 * Module parameter: when set, skip MSI and use legacy (INTx) PCI
 * interrupts. Read-only from sysfs (mode 0444).
 */
47 static unsigned int force_legacy = NO_LEGACY;
48 module_param(force_legacy, uint, 0444);
49 MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
/*
 * IDA allocator handing out unique per-card disk ids; rsxx_ida_lock
 * serializes ida_get_new()/ida_remove() calls (see probe/remove paths).
 */
51 static DEFINE_IDA(rsxx_disk_ida);
52 static DEFINE_SPINLOCK(rsxx_ida_lock);
54 /*----------------- Interrupt Control & Handling -------------------*/
/*
 * Set the given interrupt bit(s) in a software interrupt mask.
 * Caller must hold card->irq_lock (see note below in this file).
 */
static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}
/*
 * Clear the given interrupt bit(s) in a software interrupt mask.
 * Caller must hold card->irq_lock.
 */
static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}
66 * NOTE: Disabling the IER will disable the hardware interrupt.
67 * Disabling the ISR will disable the software handling of the ISR bit.
69 * Enable/Disable interrupt functions assume the card->irq_lock
70 * is held by the caller.
72 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
74 if (unlikely(card->halt))
77 __enable_intr(&card->ier_mask, intr);
78 iowrite32(card->ier_mask, card->regmap + IER);
81 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
83 __disable_intr(&card->ier_mask, intr);
84 iowrite32(card->ier_mask, card->regmap + IER);
87 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
90 if (unlikely(card->halt))
93 __enable_intr(&card->isr_mask, intr);
94 __enable_intr(&card->ier_mask, intr);
95 iowrite32(card->ier_mask, card->regmap + IER);
97 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
100 __disable_intr(&card->isr_mask, intr);
101 __disable_intr(&card->ier_mask, intr);
102 iowrite32(card->ier_mask, card->regmap + IER);
105 static irqreturn_t rsxx_isr(int irq, void *pdata)
107 struct rsxx_cardinfo *card = pdata;
113 spin_lock(&card->irq_lock);
118 isr = ioread32(card->regmap + ISR);
119 if (isr == 0xffffffff) {
121 * A few systems seem to have an intermittent issue
122 * where PCI reads return all Fs, but retrying the read
123 * a little later will return as expected.
125 dev_info(CARD_TO_DEV(card),
126 "ISR = 0xFFFFFFFF, retrying later\n");
130 isr &= card->isr_mask;
134 for (i = 0; i < card->n_targets; i++) {
135 if (isr & CR_INTR_DMA(i)) {
136 if (card->ier_mask & CR_INTR_DMA(i)) {
137 rsxx_disable_ier(card, CR_INTR_DMA(i));
140 queue_work(card->ctrl[i].done_wq,
141 &card->ctrl[i].dma_done_work);
146 if (isr & CR_INTR_CREG) {
147 schedule_work(&card->creg_ctrl.done_work);
151 if (isr & CR_INTR_EVENT) {
152 schedule_work(&card->event_work);
153 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
156 } while (reread_isr);
158 spin_unlock(&card->irq_lock);
160 return handled ? IRQ_HANDLED : IRQ_NONE;
163 /*----------------- Card Event Handler -------------------*/
164 static char *rsxx_card_state_to_str(unsigned int state)
166 static char *state_strings[] = {
167 "Unknown", "Shutdown", "Starting", "Formatting",
168 "Uninitialized", "Good", "Shutting Down",
169 "Fault", "Read Only Fault", "dStroying"
172 return state_strings[ffs(state)];
175 static void card_state_change(struct rsxx_cardinfo *card,
176 unsigned int new_state)
180 dev_info(CARD_TO_DEV(card),
181 "card state change detected.(%s -> %s)\n",
182 rsxx_card_state_to_str(card->state),
183 rsxx_card_state_to_str(new_state));
185 card->state = new_state;
187 /* Don't attach DMA interfaces if the card has an invalid config */
188 if (!card->config_valid)
192 case CARD_STATE_RD_ONLY_FAULT:
193 dev_crit(CARD_TO_DEV(card),
194 "Hardware has entered read-only mode!\n");
196 * Fall through so the DMA devices can be attached and
197 * the user can attempt to pull off their data.
199 case CARD_STATE_GOOD:
200 st = rsxx_get_card_size8(card, &card->size8);
202 dev_err(CARD_TO_DEV(card),
203 "Failed attaching DMA devices\n");
205 if (card->config_valid)
206 set_capacity(card->gendisk, card->size8 >> 9);
209 case CARD_STATE_FAULT:
210 dev_crit(CARD_TO_DEV(card),
211 "Hardware Fault reported!\n");
214 /* Everything else, detach DMA interface if it's attached. */
215 case CARD_STATE_SHUTDOWN:
216 case CARD_STATE_STARTING:
217 case CARD_STATE_FORMATTING:
218 case CARD_STATE_UNINITIALIZED:
219 case CARD_STATE_SHUTTING_DOWN:
221 * dStroy is a term coined by marketing to represent the low level
224 case CARD_STATE_DSTROYING:
225 set_capacity(card->gendisk, 0);
230 static void card_event_handler(struct work_struct *work)
232 struct rsxx_cardinfo *card;
237 card = container_of(work, struct rsxx_cardinfo, event_work);
239 if (unlikely(card->halt))
243 * Enable the interrupt now to avoid any weird race conditions where a
244 * state change might occur while rsxx_get_card_state() is
245 * processing a returned creg cmd.
247 spin_lock_irqsave(&card->irq_lock, flags);
248 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
249 spin_unlock_irqrestore(&card->irq_lock, flags);
251 st = rsxx_get_card_state(card, &state);
253 dev_info(CARD_TO_DEV(card),
254 "Failed reading state after event.\n");
258 if (card->state != state)
259 card_state_change(card, state);
261 if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
262 rsxx_read_hw_log(card);
265 /*----------------- Card Operations -------------------*/
266 static int card_shutdown(struct rsxx_cardinfo *card)
270 const int timeout = msecs_to_jiffies(120000);
273 /* We can't issue a shutdown if the card is in a transition state */
276 st = rsxx_get_card_state(card, &state);
279 } while (state == CARD_STATE_STARTING &&
280 (jiffies - start < timeout));
282 if (state == CARD_STATE_STARTING)
285 /* Only issue a shutdown if we need to */
286 if ((state != CARD_STATE_SHUTTING_DOWN) &&
287 (state != CARD_STATE_SHUTDOWN)) {
288 st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
295 st = rsxx_get_card_state(card, &state);
298 } while (state != CARD_STATE_SHUTDOWN &&
299 (jiffies - start < timeout));
301 if (state != CARD_STATE_SHUTDOWN)
307 /*----------------- Driver Initialization & Setup -------------------*/
308 /* Returns: 0 if the driver is compatible with the device
309 -1 if the driver is NOT compatible with the device */
310 static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
312 unsigned char pci_rev;
314 pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
316 if (pci_rev > RS70_PCI_REV_SUPPORTED)
/*
 * Probe/attach path for one RamSan PCIe card: allocate per-card state,
 * take a disk id from rsxx_disk_ida, enable the PCI device and 64-bit
 * DMA, map BAR0, install the (shared) IRQ handler, bring up the creg
 * command interface, the DMA engine, and the block device, and finally
 * kick a SHUTDOWN card back to STARTUP or read its size if it is GOOD.
 *
 * NOTE(review): this extracted chunk appears truncated (braces, early
 * returns, and several error labels are missing) -- comments added only;
 * code bytes left untouched.
 */
321 static int rsxx_pci_probe(struct pci_dev *dev,
322 const struct pci_device_id *id)
324 struct rsxx_cardinfo *card;
327 dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
/* Per-card state; freed on every failure path below. */
329 card = kzalloc(sizeof(*card), GFP_KERNEL);
334 pci_set_drvdata(dev, card);
/* Loop: ida_get_new() can return -EAGAIN after preloading; retry. */
337 if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
342 spin_lock(&rsxx_ida_lock);
343 st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
344 spin_unlock(&rsxx_ida_lock);
345 } while (st == -EAGAIN);
350 st = pci_enable_device(dev);
/* Cap DMA segments at the hardware block size. */
355 pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
357 st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
359 dev_err(CARD_TO_DEV(card),
360 "No usable DMA configuration,aborting\n");
361 goto failed_dma_mask;
364 st = pci_request_regions(dev, DRIVER_NAME);
366 dev_err(CARD_TO_DEV(card),
367 "Failed to request memory region\n");
368 goto failed_request_regions;
/* BAR0 holds the register map; a zero-length BAR is fatal. */
371 if (pci_resource_len(dev, 0) == 0) {
372 dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
377 card->regmap = pci_iomap(dev, 0, 0);
379 dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
384 spin_lock_init(&card->irq_lock);
/* Mask everything before the IRQ handler can run. */
387 spin_lock_irq(&card->irq_lock);
388 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
389 spin_unlock_irq(&card->irq_lock);
/* MSI is best-effort (skipped under force_legacy); warn and fall back. */
392 st = pci_enable_msi(dev);
394 dev_warn(CARD_TO_DEV(card),
395 "Failed to enable MSI\n");
398 st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
401 dev_err(CARD_TO_DEV(card),
402 "Failed requesting IRQ%d\n", dev->irq);
406 /************* Setup Processor Command Interface *************/
407 rsxx_creg_setup(card);
409 spin_lock_irq(&card->irq_lock);
410 rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
411 spin_unlock_irq(&card->irq_lock);
413 st = rsxx_compatibility_check(card);
415 dev_warn(CARD_TO_DEV(card),
416 "Incompatible driver detected. Please update the driver.\n");
418 goto failed_compatiblity_check;
421 /************* Load Card Config *************/
/* Config load failure is non-fatal: card->config_valid gates attach. */
422 st = rsxx_load_config(card);
424 dev_err(CARD_TO_DEV(card),
425 "Failed loading card config\n");
427 /************* Setup DMA Engine *************/
428 st = rsxx_get_num_targets(card, &card->n_targets);
430 dev_info(CARD_TO_DEV(card),
431 "Failed reading the number of DMA targets\n");
433 card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
436 goto failed_dma_setup;
439 st = rsxx_dma_setup(card);
441 dev_info(CARD_TO_DEV(card),
442 "Failed to setup DMA engine\n");
443 goto failed_dma_setup;
446 /************* Setup Card Event Handler *************/
447 INIT_WORK(&card->event_work, card_event_handler);
449 st = rsxx_setup_dev(card);
451 goto failed_create_dev;
453 rsxx_get_card_state(card, &card->state);
455 dev_info(CARD_TO_DEV(card),
457 rsxx_card_state_to_str(card->state));
460 * Now that the DMA Engine and devices have been setup,
461 * we can enable the event interrupt(it kicks off actions in
462 * those layers so we couldn't enable it right away.)
464 spin_lock_irq(&card->irq_lock);
465 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
466 spin_unlock_irq(&card->irq_lock);
/* A card left in SHUTDOWN is restarted; a GOOD/RO card reports size. */
468 if (card->state == CARD_STATE_SHUTDOWN) {
469 st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
471 dev_crit(CARD_TO_DEV(card),
472 "Failed issuing card startup\n");
473 } else if (card->state == CARD_STATE_GOOD ||
474 card->state == CARD_STATE_RD_ONLY_FAULT) {
475 st = rsxx_get_card_size8(card, &card->size8);
480 rsxx_attach_dev(card);
/* Error unwind: release resources in reverse order of acquisition. */
485 rsxx_dma_destroy(card);
487 failed_compatiblity_check:
488 spin_lock_irq(&card->irq_lock);
489 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
490 spin_unlock_irq(&card->irq_lock);
491 free_irq(dev->irq, card);
493 pci_disable_msi(dev);
495 pci_iounmap(dev, card->regmap);
497 pci_release_regions(dev);
498 failed_request_regions:
500 pci_disable_device(dev);
502 spin_lock(&rsxx_ida_lock);
503 ida_remove(&rsxx_disk_ida, card->disk_id);
504 spin_unlock(&rsxx_ida_lock);
511 static void rsxx_pci_remove(struct pci_dev *dev)
513 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
521 dev_info(CARD_TO_DEV(card),
522 "Removing PCI-Flash SSD.\n");
524 rsxx_detach_dev(card);
526 for (i = 0; i < card->n_targets; i++) {
527 spin_lock_irqsave(&card->irq_lock, flags);
528 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
529 spin_unlock_irqrestore(&card->irq_lock, flags);
532 st = card_shutdown(card);
534 dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
536 /* Sync outstanding event handlers. */
537 spin_lock_irqsave(&card->irq_lock, flags);
538 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
539 spin_unlock_irqrestore(&card->irq_lock, flags);
541 /* Prevent work_structs from re-queuing themselves. */
544 cancel_work_sync(&card->event_work);
546 rsxx_destroy_dev(card);
547 rsxx_dma_destroy(card);
549 spin_lock_irqsave(&card->irq_lock, flags);
550 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
551 spin_unlock_irqrestore(&card->irq_lock, flags);
552 free_irq(dev->irq, card);
555 pci_disable_msi(dev);
557 rsxx_creg_destroy(card);
559 pci_iounmap(dev, card->regmap);
561 pci_disable_device(dev);
562 pci_release_regions(dev);
567 static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
569 /* We don't support suspend at this time. */
573 static void rsxx_pci_shutdown(struct pci_dev *dev)
575 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
582 dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
584 rsxx_detach_dev(card);
586 for (i = 0; i < card->n_targets; i++) {
587 spin_lock_irqsave(&card->irq_lock, flags);
588 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
589 spin_unlock_irqrestore(&card->irq_lock, flags);
595 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
596 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
597 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
598 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
599 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
603 MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
605 static struct pci_driver rsxx_pci_driver = {
607 .id_table = rsxx_pci_ids,
608 .probe = rsxx_pci_probe,
609 .remove = rsxx_pci_remove,
610 .suspend = rsxx_pci_suspend,
611 .shutdown = rsxx_pci_shutdown,
614 static int __init rsxx_core_init(void)
618 st = rsxx_dev_init();
622 st = rsxx_dma_init();
624 goto dma_init_failed;
626 st = rsxx_creg_init();
628 goto creg_init_failed;
630 return pci_register_driver(&rsxx_pci_driver);
/*
 * Module exit: unregister the PCI driver (which detaches all cards).
 * NOTE(review): chunk appears truncated here — the sub-module cleanup
 * calls that should mirror rsxx_core_init() are not visible; confirm
 * against the full file before relying on this teardown being complete.
 */
640 static void __exit rsxx_core_cleanup(void)
642 pci_unregister_driver(&rsxx_pci_driver);
/* Register module entry/exit points. */
648 module_init(rsxx_core_init);
649 module_exit(rsxx_core_cleanup);