#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
static int nvme_major;
module_param(nvme_major, int, 0);
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct list_head node;
struct nvme_queue **queues;
u32 __iomem *dbs;
struct pci_dev *pci_dev;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
};
/*
u16 sq_head;
u16 sq_tail;
u16 cq_head;
- u16 cq_cycle;
+ u16 cq_phase;
unsigned long cmdid_data[];
};
}
/* If you need more than four handlers, you'll need to change how
- * alloc_cmdid and nvme_process_cq work
+ * alloc_cmdid and nvme_process_cq work. Consider using a special
+ * CMD_CTX value instead, if that works for your situation.
*/
enum {
sync_completion_id = 0,
bio_completion_id,
};
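+/* Special values stored in place of a completion context pointer. Basing
+ * them on POISON_POINTER_DELTA guarantees they can never collide with a
+ * valid kernel pointer.
+ */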
+#define CMD_CTX_BASE (POISON_POINTER_DELTA + sync_completion_id)
+#define CMD_CTX_CANCELLED (0x2008 + CMD_CTX_BASE)
+
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
unsigned long data;
return data;
}
+static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
+{
+ nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)] =
+ CMD_CTX_CANCELLED;
+}
+
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
- return ns->dev->queues[1];
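+ /* Give each CPU its own I/O queue where possible; CPUs beyond
+ * queue_count wrap onto the largest power-of-two subset of queues.
+ */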
+ int qid, cpu = get_cpu();
+ if (cpu < ns->dev->queue_count)
+ qid = cpu + 1;
+ else
+ qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
+ return ns->dev->queues[qid];
}
static void put_nvmeq(struct nvme_queue *nvmeq)
{
+ put_cpu();
}
/**
bio_endio(bio, status ? -EIO : 0);
}
+/* length is in bytes */
+static void nvme_setup_prps(struct nvme_common_command *cmd,
+ struct scatterlist *sg, int length)
+{
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+
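+ /* PRP1 points at the first (possibly unaligned) chunk; PRP2, set
+ * below, covers the second page when the transfer spills past the
+ * first page boundary.
+ */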
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return;
+ }
+
+ /* XXX: support PRP lists */
+}
+
static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct bio *bio)
{
- struct nvme_rw_command *cmnd;
+ struct nvme_command *cmnd;
struct nvme_req_info *info;
enum dma_data_direction dma_dir;
int cmdid;
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
spin_lock_irqsave(&nvmeq->q_lock, flags);
- cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ memset(cmnd, 0, sizeof(*cmnd));
if (bio_data_dir(bio)) {
- cmnd->opcode = nvme_cmd_write;
+ cmnd->rw.opcode = nvme_cmd_write;
dma_dir = DMA_TO_DEVICE;
} else {
- cmnd->opcode = nvme_cmd_read;
+ cmnd->rw.opcode = nvme_cmd_read;
dma_dir = DMA_FROM_DEVICE;
}
nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);
- cmnd->flags = 1;
- cmnd->command_id = cmdid;
- cmnd->nsid = cpu_to_le32(ns->ns_id);
- cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
- /* XXX: Support more than one PRP */
- cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
- cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
- cmnd->control = cpu_to_le16(control);
- cmnd->dsmgmt = cpu_to_le32(dsmgmt);
+ cmnd->rw.flags = 1;
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
writel(nvmeq->sq_tail, nvmeq->q_db);
if (++nvmeq->sq_tail == nvmeq->q_depth)
struct nvme_completion *cqe)
{
struct sync_cmd_info *cmdinfo = ctx;
+ if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
+ return;
cmdinfo->result = le32_to_cpup(&cqe->result);
cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
wake_up_process(cmdinfo->task);
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
- u16 head, cycle;
+ u16 head, phase;
static const completion_fn completions[4] = {
[sync_completion_id] = sync_completion,
};
head = nvmeq->cq_head;
- cycle = nvmeq->cq_cycle;
+ phase = nvmeq->cq_phase;
for (;;) {
unsigned long data;
void *ptr;
unsigned char handler;
struct nvme_completion cqe = nvmeq->cqes[head];
- if ((le16_to_cpu(cqe.status) & 1) != cycle)
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
break;
nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
if (++head == nvmeq->q_depth) {
head = 0;
- cycle = !cycle;
+ phase = !phase;
}
data = free_cmdid(nvmeq, cqe.command_id);
* requires that 0.1% of your interrupts are handled, so this isn't
* a big problem.
*/
- if (head == nvmeq->cq_head && cycle == nvmeq->cq_cycle)
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return IRQ_NONE;
writel(head, nvmeq->q_db + 1);
nvmeq->cq_head = head;
- nvmeq->cq_cycle = cycle;
+ nvmeq->cq_phase = phase;
return IRQ_HANDLED;
}
return nvme_process_cq(data);
}
+static irqreturn_t nvme_irq_thread(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
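+/* With use_threaded_interrupts, the hard irq handler only peeks at the
+ * phase bit of the current completion entry; the queue is drained in
+ * the threaded handler above.
+ */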
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid_data(nvmeq, cmdid);
+ spin_unlock_irq(&nvmeq->q_lock);
+}
+
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
- u32 *result)
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result)
{
int cmdid;
struct sync_cmd_info cmdinfo;
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id);
if (cmdid < 0)
return cmdid;
cmd->common.command_id = cmdid;
- set_current_state(TASK_UNINTERRUPTIBLE);
- nvme_submit_cmd(q, cmd);
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
schedule();
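+ /* A fatal signal woke us up before the command completed; mark the
+ * command cancelled so its eventual completion is ignored.
+ */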
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
if (result)
*result = cmdinfo.result;
nvmeq->q_dmadev = dmadev;
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
- nvmeq->cq_cycle = 1;
+ nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
bio_list_init(&nvmeq->sq_cong);
nvmeq->q_db = &dev->dbs[qid * 2];
static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
const char *name)
{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq_thread,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
int result;
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+ if (!nvmeq)
+ return NULL;
+
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
goto free_nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
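+ /* Clear CC.EN so the controller is disabled while the admin queue
+ * registers are programmed.
+ */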
+ writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
return result;
}
-static int nvme_identify(struct nvme_ns *ns, void __user *addr, int cns)
+static int nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length,
+ struct scatterlist **sgp)
{
- struct nvme_dev *dev = ns->dev;
- int status;
- struct nvme_command c;
- void *page;
- dma_addr_t dma_addr;
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+
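+ /* Pin the user buffer and build a scatterlist for DMA. The buffer
+ * must be dword-aligned and non-empty.
+ */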
+ if (addr & 3)
+ return -EINVAL;
+ if (!length)
+ return -EINVAL;
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
- page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
- GFP_KERNEL);
+ sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ err = -ENOMEM;
+ goto put_pages;
+ }
+ sg_init_table(sg, count);
+ sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
+ length -= (PAGE_SIZE - offset);
+ for (i = 1; i < count; i++) {
+ sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
+ length -= PAGE_SIZE;
+ }
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto put_pages;
+
+ kfree(pages);
+ *sgp = sg;
+ return nents;
+
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return err;
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, int length,
+ struct scatterlist *sg, int nents)
+{
+ int i, count;
+
+ count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
+ dma_unmap_sg(&dev->pci_dev->dev, sg, nents,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ for (i = 0; i < count; i++)
+ put_page(sg_page(&sg[i]));
+}
+
+static int nvme_submit_user_admin_command(struct nvme_dev *dev,
+ unsigned long addr, unsigned length,
+ struct nvme_command *cmd)
+{
+ int err, nents;
+ struct scatterlist *sg;
+
+ nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
+ if (nents < 0)
+ return nents;
+ nvme_setup_prps(&cmd->common, sg, length);
+ err = nvme_submit_admin_cmd(dev, cmd, NULL);
+ nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
+ return err ? -EIO : 0;
+}
+
+static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
+{
+ struct nvme_command c;
memset(&c, 0, sizeof(c));
c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
- c.identify.prp1 = cpu_to_le64(dma_addr);
c.identify.cns = cpu_to_le32(cns);
- status = nvme_submit_admin_cmd(dev, &c, NULL);
+ return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
- if (status)
- status = -EIO;
- else if (copy_to_user(addr, page, 4096))
- status = -EFAULT;
+static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.nsid = cpu_to_le32(ns->ns_id);
+ c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
+
+ return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ u32 result;
+ int nents, status;
+ struct scatterlist *sg;
- dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = io.nblocks << io.block_shift;
+ nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
+ if (nents < 0)
+ return nents;
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(io.nsid);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks - 1);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag); /* XXX: endian? */
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+ /* XXX: metadata */
+ nvme_setup_prps(&c.common, sg, length);
+
+ nvmeq = get_nvmeq(ns);
+ /* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled. We may be preempted at any point, and be rescheduled
+ * to a different CPU. That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ status = nvme_submit_sync_cmd(nvmeq, &c, &result);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
+ put_user(result, &uio->result);
return status;
}
-static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
+static int nvme_download_firmware(struct nvme_ns *ns,
+ struct nvme_dlfw __user *udlfw)
{
struct nvme_dev *dev = ns->dev;
- int status;
+ struct nvme_dlfw dlfw;
struct nvme_command c;
- void *page;
- dma_addr_t dma_addr;
+ int nents, status;
+ struct scatterlist *sg;
- page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
- GFP_KERNEL);
+ if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
+ return -EFAULT;
+ if (dlfw.length >= (1 << 30))
+ return -EINVAL;
+
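+ /* dlfw.length is a count of dwords, hence the * 4 for the byte length. */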
+ nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
+ if (nents < 0)
+ return nents;
memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.nsid = cpu_to_le32(ns->ns_id);
- c.features.prp1 = cpu_to_le64(dma_addr);
- c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
+ c.dlfw.opcode = nvme_admin_download_fw;
+ c.dlfw.numd = cpu_to_le32(dlfw.length);
+ c.dlfw.offset = cpu_to_le32(dlfw.offset);
+ nvme_setup_prps(&c.common, sg, dlfw.length * 4);
status = nvme_submit_admin_cmd(dev, &c, NULL);
+ nvme_unmap_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, sg, nents);
+ return status;
+}
- /* XXX: Assuming first range for now */
- if (status)
- status = -EIO;
- else if (copy_to_user(addr, page, 64))
- status = -EFAULT;
+static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_command c;
- dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_activate_fw;
+ c.common.rsvd10[0] = cpu_to_le32(arg);
- return status;
+ return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_IDENTIFY_NS:
- return nvme_identify(ns, (void __user *)arg, 0);
+ return nvme_identify(ns, arg, 0);
case NVME_IOCTL_IDENTIFY_CTRL:
- return nvme_identify(ns, (void __user *)arg, 1);
+ return nvme_identify(ns, arg, 1);
case NVME_IOCTL_GET_RANGE_TYPE:
- return nvme_get_range_type(ns, (void __user *)arg);
+ return nvme_get_range_type(ns, arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ case NVME_IOCTL_DOWNLOAD_FW:
+ return nvme_download_firmware(ns, (void __user *)arg);
+ case NVME_IOCTL_ACTIVATE_FW:
+ return nvme_activate_firmware(ns, arg);
default:
return -ENOTTY;
}
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
return min(result & 0xffff, result >> 16) + 1;
}
-/* XXX: Create per-CPU queues */
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
- int this_cpu;
+ int result, cpu, i, nr_queues;
- set_queue_count(dev, 1);
+ nr_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_queues)
+ nr_queues = result;
- this_cpu = get_cpu();
- dev->queues[1] = nvme_create_queue(dev, 1, NVME_Q_DEPTH, this_cpu);
- put_cpu();
- if (!dev->queues[1])
- return -ENOMEM;
- dev->queue_count++;
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ for (i = 0; i < nr_queues; i++)
+ dev->entry[i].entry = i;
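+ /* pci_enable_msix() returns 0 on success, a positive count of the
+ * vectors actually available (retry with that many), or a negative
+ * errno, in which case fall back to a single I/O queue.
+ */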
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry, nr_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_queues = result;
+ continue;
+ } else {
+ nr_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
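+ /* Hint each per-queue vector onto a different online CPU. */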
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (!dev->queues[i + 1])
+ return -ENOMEM;
+ dev->queue_count++;
+ }
return 0;
}
{
int res, nn, i;
struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
void *id;
dma_addr_t dma_addr;
struct nvme_command cid, crt;
goto out_free;
}
- nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);
+ ctrl = id;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
cid.identify.cns = 0;
memset(&crt, 0, sizeof(crt));
static int __devinit nvme_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int result = -ENOMEM;
+ int bars, result = -ENOMEM;
struct nvme_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
GFP_KERNEL);
if (!dev->entry)
goto free;
- dev->queues = kcalloc(2, sizeof(void *), GFP_KERNEL);
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
if (!dev->queues)
goto free;
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
- dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
nvme_set_instance(dev);
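+ /* Until MSI-X is enabled, the admin queue interrupt uses the device's
+ * legacy vector.
+ */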
+ dev->entry[0].vector = pdev->irq;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar) {
result = -ENOMEM;
- goto disable;
+ goto disable_msix;
}
result = nvme_configure_admin_queue(dev);
nvme_free_queues(dev);
unmap:
iounmap(dev->bar);
- disable:
+ disable_msix:
pci_disable_msix(pdev);
nvme_release_instance(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
free:
kfree(dev->queues);
kfree(dev->entry);
pci_disable_msix(pdev);
iounmap(dev->bar);
nvme_release_instance(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1");
+MODULE_VERSION("0.2");
module_init(nvme_init);
module_exit(nvme_exit);