NVMe: Check returns from nvme_alloc_queue()
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 3d917a87ea93e88ae0561354761dfbdba0c25c94..dc821776be949d8b2b298ce265bc413c14133c54 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -48,7 +48,6 @@ module_param(nvme_major, int, 0);
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-       struct list_head node;
        struct nvme_queue **queues;
        u32 __iomem *dbs;
        struct pci_dev *pci_dev;
@@ -58,6 +57,9 @@ struct nvme_dev {
        struct msix_entry *entry;
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
+       char serial[20];
+       char model[40];
+       char firmware_rev[8];
 };
 
 /*
@@ -240,6 +242,36 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
        bio_endio(bio, status ? -EIO : 0);
 }
 
+/* length is in bytes */
+static void nvme_setup_prps(struct nvme_common_command *cmd,
+                                       struct scatterlist *sg, int length)
+{
+       int dma_len = sg_dma_len(sg);
+       u64 dma_addr = sg_dma_address(sg);
+       int offset = offset_in_page(dma_addr);
+
+       cmd->prp1 = cpu_to_le64(dma_addr);
+       length -= (PAGE_SIZE - offset);
+       if (length <= 0)
+               return;
+
+       dma_len -= (PAGE_SIZE - offset);
+       if (dma_len) {
+               dma_addr += (PAGE_SIZE - offset);
+       } else {
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       if (length <= PAGE_SIZE) {
+               cmd->prp2 = cpu_to_le64(dma_addr);
+               return;
+       }
+
+       /* XXX: support PRP lists */
+}
+
 static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
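
The new nvme_setup_prps() above handles the simple cases of NVMe's PRP addressing: PRP1 points at the first, possibly unaligned, page of the transfer, PRP2 covers a second page, and anything larger needs a PRP list, which the XXX defers. A minimal userspace sketch of the same page arithmetic (prp_entries_needed() is a made-up name for illustration):

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* How many page-sized data pointers a transfer needs, mirroring
	 * the offset handling in nvme_setup_prps(). */
	static unsigned prp_entries_needed(unsigned long long dma_addr,
					   unsigned length)
	{
		unsigned offset = dma_addr & (PAGE_SIZE - 1);
		unsigned first = PAGE_SIZE - offset;	/* bytes PRP1 covers */

		if (length <= first)
			return 1;			/* PRP1 alone suffices */
		if (length - first <= PAGE_SIZE)
			return 2;			/* PRP1 + PRP2 */
		/* Beyond two pages, PRP2 must point at a PRP list instead. */
		return 1 + (length - first + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	int main(void)
	{
		printf("%u\n", prp_entries_needed(0x1200, 8192));  /* prints 3 */
		return 0;
	}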
@@ -261,7 +293,7 @@ static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                                                struct bio *bio)
 {
-       struct nvme_rw_command *cmnd;
+       struct nvme_command *cmnd;
        struct nvme_req_info *info;
        enum dma_data_direction dma_dir;
        int cmdid;
@@ -290,27 +322,27 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
        spin_lock_irqsave(&nvmeq->q_lock, flags);
-       cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;
+       cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 
+       memset(cmnd, 0, sizeof(*cmnd));
        if (bio_data_dir(bio)) {
-               cmnd->opcode = nvme_cmd_write;
+               cmnd->rw.opcode = nvme_cmd_write;
                dma_dir = DMA_TO_DEVICE;
        } else {
-               cmnd->opcode = nvme_cmd_read;
+               cmnd->rw.opcode = nvme_cmd_read;
                dma_dir = DMA_FROM_DEVICE;
        }
 
        nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);
 
-       cmnd->flags = 1;
-       cmnd->command_id = cmdid;
-       cmnd->nsid = cpu_to_le32(ns->ns_id);
-       cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
-       /* XXX: Support more than one PRP */
-       cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
-       cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
-       cmnd->control = cpu_to_le16(control);
-       cmnd->dsmgmt = cpu_to_le32(dsmgmt);
+       cmnd->rw.flags = 1;
+       cmnd->rw.command_id = cmdid;
+       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+       nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
+       cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+       cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
+       cmnd->rw.control = cpu_to_le16(control);
+       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
        writel(nvmeq->sq_tail, nvmeq->q_db);
        if (++nvmeq->sq_tail == nvmeq->q_depth)
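
bio->bi_sector counts 512-byte sectors regardless of the namespace's block size, so the slba line above shifts by (lba_shift - 9) to convert to device LBAs; the length field is zero-based, hence the "- 1". A small self-checking sketch of the conversion, with illustrative values:

	#include <assert.h>

	int main(void)
	{
		unsigned long long bi_sector = 80;	/* 512B units = byte 40960 */
		int lba_shift = 12;			/* 4096-byte LBAs */

		assert((bi_sector >> (lba_shift - 9)) == 10);	/* 40960 / 4096 */
		return 0;
	}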
@@ -587,6 +619,9 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
        int result;
        struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
 
+       if (!nvmeq)
+               return NULL;
+
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                goto free_nvmeq;
@@ -623,6 +658,8 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
        nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+       if (!nvmeq)
+               return -ENOMEM;
 
        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;
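
AQA packs two zero-based queue sizes into one register: per the NVMe spec, ASQS occupies bits 11:0 and ACQS bits 27:16, which is what the shift-and-or above builds. For example:

	#include <assert.h>

	int main(void)
	{
		unsigned aqa = 64 - 1;		/* zero-based depth of 64 */

		aqa |= aqa << 16;		/* ACQS above ASQS */
		assert(aqa == 0x003f003f);
		return 0;
	}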
@@ -647,62 +684,147 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
        return result;
 }
 
-static int nvme_identify(struct nvme_ns *ns, void __user *addr, int cns)
+static int nvme_map_user_pages(struct nvme_dev *dev, int write,
+                               unsigned long addr, unsigned length,
+                               struct scatterlist **sgp)
 {
-       struct nvme_dev *dev = ns->dev;
-       int status;
-       struct nvme_command c;
-       void *page;
-       dma_addr_t dma_addr;
+       int i, err, count, nents, offset;
+       struct scatterlist *sg;
+       struct page **pages;
+
+       if (addr & 3)
+               return -EINVAL;
+       if (!length)
+               return -EINVAL;
+
+       offset = offset_in_page(addr);
+       count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+       pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       err = get_user_pages_fast(addr, count, 1, pages);
+       if (err < count) {
+               count = err;
+               err = -EFAULT;
+               goto put_pages;
+       }
 
-       page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
-                                                               GFP_KERNEL);
+       err = -ENOMEM;
+       sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
+       if (!sg)
+               goto put_pages;
+       sg_init_table(sg, count);
+       sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
+       length -= (PAGE_SIZE - offset);
+       for (i = 1; i < count; i++) {
+               sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
+               length -= PAGE_SIZE;
+       }
 
-       memset(&c, 0, sizeof(c));
-       c.identify.opcode = nvme_admin_identify;
-       c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
-       c.identify.prp1 = cpu_to_le64(dma_addr);
-       c.identify.cns = cpu_to_le32(cns);
+       nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       if (!nents) {
+               kfree(sg);
+               goto put_pages;
+       }
+
+       kfree(pages);
+       *sgp = sg;
+       return nents;
+
+ put_pages:
+       for (i = 0; i < count; i++)
+               put_page(pages[i]);
+       kfree(pages);
+       return err;
+}
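
A buffer that starts mid-page straddles one more page than length / PAGE_SIZE suggests, which is why the count above folds the starting offset in before rounding up. A standalone check of that arithmetic:

	#include <assert.h>

	#define PAGE_SIZE		4096u
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long addr = 0x10ff0;	/* 16 bytes before a page end */
		unsigned offset = addr & (PAGE_SIZE - 1);

		/* 32 bytes spill across the boundary: two pages to pin. */
		assert(DIV_ROUND_UP(offset + 32, PAGE_SIZE) == 2);
		return 0;
	}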
 
-       status = nvme_submit_admin_cmd(dev, &c, NULL);
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+                               unsigned long addr, int length,
+                               struct scatterlist *sg, int nents)
+{
+       int i, count;
 
-       if (status)
-               status = -EIO;
-       else if (copy_to_user(addr, page, 4096))
-               status = -EFAULT;
+       count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
+       dma_unmap_sg(&dev->pci_dev->dev, sg, nents,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-       dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+       for (i = 0; i < count; i++)
+               put_page(sg_page(&sg[i]));
+       kfree(sg);
+}
 
-       return status;
+static int nvme_submit_user_admin_command(struct nvme_dev *dev,
+                                       unsigned long addr, unsigned length,
+                                       struct nvme_command *cmd)
+{
+       int err, nents;
+       struct scatterlist *sg;
+
+       nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
+       if (nents < 0)
+               return nents;
+       nvme_setup_prps(&cmd->common, sg, length);
+       err = nvme_submit_admin_cmd(dev, cmd, NULL);
+       nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
+       return err ? -EIO : 0;
 }
 
-static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
+static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
 {
-       struct nvme_dev *dev = ns->dev;
-       int status;
        struct nvme_command c;
-       void *page;
-       dma_addr_t dma_addr;
 
-       page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
-                                                               GFP_KERNEL);
+       memset(&c, 0, sizeof(c));
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
+       c.identify.cns = cpu_to_le32(cns);
+
+       return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
+
+static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
+{
+       struct nvme_command c;
 
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
        c.features.nsid = cpu_to_le32(ns->ns_id);
-       c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
 
-       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
 
-       /* XXX: Assuming first range for now */
-       if (status)
-               status = -EIO;
-       else if (copy_to_user(addr, page, 64))
-               status = -EFAULT;
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_queue *nvmeq;
+       struct nvme_user_io io;
+       struct nvme_command c;
+       unsigned length;
+       u32 result;
+       int nents, status;
+       struct scatterlist *sg;
 
-       dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+       if (copy_from_user(&io, uio, sizeof(io)))
+               return -EFAULT;
+       length = io.nblocks << io.block_shift;
+       nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
+       if (nents < 0)
+               return nents;
+
+       memset(&c, 0, sizeof(c));
+       c.rw.opcode = io.opcode;
+       c.rw.flags = io.flags;
+       c.rw.nsid = cpu_to_le32(io.nsid);
+       c.rw.slba = cpu_to_le64(io.slba);
+       c.rw.length = cpu_to_le16(io.nblocks - 1);
+       c.rw.control = cpu_to_le16(io.control);
+       c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+       c.rw.reftag = cpu_to_le32(io.reftag);   /* XXX: endian? */
+       c.rw.apptag = cpu_to_le16(io.apptag);
+       c.rw.appmask = cpu_to_le16(io.appmask);
+       /* XXX: metadata */
+       nvme_setup_prps(&c.common, sg, length);
+
+       nvmeq = get_nvmeq(ns);
+       status = nvme_submit_sync_cmd(nvmeq, &c, &result);
+       put_nvmeq(nvmeq);
 
+       nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
+       if (put_user(result, &uio->result))
+               status = -EFAULT;
        return status;
 }
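
The io.opcode & 1 test above works because NVMe I/O opcodes encode the data direction in bit 0: Write is 0x01, Read is 0x02, so odd opcodes send data to the device. A one-line sanity check:

	#include <assert.h>

	enum { nvme_cmd_write = 0x01, nvme_cmd_read = 0x02 };

	int main(void)
	{
		assert((nvme_cmd_write & 1) && !(nvme_cmd_read & 1));
		return 0;
	}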
 
@@ -713,11 +835,13 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 
        switch (cmd) {
        case NVME_IOCTL_IDENTIFY_NS:
-               return nvme_identify(ns, (void __user *)arg, 0);
+               return nvme_identify(ns, arg, 0);
        case NVME_IOCTL_IDENTIFY_CTRL:
-               return nvme_identify(ns, (void __user *)arg, 1);
+               return nvme_identify(ns, arg, 1);
        case NVME_IOCTL_GET_RANGE_TYPE:
-               return nvme_get_range_type(ns, (void __user *)arg);
+               return nvme_get_range_type(ns, arg);
+       case NVME_IOCTL_SUBMIT_IO:
+               return nvme_submit_io(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
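
With the identify paths now taking a raw user address, a caller just hands the ioctl a large enough buffer and the driver pins it and DMAs the 4096-byte identify page straight into it. A hedged userspace sketch, assuming the ioctl numbers come from the driver's linux/nvme.h header and that the namespace node is named nvme0n1:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/nvme.h>

	int main(void)
	{
		void *buf;
		int fd = open("/dev/nvme0n1", O_RDONLY);

		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;
		/* The driver insists on dword alignment; page alignment
		 * keeps the transfer within nvme_setup_prps()'s limits. */
		if (ioctl(fd, NVME_IOCTL_IDENTIFY_CTRL, buf) < 0)
			perror("NVME_IOCTL_IDENTIFY_CTRL");
		return 0;
	}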
@@ -862,6 +986,7 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
 {
        int res, nn, i;
        struct nvme_ns *ns, *next;
+       struct nvme_id_ctrl *ctrl;
        void *id;
        dma_addr_t dma_addr;
        struct nvme_command cid, crt;
@@ -886,7 +1011,11 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
                goto out_free;
        }
 
-       nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);
+       ctrl = id;
+       nn = le32_to_cpup(&ctrl->nn);
+       memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+       memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+       memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
 
        cid.identify.cns = 0;
        memset(&crt, 0, sizeof(crt));
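
Note that sn, mn and fr in the identify data are fixed-width, space-padded fields with no NUL terminator, so the memcpy()s above are correct and any consumer must bound its reads. An illustrative struct (fake_dev is made up) printed safely:

	#include <stdio.h>

	struct fake_dev { char serial[20]; char model[40]; char firmware_rev[8]; };

	int main(void)
	{
		struct fake_dev d = { "S123ABC", "Widget NVMe", "1.0     " };

		/* Precision caps each read at the field width. */
		printf("%.20s %.40s %.8s\n", d.serial, d.model, d.firmware_rev);
		return 0;
	}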
@@ -979,6 +1108,7 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, dev);
        dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
        nvme_set_instance(dev);
+       dev->entry[0].vector = pdev->irq;
 
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
        if (!dev->bar) {