struct dma_pool *prp_small_pool;
int instance;
int queue_count;
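+ /* log2 of the doorbell stride; CAP.DSTRD spaces doorbells 4 << DSTRD bytes apart */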
+ int db_stride;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
};
struct nvme_prps {
- int npages;
+ int npages; /* 0 means small pool in use */
dma_addr_t first_dma;
__le64 *list[0];
};
if (status) {
bio_endio(bio, -EIO);
} else if (bio->bi_vcnt > bio->bi_idx) {
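+ /* register with sq_full only on the empty -> non-empty transition */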
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, bio);
wake_up_process(nvme_thread);
} else {
int offset = offset_in_page(dma_addr);
__le64 *prp_list;
dma_addr_t prp_dma;
- int nprps, npages, i, prp_page;
+ int nprps, npages, i;
struct nvme_prps *prps = NULL;
cmd->prp1 = cpu_to_le64(dma_addr);
}
nprps = DIV_ROUND_UP(length, PAGE_SIZE);
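+ /* the last entry of each PRP list page chains to the next page, leaving PAGE_SIZE - 8 bytes of entries per page */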
- npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
+ npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
if (!prps) {
cmd->prp2 = cpu_to_le64(dma_addr);
*len = (*len - length) + PAGE_SIZE;
return prps;
}
- prp_page = 0;
+
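+ /* a 256-byte block from the small pool holds up to 32 PRP entries */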
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
prps->npages = 0;
} else {
pool = dev->prp_page_pool;
- prps->npages = npages;
+ prps->npages = 1;
}
prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
kfree(prps);
return NULL;
}
- prps->list[prp_page++] = prp_list;
+ prps->list[0] = prp_list;
prps->first_dma = prp_dma;
cmd->prp2 = cpu_to_le64(prp_dma);
i = 0;
*len = (*len - length);
return prps;
}
- prps->list[prp_page++] = prp_list;
+ prps->list[prps->npages++] = prp_list;
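+ /* the old page's last slot becomes a chain pointer; its entry moves to slot 0 of the new page */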
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return IRQ_NONE;
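+ /* the CQ head doorbell sits one stride (1 << db_stride dwords) above the SQ tail doorbell */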
- writel(head, nvmeq->q_db + 1);
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
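+/*
+ * Issue an Identify command; the controller DMAs the 4KB result page to
+ * dma_addr.
+ */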
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
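+/*
+ * Read a feature value; any payload page is DMAed to dma_addr and the
+ * command-specific completion dword is returned through *result.
+ */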
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned nsid, unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.nsid = cpu_to_le32(nsid);
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
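+ /* each queue owns a pair of doorbells (SQ tail, CQ head) spaced 1 << db_stride dwords apart */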
- nvmeq->q_db = &dev->dbs[qid * 2];
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
if (!nvmeq)
- return NULL;
+ return ERR_PTR(-ENOMEM);
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
kfree(nvmeq);
- return NULL;
+ return ERR_PTR(result);
}
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
cap = readq(&dev->bar->cap);
timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
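+ /* CAP.DSTRD encodes the doorbell stride as 2 ^ (2 + DSTRD) bytes */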
+ dev->db_stride = NVME_CAP_STRIDE(cap);
while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
msleep(100);
sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
sg_init_table(sg, count);
- sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
- length -= (PAGE_SIZE - offset);
- for (i = 1; i < count; i++) {
- sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
- length -= PAGE_SIZE;
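+ /* only the first page may start at a non-zero offset */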
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
}
err = -ENOMEM;
}
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, int length,
- struct scatterlist *sg, int nents)
+ unsigned long addr, int length, struct scatterlist *sg)
{
int i, count;
count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
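+ /* dma_unmap_sg() takes the nents originally passed to dma_map_sg(), not its return value */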
- dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);
+ dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
for (i = 0; i < count; i++)
put_page(sg_page(&sg[i]));
}
-static int nvme_submit_user_admin_command(struct nvme_dev *dev,
- unsigned long addr, unsigned length,
- struct nvme_command *cmd)
-{
- int err, nents, tmplen = length;
- struct scatterlist *sg;
- struct nvme_prps *prps;
-
- nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
- if (nents < 0)
- return nents;
- prps = nvme_setup_prps(dev, &cmd->common, sg, &tmplen, GFP_KERNEL);
- if (tmplen != length)
- err = -ENOMEM;
- else
- err = nvme_submit_admin_cmd(dev, cmd, NULL);
- nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
- nvme_free_prps(dev, prps);
- return err ? -EIO : 0;
-}
-
-static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
- c.identify.cns = cpu_to_le32(cns);
-
- return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
-}
-
-static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.nsid = cpu_to_le32(ns->ns_id);
- c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
-
- return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
-}
-
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_dev *dev = ns->dev;
switch (io.opcode) {
case nvme_cmd_write:
case nvme_cmd_read:
+ case nvme_cmd_compare:
nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
length, &sg);
+ break;
default:
- return -EFAULT;
+ return -EINVAL;
}
if (nents < 0)
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);
- nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
+ nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg);
nvme_free_prps(dev, prps);
return status;
}
-static int nvme_download_firmware(struct nvme_ns *ns,
- struct nvme_dlfw __user *udlfw)
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
{
struct nvme_dev *dev = ns->dev;
- struct nvme_dlfw dlfw;
+ struct nvme_admin_cmd cmd;
struct nvme_command c;
- int nents, status, length;
+ int status, length, nents = 0;
struct scatterlist *sg;
- struct nvme_prps *prps;
+ struct nvme_prps *prps = NULL;
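+ /* pass-through admin commands can reconfigure the controller, so restrict them to root */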
- if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
- if (dlfw.length >= (1 << 30))
- return -EINVAL;
- length = dlfw.length * 4;
-
- nents = nvme_map_user_pages(dev, 1, dlfw.addr, length, &sg);
- if (nents < 0)
- return nents;
memset(&c, 0, sizeof(c));
- c.dlfw.opcode = nvme_admin_download_fw;
- c.dlfw.numd = cpu_to_le32(dlfw.length);
- c.dlfw.offset = cpu_to_le32(dlfw.offset);
- prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
- if (length != dlfw.length * 4)
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ nents = nvme_map_user_pages(dev, 1, cmd.addr, length, &sg);
+ if (nents < 0)
+ return nents;
+ prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
+ }
+
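+ /* nvme_setup_prps() shrinks 'length' if it cannot map the entire buffer */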
+ if (length != cmd.data_len)
status = -ENOMEM;
else
status = nvme_submit_admin_cmd(dev, &c, NULL);
- nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
- nvme_free_prps(dev, prps);
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg);
+ nvme_free_prps(dev, prps);
+ }
return status;
}
-static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
-{
- struct nvme_dev *dev = ns->dev;
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = nvme_admin_activate_fw;
- c.common.rsvd10[0] = cpu_to_le32(arg);
-
- return nvme_submit_admin_cmd(dev, &c, NULL);
-}
-
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long arg)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
switch (cmd) {
- case NVME_IOCTL_IDENTIFY_NS:
- return nvme_identify(ns, arg, 0);
- case NVME_IOCTL_IDENTIFY_CTRL:
- return nvme_identify(ns, arg, 1);
- case NVME_IOCTL_GET_RANGE_TYPE:
- return nvme_get_range_type(ns, arg);
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
- case NVME_IOCTL_DOWNLOAD_FW:
- return nvme_download_firmware(ns, (void __user *)arg);
- case NVME_IOCTL_ACTIVATE_FW:
- return nvme_activate_firmware(ns, arg);
default:
return -ENOTTY;
}
{
int status;
u32 result;
- struct nvme_command c;
u32 q_count = (count - 1) | ((count - 1) << 16);
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
- c.features.dword11 = cpu_to_le32(q_count);
-
- status = nvme_submit_admin_cmd(dev, &c, &result);
+ status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, 0, q_count, 0,
+ &result);
if (status)
return -EIO;
return min(result & 0xffff, result >> 16) + 1;
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
- int result, cpu, i, nr_io_queues;
+ int result, cpu, i, nr_io_queues, db_bar_size;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, dev->queues[0]);
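+ /* 4KB of registers plus two doorbells (4 << db_stride bytes each) per
+ * queue pair, admin queue included; remap the BAR if that exceeds the
+ * 8KB mapped at probe time */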
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
for (;;) {
for (i = 0; i < nr_io_queues; i++) {
dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
NVME_Q_DEPTH, i);
- if (!dev->queues[i + 1])
- return -ENOMEM;
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
dev->queue_count++;
}
int res, nn, i;
struct nvme_ns *ns, *next;
struct nvme_id_ctrl *ctrl;
- void *id;
+ struct nvme_id_ns *id_ns;
+ void *mem;
dma_addr_t dma_addr;
- struct nvme_command cid, crt;
res = nvme_setup_io_queues(dev);
if (res)
return res;
- /* XXX: Switch to a SG list once prp2 works */
- id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
- memset(&cid, 0, sizeof(cid));
- cid.identify.opcode = nvme_admin_identify;
- cid.identify.nsid = 0;
- cid.identify.prp1 = cpu_to_le64(dma_addr);
- cid.identify.cns = cpu_to_le32(1);
-
- res = nvme_submit_admin_cmd(dev, &cid, NULL);
+ res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
res = -EIO;
goto out_free;
}
- ctrl = id;
+ ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- cid.identify.cns = 0;
- memset(&crt, 0, sizeof(crt));
- crt.features.opcode = nvme_admin_get_features;
- crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
- crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
-
- for (i = 0; i <= nn; i++) {
- cid.identify.nsid = cpu_to_le32(i);
- res = nvme_submit_admin_cmd(dev, &cid, NULL);
+ id_ns = mem;
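+ /* valid namespace IDs start at 1 */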
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
if (res)
continue;
- if (((struct nvme_id_ns *)id)->ncap == 0)
+ if (id_ns->ncap == 0)
continue;
- crt.features.nsid = cpu_to_le32(i);
- res = nvme_submit_admin_cmd(dev, &crt, NULL);
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, 0,
+ dma_addr + 4096, NULL);
if (res)
continue;
- ns = nvme_alloc_ns(dev, i, id, id + 4096);
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
if (ns)
list_add_tail(&ns->list, &dev->namespaces);
}
list_for_each_entry(ns, &dev->namespaces, list)
add_disk(ns->disk);
- dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
- return 0;
+ goto out;
out_free:
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
nvme_ns_free(ns);
}
- dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
return res;
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.5");
+MODULE_VERSION("0.7");
module_init(nvme_init);
module_exit(nvme_exit);