#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
+#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#define SQ_SIZE(depth) ((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth) ((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS 64
+#define IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
module_param(nvme_major, int, 0);
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_queue **queues;
u32 __iomem *dbs;
struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
int instance;
int queue_count;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
};
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
struct device *q_dmadev;
+ struct nvme_dev *dev;
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
struct bio_list sq_cong;
u32 __iomem *q_db;
u16 q_depth;
u16 sq_head;
u16 sq_tail;
u16 cq_head;
- u16 cq_cycle;
+ u16 cq_phase;
unsigned long cmdid_data[];
};
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}
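+/*
+ * The per-queue cmdid_data bitmap is followed in memory by an array of
+ * nvme_cmd_info, one entry per command ID, holding the handler context
+ * and the jiffies value at which the command times out.
+ */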
+struct nvme_cmd_info {
+ unsigned long ctx;
+ unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
/**
* alloc_cmdid - Allocate a Command ID
* @param nvmeq The queue that will be used for this command
* Passing in a pointer that's not 4-byte aligned will cause a BUG.
* We can change this if it becomes a problem.
*/
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
+ unsigned timeout)
{
int depth = nvmeq->q_depth;
- unsigned long data = (unsigned long)ctx | handler;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
int cmdid;
BUG_ON((unsigned long)ctx & 3);
return -EBUSY;
} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
- nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
+ info[cmdid].ctx = (unsigned long)ctx | handler;
+ info[cmdid].timeout = jiffies + timeout;
return cmdid;
}
static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
- int handler)
+ int handler, unsigned timeout)
{
int cmdid;
wait_event_killable(nvmeq->sq_full,
- (cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
return (cmdid < 0) ? -EINTR : cmdid;
}
/* If you need more than four handlers, you'll need to change how
- * alloc_cmdid and nvme_process_cq work
+ * alloc_cmdid and nvme_process_cq work. Consider using a special
+ * CMD_CTX value instead, if that works for your situation.
*/
enum {
sync_completion_id = 0,
bio_completion_id,
};
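+/*
+ * Special context values for cancelled, double-completed and unknown
+ * command IDs.  Basing them on POISON_POINTER_DELTA keeps them distinct
+ * from any valid ctx pointer, and their low two bits still encode
+ * sync_completion_id, so sync_completion() is the handler that sees them.
+ */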
+#define CMD_CTX_BASE (POISON_POINTER_DELTA + sync_completion_id)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
unsigned long data;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- data = nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)];
+ if (cmdid >= nvmeq->q_depth)
+ return CMD_CTX_INVALID;
+ data = info[cmdid].ctx;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
clear_bit(cmdid, nvmeq->cmdid_data);
wake_up(&nvmeq->sq_full);
return data;
}
+static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
+{
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+}
+
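+/*
+ * Map the calling CPU to an I/O queue (queue 0 is the admin queue, so CPU n
+ * normally uses queue n + 1).  With more CPUs than queues, CPUs share queues
+ * by taking the CPU number modulo a power of two.  Preemption stays disabled
+ * until put_nvmeq() calls put_cpu().
+ */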
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
- return ns->dev->queues[1];
+ int qid, cpu = get_cpu();
+ if (cpu < ns->dev->queue_count)
+ qid = cpu + 1;
+ else
+ qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
+ return ns->dev->queues[qid];
}
static void put_nvmeq(struct nvme_queue *nvmeq)
{
+ put_cpu();
}
/**
return 0;
}
-struct nvme_req_info {
+struct nvme_prps {
+ int npages;
+ dma_addr_t first_dma;
+ __le64 *list[0];
+};
+
+static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ dma_addr_t prp_dma;
+
+ if (!prps)
+ return;
+
+ prp_dma = prps->first_dma;
+
+ if (prps->npages == 0)
+ dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
+ for (i = 0; i < prps->npages; i++) {
+ __le64 *prp_list = prps->list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(prps);
+}
+
+struct nvme_bio {
struct bio *bio;
int nents;
+ struct nvme_prps *prps;
struct scatterlist sg[0];
};
/* XXX: use a mempool */
-static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
+static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
- return kmalloc(sizeof(struct nvme_req_info) +
+ return kzalloc(sizeof(struct nvme_bio) +
sizeof(struct scatterlist) * nseg, gfp);
}
-static void free_info(struct nvme_req_info *info)
+static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
- kfree(info);
+ nvme_free_prps(nvmeq->dev, nbio->prps);
+ kfree(nbio);
}
static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
- struct nvme_req_info *info = ctx;
- struct bio *bio = info->bio;
+ struct nvme_bio *nbio = ctx;
+ struct bio *bio = nbio->bio;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
+ dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- free_info(info);
+ free_nbio(nvmeq, nbio);
bio_endio(bio, status ? -EIO : 0);
}
-static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
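+/*
+ * Build the PRP entries for a command.  The first PRP is stored in the
+ * command itself; if the transfer needs more, PRP list pages are allocated
+ * from the DMA pools and chained through their last entry.  The caller
+ * releases the chain with nvme_free_prps().
+ */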
+/* length is in bytes */
+static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd,
+ struct scatterlist *sg, int length)
+{
+ struct dma_pool *pool;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ dma_addr_t prp_dma;
+ int nprps, npages, i, prp_page;
+ struct nvme_prps *prps = NULL;
+
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return prps;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return prps;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
+ prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
+ prp_page = 0;
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ prps->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ prps->npages = npages;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ prps->list[prp_page++] = prp_list;
+ prps->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8 - 1) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ prps->list[prp_page++] = prp_list;
+ old_prp_list[i] = cpu_to_le64(prp_dma);
+ i = 0;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return prps;
+}
+
+static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
- struct bio_vec *bvec;
- struct scatterlist *sg = info->sg;
- int i, nsegs;
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, nsegs = 0;
- sg_init_table(sg, psegs);
+ sg_init_table(nbio->sg, psegs);
bio_for_each_segment(bvec, bio, i) {
- sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
- /* XXX: handle non-mergable here */
- nsegs++;
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ /* Check bvprv && offset == 0 */
+ sg = sg ? sg + 1 : nbio->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ bvprv = bvec;
}
- info->nents = nsegs;
-
- return dma_map_sg(dev, info->sg, info->nents, dma_dir);
+ nbio->nents = nsegs;
+ sg_mark_end(sg);
+ return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
}
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct bio *bio)
{
- struct nvme_rw_command *cmnd;
- struct nvme_req_info *info;
+ struct nvme_command *cmnd;
+ struct nvme_bio *nbio;
enum dma_data_direction dma_dir;
- int cmdid;
+ int cmdid, result = -ENOMEM;
u16 control;
u32 dsmgmt;
- unsigned long flags;
int psegs = bio_phys_segments(ns->queue, bio);
- info = alloc_info(psegs, GFP_NOIO);
- if (!info)
- goto congestion;
- info->bio = bio;
+ nbio = alloc_nbio(psegs, GFP_ATOMIC);
+ if (!nbio)
+ goto nomem;
+ nbio->bio = bio;
- cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
if (unlikely(cmdid < 0))
- goto free_info;
+ goto free_nbio;
control = 0;
if (bio->bi_rw & REQ_FUA)
if (bio->bi_rw & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
- cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ memset(cmnd, 0, sizeof(*cmnd));
if (bio_data_dir(bio)) {
- cmnd->opcode = nvme_cmd_write;
+ cmnd->rw.opcode = nvme_cmd_write;
dma_dir = DMA_TO_DEVICE;
} else {
- cmnd->opcode = nvme_cmd_read;
+ cmnd->rw.opcode = nvme_cmd_read;
dma_dir = DMA_FROM_DEVICE;
}
- nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);
+ result = -ENOMEM;
+ if (nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs) == 0)
+ goto free_nbio;
- cmnd->flags = 1;
- cmnd->command_id = cmdid;
- cmnd->nsid = cpu_to_le32(ns->ns_id);
- cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
- /* XXX: Support more than one PRP */
- cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
- cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
- cmnd->control = cpu_to_le16(control);
- cmnd->dsmgmt = cpu_to_le32(dsmgmt);
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
+ bio->bi_size);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-
return 0;
- free_info:
- free_info(info);
- congestion:
- return -EBUSY;
+ free_nbio:
+ free_nbio(nvmeq, nbio);
+ nomem:
+ return result;
}
/*
{
struct nvme_ns *ns = q->queuedata;
struct nvme_queue *nvmeq = get_nvmeq(ns);
-
- if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
+ int result = -EBUSY;
+
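+	/*
+	 * Submit directly only while the congestion list is empty, so bios are
+	 * not reordered.  If submission fails, park the bio on sq_cong and hook
+	 * sq_cong_wait (which wakes the nvme kthread) onto sq_full so the queue
+	 * is retried once a command ID becomes free.
+	 */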
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, bio);
}
+
+ spin_unlock_irq(&nvmeq->q_lock);
put_nvmeq(nvmeq);
return 0;
struct nvme_completion *cqe)
{
struct sync_cmd_info *cmdinfo = ctx;
+ if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
+ return;
+ if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
+ dev_warn(nvmeq->q_dmadev,
+ "completed id %d twice on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+ if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
+ dev_warn(nvmeq->q_dmadev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
cmdinfo->result = le32_to_cpup(&cqe->result);
cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
wake_up_process(cmdinfo->task);
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
- u16 head, cycle;
+ u16 head, phase;
static const completion_fn completions[4] = {
[sync_completion_id] = sync_completion,
};
head = nvmeq->cq_head;
- cycle = nvmeq->cq_cycle;
+ phase = nvmeq->cq_phase;
for (;;) {
unsigned long data;
void *ptr;
unsigned char handler;
struct nvme_completion cqe = nvmeq->cqes[head];
- if ((le16_to_cpu(cqe.status) & 1) != cycle)
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
break;
nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
if (++head == nvmeq->q_depth) {
head = 0;
- cycle = !cycle;
+ phase = !phase;
}
data = free_cmdid(nvmeq, cqe.command_id);
* requires that 0.1% of your interrupts are handled, so this isn't
* a big problem.
*/
- if (head == nvmeq->cq_head && cycle == nvmeq->cq_cycle)
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return IRQ_NONE;
writel(head, nvmeq->q_db + 1);
nvmeq->cq_head = head;
- nvmeq->cq_cycle = cycle;
+ nvmeq->cq_phase = phase;
return IRQ_HANDLED;
}
static irqreturn_t nvme_irq(int irq, void *data)
{
- return nvme_process_cq(data);
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
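+/*
+ * With use_threaded_interrupts, the hard irq handler only peeks at the phase
+ * bit of the next completion entry; the actual completion processing is
+ * deferred to the threaded handler (nvme_irq) via IRQ_WAKE_THREAD.
+ */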
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid_data(nvmeq, cmdid);
+ spin_unlock_irq(&nvmeq->q_lock);
}
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
- u32 *result)
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
{
int cmdid;
struct sync_cmd_info cmdinfo;
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
+ timeout);
if (cmdid < 0)
return cmdid;
cmd->common.command_id = cmdid;
- set_current_state(TASK_UNINTERRUPTIBLE);
- nvme_submit_cmd(q, cmd);
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
schedule();
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
if (result)
*result = cmdinfo.result;
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
- return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
if (!nvmeq)
return NULL;
goto free_cqdma;
nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
- nvmeq->cq_cycle = 1;
+ nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
nvmeq->q_db = &dev->dbs[qid * 2];
nvmeq->q_depth = depth;
static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
const char *name)
{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
int result;
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+ if (!nvmeq)
+ return NULL;
+
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
goto free_nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
return result;
}
-static int nvme_identify(struct nvme_ns *ns, void __user *addr, int cns)
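+/*
+ * Pin down a user buffer with get_user_pages_fast(), build a scatterlist
+ * covering it and DMA-map that list.  The buffer must be 4-byte aligned;
+ * nvme_unmap_user_pages() undoes the mapping and releases the pages.
+ */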
+static int nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length,
+ struct scatterlist **sgp)
{
- struct nvme_dev *dev = ns->dev;
- int status;
- struct nvme_command c;
- void *page;
- dma_addr_t dma_addr;
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+
+ if (addr & 3)
+ return -EINVAL;
+ if (!length)
+ return -EINVAL;
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
- page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
- GFP_KERNEL);
+ sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
+ sg_init_table(sg, count);
+ sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
+ length -= (PAGE_SIZE - offset);
+ for (i = 1; i < count; i++) {
+ sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
+ length -= PAGE_SIZE;
+ }
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto put_pages;
+
+ kfree(pages);
+ *sgp = sg;
+ return nents;
+
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return err;
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, int length,
+ struct scatterlist *sg, int nents)
+{
+ int i, count;
+
+ count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
+ dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);
+
+ for (i = 0; i < count; i++)
+ put_page(sg_page(&sg[i]));
+}
+
+static int nvme_submit_user_admin_command(struct nvme_dev *dev,
+ unsigned long addr, unsigned length,
+ struct nvme_command *cmd)
+{
+ int err, nents;
+ struct scatterlist *sg;
+ struct nvme_prps *prps;
+
+ nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
+ if (nents < 0)
+ return nents;
+ prps = nvme_setup_prps(dev, &cmd->common, sg, length);
+ err = nvme_submit_admin_cmd(dev, cmd, NULL);
+ nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
+ nvme_free_prps(dev, prps);
+ return err ? -EIO : 0;
+}
+
+static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
+{
+ struct nvme_command c;
memset(&c, 0, sizeof(c));
c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
- c.identify.prp1 = cpu_to_le64(dma_addr);
c.identify.cns = cpu_to_le32(cns);
- status = nvme_submit_admin_cmd(dev, &c, NULL);
+ return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
- if (status)
- status = -EIO;
- else if (copy_to_user(addr, page, 4096))
- status = -EFAULT;
+static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.nsid = cpu_to_le32(ns->ns_id);
+ c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
- dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+ return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
+}
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ u32 result;
+ int nents, status;
+ struct scatterlist *sg;
+ struct nvme_prps *prps;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = io.nblocks << io.block_shift;
+ nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
+ if (nents < 0)
+ return nents;
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(io.nsid);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks - 1);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag); /* XXX: endian? */
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+ /* XXX: metadata */
+ prps = nvme_setup_prps(dev, &c.common, sg, length);
+
+ nvmeq = get_nvmeq(ns);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled.  We may be preempted at any point, and be rescheduled
+ * to a different CPU.  That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
+ nvme_free_prps(dev, prps);
+ put_user(result, &uio->result);
return status;
}
-static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
+static int nvme_download_firmware(struct nvme_ns *ns,
+ struct nvme_dlfw __user *udlfw)
{
struct nvme_dev *dev = ns->dev;
- int status;
+ struct nvme_dlfw dlfw;
struct nvme_command c;
- void *page;
- dma_addr_t dma_addr;
+ int nents, status;
+ struct scatterlist *sg;
+ struct nvme_prps *prps;
- page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
- GFP_KERNEL);
+ if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
+ return -EFAULT;
+ if (dlfw.length >= (1 << 30))
+ return -EINVAL;
+
+ nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
+ if (nents < 0)
+ return nents;
memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.nsid = cpu_to_le32(ns->ns_id);
- c.features.prp1 = cpu_to_le64(dma_addr);
- c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
+ c.dlfw.opcode = nvme_admin_download_fw;
+ c.dlfw.numd = cpu_to_le32(dlfw.length);
+ c.dlfw.offset = cpu_to_le32(dlfw.offset);
+ prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);
status = nvme_submit_admin_cmd(dev, &c, NULL);
+ nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
+ nvme_free_prps(dev, prps);
+ return status;
+}
- /* XXX: Assuming first range for now */
- if (status)
- status = -EIO;
- else if (copy_to_user(addr, page, 64))
- status = -EFAULT;
+static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_command c;
- dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_activate_fw;
+ c.common.rsvd10[0] = cpu_to_le32(arg);
- return status;
+ return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_IDENTIFY_NS:
- return nvme_identify(ns, (void __user *)arg, 0);
+ return nvme_identify(ns, arg, 0);
case NVME_IOCTL_IDENTIFY_CTRL:
- return nvme_identify(ns, (void __user *)arg, 1);
+ return nvme_identify(ns, arg, 1);
case NVME_IOCTL_GET_RANGE_TYPE:
- return nvme_get_range_type(ns, (void __user *)arg);
+ return nvme_get_range_type(ns, arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ case NVME_IOCTL_DOWNLOAD_FW:
+ return nvme_download_firmware(ns, (void __user *)arg);
+ case NVME_IOCTL_ACTIVATE_FW:
+ return nvme_activate_firmware(ns, arg);
default:
return -ENOTTY;
}
.ioctl = nvme_ioctl,
};
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ }
+}
+
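+/*
+ * One kernel thread polls all registered devices roughly once a second:
+ * it reaps any completions that have not yet been processed and resubmits
+ * bios that were parked on sq_cong while the submission queue was full.
+ */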
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk(KERN_DEBUG "process_cq did something\n");
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
return min(result & 0xffff, result >> 16) + 1;
}
-/* XXX: Create per-CPU queues */
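+/*
+ * Create one I/O queue per online CPU (or as many as the controller and the
+ * MSI-X allocation allow).  The admin interrupt is re-registered after MSI-X
+ * is enabled, and each vector gets an affinity hint for the CPU that will
+ * use its queue.
+ */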
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
- int this_cpu;
+ int result, cpu, i, nr_io_queues;
- set_queue_count(dev, 1);
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
- this_cpu = get_cpu();
- dev->queues[1] = nvme_create_queue(dev, 1, NVME_Q_DEPTH, this_cpu);
- put_cpu();
- if (!dev->queues[1])
- return -ENOMEM;
- dev->queue_count++;
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].entry = i;
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (!dev->queues[i + 1])
+ return -ENOMEM;
+ dev->queue_count++;
+ }
return 0;
}
{
int res, nn, i;
struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
void *id;
dma_addr_t dma_addr;
struct nvme_command cid, crt;
goto out_free;
}
- nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);
+ ctrl = id;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
cid.identify.cns = 0;
memset(&crt, 0, sizeof(crt));
{
struct nvme_ns *ns, *next;
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
/* TODO: wait all I/O finished or cancel them */
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
return 0;
}
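+/*
+ * Two DMA pools back the PRP lists: whole pages for long lists, and
+ * 256-byte blocks for lists of at most 32 entries.
+ */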
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
static int __devinit nvme_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int result = -ENOMEM;
+ int bars, result = -ENOMEM;
struct nvme_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
GFP_KERNEL);
if (!dev->entry)
goto free;
- dev->queues = kcalloc(2, sizeof(void *), GFP_KERNEL);
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
if (!dev->queues)
goto free;
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
- dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
nvme_set_instance(dev);
+ dev->entry[0].vector = pdev->irq;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar) {
result = -ENOMEM;
- goto disable;
+ goto disable_msix;
}
result = nvme_configure_admin_queue(dev);
goto unmap;
dev->queue_count++;
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
result = nvme_dev_add(dev);
if (result)
goto delete;
+
return 0;
delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
nvme_free_queues(dev);
unmap:
iounmap(dev->bar);
- disable:
+ disable_msix:
pci_disable_msix(pdev);
nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
free:
kfree(dev->queues);
kfree(dev->entry);
pci_disable_msix(pdev);
iounmap(dev->bar);
nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
static int __init nvme_init(void)
{
- int result;
+ int result = -EBUSY;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
nvme_major = register_blkdev(nvme_major, "nvme");
if (nvme_major <= 0)
- return -EBUSY;
+ goto kill_kthread;
result = pci_register_driver(&nvme_driver);
- if (!result)
- return 0;
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+ unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
return result;
}
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1");
+MODULE_VERSION("0.3");
module_init(nvme_init);
module_exit(nvme_exit);