#include <linux/nvme.h>
#include <linux/bio.h>
+#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
+#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
+#define IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
module_param(nvme_major, int, 0);
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
+ struct list_head node;
struct nvme_queue **queues;
u32 __iomem *dbs;
struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
int instance;
int queue_count;
+ int db_stride;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
*/
struct nvme_queue {
struct device *q_dmadev;
+ struct nvme_dev *dev;
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
struct bio_list sq_cong;
u32 __iomem *q_db;
u16 q_depth;
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}
+struct nvme_cmd_info {
+ unsigned long ctx;
+ unsigned long timeout;
+};
+
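+/*
+ * nvmeq->cmdid_data holds the command id allocation bitmap, immediately
+ * followed by an array of struct nvme_cmd_info, one entry per command id.
+ * This helper returns the start of that info array.
+ */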
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
/**
- * alloc_cmdid - Allocate a Command ID
- * @param nvmeq The queue that will be used for this command
- * @param ctx A pointer that will be passed to the handler
- * @param handler The ID of the handler to call
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The ID of the handler to call
*
* Allocate a Command ID for a queue. The data passed in will
* be passed to the completion handler. This is implemented by using
* the bottom two bits of the ctx pointer to store the handler ID.
* Passing in a pointer that's not 4-byte aligned will cause a BUG.
* We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
*/
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
+ unsigned timeout)
{
- int depth = nvmeq->q_depth;
- unsigned long data = (unsigned long)ctx | handler;
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
int cmdid;
BUG_ON((unsigned long)ctx & 3);
return -EBUSY;
} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
- nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
+ info[cmdid].ctx = (unsigned long)ctx | handler;
+ info[cmdid].timeout = jiffies + timeout;
return cmdid;
}
static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
- int handler)
+ int handler, unsigned timeout)
{
int cmdid;
wait_event_killable(nvmeq->sq_full,
- (cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
return (cmdid < 0) ? -EINTR : cmdid;
}
-/* If you need more than four handlers, you'll need to change how
+/*
+ * If you need more than four handlers, you'll need to change how
* alloc_cmdid and nvme_process_cq work. Consider using a special
* CMD_CTX value instead, if that works for your situation.
*/
bio_completion_id,
};
+/* Special values must be a multiple of 4, and less than 0x1000 */
#define CMD_CTX_BASE (POISON_POINTER_DELTA + sync_completion_id)
-#define CMD_CTX_CANCELLED (0x2008 + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED (0x2010 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID (0x2014 + CMD_CTX_BASE)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
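+/*
+ * The sentinels above keep sync_completion_id in their low two bits, so a
+ * cancelled, flushed or already-completed context is still dispatched to
+ * sync_completion(), which recognises these values rather than treating
+ * them as a pointer to a sync_cmd_info.
+ */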
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
unsigned long data;
- unsigned offset = cmdid + BITS_TO_LONGS(nvmeq->q_depth);
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- if (cmdid > nvmeq->q_depth)
+ if (cmdid >= nvmeq->q_depth)
return CMD_CTX_INVALID;
- data = nvmeq->cmdid_data[offset];
- nvmeq->cmdid_data[offset] = CMD_CTX_COMPLETED;
+ data = info[cmdid].ctx;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
clear_bit(cmdid, nvmeq->cmdid_data);
wake_up(&nvmeq->sq_full);
return data;
}
-static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
+static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
- unsigned offset = cmdid + BITS_TO_LONGS(nvmeq->q_depth);
- nvmeq->cmdid_data[offset] = CMD_CTX_CANCELLED;
+ unsigned long data;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ data = info[cmdid].ctx;
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+ return data;
}
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
- int qid, cpu = get_cpu();
- if (cpu < ns->dev->queue_count)
- qid = cpu + 1;
- else
- qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
- return ns->dev->queues[qid];
+ return ns->dev->queues[get_cpu() + 1];
}
static void put_nvmeq(struct nvme_queue *nvmeq)
}
/**
- * nvme_submit_cmd: Copy a command into a queue and ring the doorbell
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
* @cmd: The command to send
*
{
unsigned long flags;
u16 tail;
- /* XXX: Need to check tail isn't going to overrun head */
spin_lock_irqsave(&nvmeq->q_lock, flags);
tail = nvmeq->sq_tail;
memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
- writel(tail, nvmeq->q_db);
if (++tail == nvmeq->q_depth)
tail = 0;
+ writel(tail, nvmeq->q_db);
nvmeq->sq_tail = tail;
spin_unlock_irqrestore(&nvmeq->q_lock, flags);
return 0;
}
-struct nvme_req_info {
+struct nvme_prps {
+ int npages; /* 0 means small pool in use */
+ dma_addr_t first_dma;
+ __le64 *list[0];
+};
+
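+/*
+ * PRP list pages are chained: the final entry of each page holds the DMA
+ * address of the next page, so it is read back here before each page is
+ * returned to its pool.
+ */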
+static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ dma_addr_t prp_dma;
+
+ if (!prps)
+ return;
+
+ prp_dma = prps->first_dma;
+
+ if (prps->npages == 0)
+ dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
+ for (i = 0; i < prps->npages; i++) {
+ __le64 *prp_list = prps->list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(prps);
+}
+
+struct nvme_bio {
struct bio *bio;
int nents;
+ struct nvme_prps *prps;
struct scatterlist sg[0];
};
/* XXX: use a mempool */
-static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
+static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
- return kmalloc(sizeof(struct nvme_req_info) +
+ return kzalloc(sizeof(struct nvme_bio) +
sizeof(struct scatterlist) * nseg, gfp);
}
-static void free_info(struct nvme_req_info *info)
+static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
- kfree(info);
+ nvme_free_prps(nvmeq->dev, nbio->prps);
+ kfree(nbio);
}
static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
- struct nvme_req_info *info = ctx;
- struct bio *bio = info->bio;
+ struct nvme_bio *nbio = ctx;
+ struct bio *bio = nbio->bio;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
+ dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- free_info(info);
- bio_endio(bio, status ? -EIO : 0);
+ free_nbio(nvmeq, nbio);
+ if (status) {
+ bio_endio(bio, -EIO);
+ } else if (bio->bi_vcnt > bio->bi_idx) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ wake_up_process(nvme_thread);
+ } else {
+ bio_endio(bio, 0);
+ }
}
-/* length is in bytes */
-static void nvme_setup_prps(struct nvme_common_command *cmd,
- struct scatterlist *sg, int length)
+/* length is in bytes. The gfp flags indicate whether we may sleep. */
+static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd,
+ struct scatterlist *sg, int *len,
+ gfp_t gfp)
{
+ struct dma_pool *pool;
+ int length = *len;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ dma_addr_t prp_dma;
+ int nprps, npages, i;
+ struct nvme_prps *prps = NULL;
cmd->prp1 = cpu_to_le64(dma_addr);
length -= (PAGE_SIZE - offset);
if (length <= 0)
- return;
+ return prps;
dma_len -= (PAGE_SIZE - offset);
if (dma_len) {
if (length <= PAGE_SIZE) {
cmd->prp2 = cpu_to_le64(dma_addr);
- return;
+ return prps;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
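+	/*
+	 * Each PRP list page holds PAGE_SIZE / 8 entries, but the last entry
+	 * of every page is reserved to chain to the next page, hence the
+	 * PAGE_SIZE - 8 divisor.
+	 */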
+ npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
+ if (!prps) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ *len = (*len - length) + PAGE_SIZE;
+ return prps;
+ }
+
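+	/*
+	 * A list of at most 32 entries (256 / 8) fits in the 256-byte small
+	 * pool; anything larger takes whole pages from the page pool.
+	 */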
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ prps->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ prps->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ *len = (*len - length) + PAGE_SIZE;
+ kfree(prps);
+ return NULL;
+ }
+ prps->list[0] = prp_list;
+ prps->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ *len = (*len - length);
+ return prps;
+ }
+ prps->list[prps->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
}
- /* XXX: support PRP lists */
+ return prps;
}
-static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
+/* NVMe scatterlists require no holes in the virtual address space */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
+ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
- struct bio_vec *bvec;
- struct scatterlist *sg = info->sg;
- int i, nsegs;
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, old_idx, length = 0, nsegs = 0;
- sg_init_table(sg, psegs);
+ sg_init_table(nbio->sg, psegs);
+ old_idx = bio->bi_idx;
bio_for_each_segment(bvec, bio, i) {
- sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
- /* XXX: handle non-mergable here */
- nsegs++;
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+ break;
+ sg = sg ? sg + 1 : nbio->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ length += bvec->bv_len;
+ bvprv = bvec;
}
- info->nents = nsegs;
+ bio->bi_idx = i;
+ nbio->nents = nsegs;
+ sg_mark_end(sg);
+ if (dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir) == 0) {
+ bio->bi_idx = old_idx;
+ return -ENOMEM;
+ }
+ return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ int cmdid)
+{
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.command_id = cmdid;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
- return dma_map_sg(dev, info->sg, info->nents, dma_dir);
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
}
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+ int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+ sync_completion_id, IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ return cmdid;
+
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct bio *bio)
{
struct nvme_command *cmnd;
- struct nvme_req_info *info;
+ struct nvme_bio *nbio;
enum dma_data_direction dma_dir;
- int cmdid;
+ int cmdid, length, result = -ENOMEM;
u16 control;
u32 dsmgmt;
- unsigned long flags;
int psegs = bio_phys_segments(ns->queue, bio);
- info = alloc_info(psegs, GFP_NOIO);
- if (!info)
- goto congestion;
- info->bio = bio;
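+	/*
+	 * An NVMe flush carries no data, so a REQ_FLUSH bio that also has a
+	 * payload is issued as two commands: the flush is submitted here and
+	 * the read/write command is built below from the same bio.
+	 */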
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ nbio = alloc_nbio(psegs, GFP_ATOMIC);
+ if (!nbio)
+ goto nomem;
+ nbio->bio = bio;
- cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
if (unlikely(cmdid < 0))
- goto free_info;
+ goto free_nbio;
+
+ if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ return nvme_submit_flush(nvmeq, ns, cmdid);
control = 0;
if (bio->bi_rw & REQ_FUA)
if (bio->bi_rw & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
memset(cmnd, 0, sizeof(*cmnd));
dma_dir = DMA_FROM_DEVICE;
}
- nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);
+ result = nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs);
+ if (result < 0)
+ goto free_nbio;
+ length = result;
- cmnd->rw.flags = 1;
cmnd->rw.command_id = cmdid;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
+ nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
+ &length, GFP_ATOMIC);
cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
- cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
+ cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ bio->bi_sector += length >> 9;
+
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
-
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+ writel(nvmeq->sq_tail, nvmeq->q_db);
return 0;
- free_info:
- free_info(info);
- congestion:
- return -EBUSY;
+ free_nbio:
+ free_nbio(nvmeq, nbio);
+ nomem:
+ return result;
}
/*
{
struct nvme_ns *ns = q->queuedata;
struct nvme_queue *nvmeq = get_nvmeq(ns);
+ int result = -EBUSY;
- if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, bio);
}
+
+ spin_unlock_irq(&nvmeq->q_lock);
put_nvmeq(nvmeq);
return 0;
struct nvme_completion *cqe)
{
struct sync_cmd_info *cmdinfo = ctx;
- if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
+ if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
+ return;
+ if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
return;
if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
dev_warn(nvmeq->q_dmadev,
typedef void (*completion_fn)(struct nvme_queue *, void *,
struct nvme_completion *);
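+/*
+ * Dispatch table shared by the interrupt path (nvme_process_cq) and the
+ * timeout path (nvme_timeout_ios).  The handler index is carried in the
+ * low two bits of the stored ctx value.
+ */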
+static const completion_fn nvme_completions[4] = {
+ [sync_completion_id] = sync_completion,
+ [bio_completion_id] = bio_completion,
+};
+
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
u16 head, phase;
- static const completion_fn completions[4] = {
- [sync_completion_id] = sync_completion,
- [bio_completion_id] = bio_completion,
- };
-
head = nvmeq->cq_head;
phase = nvmeq->cq_phase;
data = free_cmdid(nvmeq, cqe.command_id);
handler = data & 3;
ptr = (void *)(data & ~3UL);
- completions[handler](nvmeq, ptr, &cqe);
+ nvme_completions[handler](nvmeq, ptr, &cqe);
}
/* If the controller ignores the cq head doorbell and continuously
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return IRQ_NONE;
- writel(head, nvmeq->q_db + 1);
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
}
static irqreturn_t nvme_irq(int irq, void *data)
-{
- return nvme_process_cq(data);
-}
-
-static irqreturn_t nvme_irq_thread(int irq, void *data)
{
irqreturn_t result;
struct nvme_queue *nvmeq = data;
static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
spin_lock_irq(&nvmeq->q_lock);
- cancel_cmdid_data(nvmeq, cmdid);
+ cancel_cmdid(nvmeq, cmdid);
spin_unlock_irq(&nvmeq->q_lock);
}
* if the result is positive, it's an NVM Express status code
*/
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd, u32 *result)
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
{
int cmdid;
struct sync_cmd_info cmdinfo;
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id);
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
+ timeout);
if (cmdid < 0)
return cmdid;
cmd->common.command_id = cmdid;
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
- return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
+ int vector = dev->entry[nvmeq->cq_vector].vector;
- free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);
+ irq_set_affinity_hint(vector, NULL);
+ free_irq(vector, nvmeq);
/* Don't tell the adapter to delete the admin queue */
if (qid) {
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
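+	/* Room for the cmdid bitmap plus one struct nvme_cmd_info per slot */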
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
if (!nvmeq)
return NULL;
goto free_cqdma;
nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid * 2];
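+	/*
+	 * Doorbells come in submission/completion pairs spaced by the
+	 * controller's doorbell stride (CAP.DSTRD): the SQ tail doorbell for
+	 * queue qid is dbs[(2 * qid) << DSTRD] and the matching CQ head
+	 * doorbell follows 1 << DSTRD u32s later.
+	 */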
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq_thread,
+ nvme_irq_check, nvme_irq,
IRQF_DISABLED | IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
if (!nvmeq)
- return NULL;
+ return ERR_PTR(-ENOMEM);
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
kfree(nvmeq);
- return NULL;
+ return ERR_PTR(result);
}
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
u32 aqa;
+ u64 cap;
+ unsigned long timeout;
struct nvme_queue *nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
writel(dev->ctrl_config, &dev->bar->cc);
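+	/*
+	 * CAP.TO is the worst-case time, in 500ms units, the controller may
+	 * take to report ready (CSTS.RDY); give up on initialisation once it
+	 * has been exceeded.
+	 */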
+ cap = readq(&dev->bar->cap);
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
msleep(100);
if (fatal_signal_pending(current))
return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
}
result = queue_request_irq(dev, nvmeq, "nvme admin");
sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
sg_init_table(sg, count);
- sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
- length -= (PAGE_SIZE - offset);
- for (i = 1; i < count; i++) {
- sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
- length -= PAGE_SIZE;
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
}
err = -ENOMEM;
}
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, int length,
- struct scatterlist *sg, int nents)
+ unsigned long addr, int length, struct scatterlist *sg)
{
int i, count;
count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
- dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);
+ dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
for (i = 0; i < count; i++)
put_page(sg_page(&sg[i]));
}
-static int nvme_submit_user_admin_command(struct nvme_dev *dev,
- unsigned long addr, unsigned length,
- struct nvme_command *cmd)
-{
- int err, nents;
- struct scatterlist *sg;
-
- nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
- if (nents < 0)
- return nents;
- nvme_setup_prps(&cmd->common, sg, length);
- err = nvme_submit_admin_cmd(dev, cmd, NULL);
- nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
- return err ? -EIO : 0;
-}
-
-static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
- c.identify.cns = cpu_to_le32(cns);
-
- return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
-}
-
-static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.nsid = cpu_to_le32(ns->ns_id);
- c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
-
- return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
-}
-
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_dev *dev = ns->dev;
struct nvme_user_io io;
struct nvme_command c;
unsigned length;
- u32 result;
int nents, status;
struct scatterlist *sg;
+ struct nvme_prps *prps;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
- length = io.nblocks << io.block_shift;
- nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
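+	/* io.nblocks is zero based, matching the NVMe command's NLB field */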
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
+ length, &sg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (nents < 0)
return nents;
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(io.nsid);
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
- c.rw.length = cpu_to_le16(io.nblocks - 1);
+ c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
- c.rw.reftag = cpu_to_le32(io.reftag); /* XXX: endian? */
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
+ c.rw.reftag = io.reftag;
+ c.rw.apptag = io.apptag;
+ c.rw.appmask = io.appmask;
/* XXX: metadata */
- nvme_setup_prps(&c.common, sg, length);
+ prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
nvmeq = get_nvmeq(ns);
- /* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
* disabled. We may be preempted at any point, and be rescheduled
* to a different CPU. That will cause cacheline bouncing, but no
* additional races since q_lock already protects against other CPUs.
*/
put_nvmeq(nvmeq);
- status = nvme_submit_sync_cmd(nvmeq, &c, &result);
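+	/*
+	 * nvme_setup_prps() reports a shorter length if it could not allocate
+	 * all the PRP list pages; a short mapping means the full-size I/O
+	 * cannot be issued safely, so fail it instead.
+	 */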
+ if (length != (io.nblocks + 1) << ns->lba_shift)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);
- nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
- put_user(result, &uio->result);
+ nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg);
+ nvme_free_prps(dev, prps);
return status;
}
-static int nvme_download_firmware(struct nvme_ns *ns,
- struct nvme_dlfw __user *udlfw)
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
{
struct nvme_dev *dev = ns->dev;
- struct nvme_dlfw dlfw;
+ struct nvme_admin_cmd cmd;
struct nvme_command c;
- int nents, status;
+ int status, length, nents = 0;
struct scatterlist *sg;
+ struct nvme_prps *prps = NULL;
- if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
- if (dlfw.length >= (1 << 30))
- return -EINVAL;
-
- nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
- if (nents < 0)
- return nents;
memset(&c, 0, sizeof(c));
- c.dlfw.opcode = nvme_admin_download_fw;
- c.dlfw.numd = cpu_to_le32(dlfw.length);
- c.dlfw.offset = cpu_to_le32(dlfw.offset);
- nvme_setup_prps(&c.common, sg, dlfw.length * 4);
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ nents = nvme_map_user_pages(dev, 1, cmd.addr, length, &sg);
+ if (nents < 0)
+ return nents;
+ prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
+ }
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
+ if (length != cmd.data_len)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg);
+ nvme_free_prps(dev, prps);
+ }
return status;
}
-static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
-{
- struct nvme_dev *dev = ns->dev;
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = nvme_admin_activate_fw;
- c.common.rsvd10[0] = cpu_to_le32(arg);
-
- return nvme_submit_admin_cmd(dev, &c, NULL);
-}
-
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long arg)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
switch (cmd) {
- case NVME_IOCTL_IDENTIFY_NS:
- return nvme_identify(ns, arg, 0);
- case NVME_IOCTL_IDENTIFY_CTRL:
- return nvme_identify(ns, arg, 1);
- case NVME_IOCTL_GET_RANGE_TYPE:
- return nvme_get_range_type(ns, arg);
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
- case NVME_IOCTL_DOWNLOAD_FW:
- return nvme_download_firmware(ns, (void __user *)arg);
- case NVME_IOCTL_ACTIVATE_FW:
- return nvme_activate_firmware(ns, arg);
default:
return -ENOTTY;
}
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
};
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
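+/*
+ * Walk a queue's outstanding command ids and complete any that have passed
+ * their deadline, handing the registered handler a synthesised completion
+ * carrying an ABORT status.
+ */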
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ unsigned long data;
+ void *ptr;
+ unsigned char handler;
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+		};
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ data = cancel_cmdid(nvmeq, cmdid);
+ handler = data & 3;
+ ptr = (void *)(data & ~3UL);
+ nvme_completions[handler](nvmeq, ptr, &cqe);
+ }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
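+/*
+ * One kernel thread polls every queue of every registered device roughly
+ * once a second: it picks up completions, times out overdue commands and
+ * resubmits bios held on the congestion list.
+ */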
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk("process_cq did something\n");
+ nvme_timeout_ios(nvmeq);
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+ int index, error;
+
+ do {
+ if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_index_ida, &index);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ index = -1;
+ return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_index_ida, index);
+ spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
struct nvme_ns *ns;
disk = alloc_disk(NVME_MINORS);
if (!disk)
goto out_free_queue;
- ns->ns_id = index;
+ ns->ns_id = nsid;
ns->disk = disk;
lbaf = id->flbas & 0xf;
ns->lba_shift = id->lbaf[lbaf].ds;
disk->major = nvme_major;
disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * index;
+ disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
- sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
+ sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
return ns;
static void nvme_ns_free(struct nvme_ns *ns)
{
+ int index = ns->disk->first_minor / NVME_MINORS;
put_disk(ns->disk);
+ nvme_put_ns_idx(index);
blk_cleanup_queue(ns->queue);
kfree(ns);
}
{
int status;
u32 result;
- struct nvme_command c;
u32 q_count = (count - 1) | ((count - 1) << 16);
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
- c.features.dword11 = cpu_to_le32(q_count);
-
- status = nvme_submit_admin_cmd(dev, &c, &result);
+ status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ &result);
if (status)
return -EIO;
return min(result & 0xffff, result >> 16) + 1;
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
- int result, cpu, i, nr_queues;
+ int result, cpu, i, nr_io_queues, db_bar_size;
- nr_queues = num_online_cpus();
- result = set_queue_count(dev, nr_queues);
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
if (result < 0)
return result;
- if (result < nr_queues)
- nr_queues = result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, dev->queues[0]);
- for (i = 0; i < nr_queues; i++)
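+	/*
+	 * The initial 8K BAR mapping only covers a handful of doorbells; with
+	 * more I/O queues, or a large doorbell stride, the doorbell region
+	 * grows, so remap the BAR to cover all of it.
+	 */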
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
+ for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
for (;;) {
- result = pci_enable_msix(dev->pci_dev, dev->entry, nr_queues);
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
if (result == 0) {
break;
} else if (result > 0) {
- nr_queues = result;
+ nr_io_queues = result;
continue;
} else {
- nr_queues = 1;
+ nr_io_queues = 1;
break;
}
}
/* XXX: handle failure here */
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_queues; i++) {
+ for (i = 0; i < nr_io_queues; i++) {
irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
- for (i = 0; i < nr_queues; i++) {
+ for (i = 0; i < nr_io_queues; i++) {
dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
NVME_Q_DEPTH, i);
- if (!dev->queues[i + 1])
- return -ENOMEM;
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
dev->queue_count++;
}
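+	/*
+	 * get_nvmeq() indexes queues[] directly by CPU, so point the entries
+	 * for any CPUs beyond the allocated I/O queues at existing queues,
+	 * spread round-robin (a power-of-two modulus keeps the maths cheap).
+	 */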
+ for (; i < num_possible_cpus(); i++) {
+ int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+ dev->queues[i + 1] = dev->queues[target + 1];
+ }
+
return 0;
}
int res, nn, i;
struct nvme_ns *ns, *next;
struct nvme_id_ctrl *ctrl;
- void *id;
+ struct nvme_id_ns *id_ns;
+ void *mem;
dma_addr_t dma_addr;
- struct nvme_command cid, crt;
res = nvme_setup_io_queues(dev);
if (res)
return res;
- /* XXX: Switch to a SG list once prp2 works */
- id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
- memset(&cid, 0, sizeof(cid));
- cid.identify.opcode = nvme_admin_identify;
- cid.identify.nsid = 0;
- cid.identify.prp1 = cpu_to_le64(dma_addr);
- cid.identify.cns = cpu_to_le32(1);
-
- res = nvme_submit_admin_cmd(dev, &cid, NULL);
+ res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
res = -EIO;
goto out_free;
}
- ctrl = id;
+ ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- cid.identify.cns = 0;
- memset(&crt, 0, sizeof(crt));
- crt.features.opcode = nvme_admin_get_features;
- crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
- crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
-
- for (i = 0; i < nn; i++) {
- cid.identify.nsid = cpu_to_le32(i);
- res = nvme_submit_admin_cmd(dev, &cid, NULL);
+ id_ns = mem;
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
if (res)
continue;
- if (((struct nvme_id_ns *)id)->ncap == 0)
+ if (id_ns->ncap == 0)
continue;
- crt.features.nsid = cpu_to_le32(i);
- res = nvme_submit_admin_cmd(dev, &crt, NULL);
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+ dma_addr + 4096, NULL);
if (res)
continue;
- ns = nvme_alloc_ns(dev, i, id, id + 4096);
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
if (ns)
list_add_tail(&ns->list, &dev->namespaces);
}
list_for_each_entry(ns, &dev->namespaces, list)
add_disk(ns->disk);
- dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
- return 0;
+ goto out;
out_free:
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
nvme_ns_free(ns);
}
- dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
return res;
}
{
struct nvme_ns *ns, *next;
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
	/* TODO: wait until all outstanding I/O has finished, or cancel it */
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
return 0;
}
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
nvme_set_instance(dev);
dev->entry[0].vector = pdev->irq;
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
+
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar) {
result = -ENOMEM;
goto unmap;
dev->queue_count++;
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
result = nvme_dev_add(dev);
if (result)
goto delete;
+
return 0;
delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
nvme_free_queues(dev);
unmap:
iounmap(dev->bar);
disable_msix:
pci_disable_msix(pdev);
nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
disable:
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_disable_msix(pdev);
iounmap(dev->bar);
nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
pci_disable_device(pdev);
pci_release_regions(pdev);
kfree(dev->queues);
static int __init nvme_init(void)
{
- int result;
+ int result = -EBUSY;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
nvme_major = register_blkdev(nvme_major, "nvme");
if (nvme_major <= 0)
- return -EBUSY;
+ goto kill_kthread;
result = pci_register_driver(&nvme_driver);
- if (!result)
- return 0;
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+ unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
return result;
}
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.2");
+MODULE_VERSION("0.7");
module_init(nvme_init);
module_exit(nvme_exit);