2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #include <linux/nvme.h>
16 #include <linux/bitops.h>
17 #include <linux/blkdev.h>
18 #include <linux/blk-mq.h>
19 #include <linux/cpu.h>
20 #include <linux/delay.h>
21 #include <linux/errno.h>
23 #include <linux/genhd.h>
24 #include <linux/hdreg.h>
25 #include <linux/idr.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/kdev_t.h>
30 #include <linux/kthread.h>
31 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/pci.h>
36 #include <linux/poison.h>
37 #include <linux/ptrace.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/types.h>
42 #include <asm-generic/io-64-nonatomic-lo-hi.h>
44 #define NVME_Q_DEPTH 1024
45 #define NVME_AQ_DEPTH 64
46 #define SQ_SIZE(depth) ((depth) * sizeof(struct nvme_command))
47 #define CQ_SIZE(depth) ((depth) * sizeof(struct nvme_completion))
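/*
 * Sizing example (illustrative arithmetic): with NVME_Q_DEPTH = 1024 and the
 * 64-byte commands checked in _nvme_check_size() below, SQ_SIZE(1024) is
 * 64 KiB of DMA-coherent memory per I/O submission queue; assuming the usual
 * 16-byte completion entry, CQ_SIZE(1024) is 16 KiB for the matching
 * completion queue.
 */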
48 #define ADMIN_TIMEOUT (admin_timeout * HZ)
49 #define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
50 #define IOD_TIMEOUT (retry_time * HZ)
52 static unsigned char admin_timeout = 60;
53 module_param(admin_timeout, byte, 0644);
54 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
56 unsigned char nvme_io_timeout = 30;
57 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
58 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
60 static unsigned char retry_time = 30;
61 module_param(retry_time, byte, 0644);
62 MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
64 static unsigned char shutdown_timeout = 5;
65 module_param(shutdown_timeout, byte, 0644);
66 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
68 static int nvme_major;
69 module_param(nvme_major, int, 0);
71 static int use_threaded_interrupts;
72 module_param(use_threaded_interrupts, int, 0);
74 static DEFINE_SPINLOCK(dev_list_lock);
75 static LIST_HEAD(dev_list);
76 static struct task_struct *nvme_thread;
77 static struct workqueue_struct *nvme_workq;
78 static wait_queue_head_t nvme_kthread_wait;
79 static struct notifier_block nvme_nb;
81 static void nvme_reset_failed_dev(struct work_struct *ws);
82 static int nvme_process_cq(struct nvme_queue *nvmeq);
84 struct async_cmd_info {
85 struct kthread_work work;
86 struct kthread_worker *worker;
94 * An NVM Express queue. Each device has at least two (one for admin
95 * commands and one for I/O commands).
98 struct llist_node node;
99 struct device *q_dmadev;
100 struct nvme_dev *dev;
101 char irqname[24]; /* nvme4294967295-65535\0 */
103 struct nvme_command *sq_cmds;
104 volatile struct nvme_completion *cqes;
105 dma_addr_t sq_dma_addr;
106 dma_addr_t cq_dma_addr;
116 struct async_cmd_info cmdinfo;
117 struct blk_mq_hw_ctx *hctx;
121 * Check we didn't inadvertently grow the command struct
123 static inline void _nvme_check_size(void)
125 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
126 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
127 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
128 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
129 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
130 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
131 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
132 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
133 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
134 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
135 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
136 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
139 typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
140 struct nvme_completion *);
142 struct nvme_cmd_info {
143 nvme_completion_fn fn;
146 struct nvme_queue *nvmeq;
149 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
150 unsigned int hctx_idx)
152 struct nvme_dev *dev = data;
153 struct nvme_queue *nvmeq = dev->queues[0];
155 WARN_ON(nvmeq->hctx);
157 hctx->driver_data = nvmeq;
161 static int nvme_admin_init_request(void *data, struct request *req,
162 unsigned int hctx_idx, unsigned int rq_idx,
163 unsigned int numa_node)
165 struct nvme_dev *dev = data;
166 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
167 struct nvme_queue *nvmeq = dev->queues[0];
174 static void nvme_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
176 struct nvme_queue *nvmeq = hctx->driver_data;
181 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
182 unsigned int hctx_idx)
184 struct nvme_dev *dev = data;
185 struct nvme_queue *nvmeq = dev->queues[
186 (hctx_idx % dev->queue_count) + 1];
191 /* nvmeq queues are shared between namespaces. We assume here that
192 * blk-mq maps the tags so they match up with the nvme queue tags. */
193 WARN_ON(nvmeq->hctx->tags != hctx->tags);
195 hctx->driver_data = nvmeq;
199 static int nvme_init_request(void *data, struct request *req,
200 unsigned int hctx_idx, unsigned int rq_idx,
201 unsigned int numa_node)
203 struct nvme_dev *dev = data;
204 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
205 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
212 static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
213 nvme_completion_fn handler)
218 blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
221 /* Special values must be less than 0x1000 */
222 #define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
223 #define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
224 #define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
225 #define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
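/*
 * The special context values are small offsets from POISON_POINTER_DELTA, so
 * they can never collide with a real per-command context pointer (hence the
 * "less than 0x1000" rule above), and special_completion() recognises each
 * one with a simple equality check.
 */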
227 static void special_completion(struct nvme_queue *nvmeq, void *ctx,
228 struct nvme_completion *cqe)
230 if (ctx == CMD_CTX_CANCELLED)
232 if (ctx == CMD_CTX_COMPLETED) {
233 dev_warn(nvmeq->q_dmadev,
234 "completed id %d twice on queue %d\n",
235 cqe->command_id, le16_to_cpup(&cqe->sq_id));
238 if (ctx == CMD_CTX_INVALID) {
239 dev_warn(nvmeq->q_dmadev,
240 "invalid id %d completed on queue %d\n",
241 cqe->command_id, le16_to_cpup(&cqe->sq_id));
244 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
247 static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
254 cmd->fn = special_completion;
255 cmd->ctx = CMD_CTX_CANCELLED;
259 static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
260 struct nvme_completion *cqe)
262 struct request *req = ctx;
264 u32 result = le32_to_cpup(&cqe->result);
265 u16 status = le16_to_cpup(&cqe->status) >> 1;
267 if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
268 ++nvmeq->dev->event_limit;
269 if (status == NVME_SC_SUCCESS)
270 dev_warn(nvmeq->q_dmadev,
271 "async event result %08x\n", result);
273 blk_mq_free_hctx_request(nvmeq->hctx, req);
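/*
 * Async Event Requests carry no data and no timeout (REQ_NO_TIMEOUT is set in
 * nvme_submit_async_admin_req()); the controller holds them until it has an
 * event to report. Each completion or abort returns a slot to
 * dev->event_limit, and the nvme_kthread() loop resubmits requests until the
 * limit is used up again.
 */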
276 static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
277 struct nvme_completion *cqe)
279 struct request *req = ctx;
281 u16 status = le16_to_cpup(&cqe->status) >> 1;
282 u32 result = le32_to_cpup(&cqe->result);
284 blk_mq_free_hctx_request(nvmeq->hctx, req);
286 dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
287 ++nvmeq->dev->abort_limit;
290 static void async_completion(struct nvme_queue *nvmeq, void *ctx,
291 struct nvme_completion *cqe)
293 struct async_cmd_info *cmdinfo = ctx;
294 cmdinfo->result = le32_to_cpup(&cqe->result);
295 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
296 queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
297 blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req);
300 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
303 struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
304 struct request *req = blk_mq_tag_to_rq(hctx->tags, tag);
306 return blk_mq_rq_to_pdu(req);
310 * Called with local interrupts disabled and the q_lock held. May not sleep.
312 static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
313 nvme_completion_fn *fn)
315 struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
317 if (tag >= nvmeq->q_depth) {
318 *fn = special_completion;
319 return CMD_CTX_INVALID;
324 cmd->fn = special_completion;
325 cmd->ctx = CMD_CTX_COMPLETED;
330 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
331 * @nvmeq: The queue to use
332 * @cmd: The command to send
334 * Safe to use from interrupt context
336 static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
338 u16 tail = nvmeq->sq_tail;
340 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
341 if (++tail == nvmeq->q_depth)
343 writel(tail, nvmeq->q_db);
344 nvmeq->sq_tail = tail;
349 static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
353 spin_lock_irqsave(&nvmeq->q_lock, flags);
354 ret = __nvme_submit_cmd(nvmeq, cmd);
355 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
359 static __le64 **iod_list(struct nvme_iod *iod)
361 return ((void *)iod) + iod->offset;
365 * Will slightly overestimate the number of pages needed. This is OK
366 * as it only leads to a small amount of wasted memory for the lifetime of the I/O.
369 static int nvme_npages(unsigned size, struct nvme_dev *dev)
371 unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
372 return DIV_ROUND_UP(8 * nprps, dev->page_size - 8);
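/*
 * Worked example, assuming a 4 KiB device page: an unaligned 64 KiB request
 * needs at most DIV_ROUND_UP(65536 + 4096, 4096) = 17 PRP entries, and at
 * 8 bytes each that is DIV_ROUND_UP(8 * 17, 4096 - 8) = 1 PRP list page.
 * The "- 8" reserves the last slot of every full list for the chain pointer
 * to the next list.
 */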
375 static struct nvme_iod *
376 nvme_alloc_iod(unsigned nseg, unsigned nbytes, struct nvme_dev *dev, gfp_t gfp)
378 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
379 sizeof(__le64 *) * nvme_npages(nbytes, dev) +
380 sizeof(struct scatterlist) * nseg, gfp);
383 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
385 iod->length = nbytes;
387 iod->first_dma = 0ULL;
393 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
395 const int last_prp = dev->page_size / 8 - 1;
397 __le64 **list = iod_list(iod);
398 dma_addr_t prp_dma = iod->first_dma;
400 if (iod->npages == 0)
401 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
402 for (i = 0; i < iod->npages; i++) {
403 __le64 *prp_list = list[i];
404 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
405 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
406 prp_dma = next_prp_dma;
411 static int nvme_error_status(u16 status)
413 switch (status & 0x7ff) {
414 case NVME_SC_SUCCESS:
416 case NVME_SC_CAP_EXCEEDED:
423 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
424 struct nvme_completion *cqe)
426 struct nvme_iod *iod = ctx;
427 struct request *req = iod->private;
428 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
430 u16 status = le16_to_cpup(&cqe->status) >> 1;
432 if (unlikely(status)) {
433 if (!(status & NVME_SC_DNR || blk_noretry_request(req))
434 && (jiffies - req->start_time) < req->timeout) {
435 blk_mq_requeue_request(req);
436 blk_mq_kick_requeue_list(req->q);
439 req->errors = nvme_error_status(status);
444 dev_warn(&nvmeq->dev->pci_dev->dev,
445 "completing aborted command with status:%04x\n",
449 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
450 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
451 nvme_free_iod(nvmeq->dev, iod);
453 blk_mq_complete_request(req);
456 /* length is in bytes. The gfp flags indicate whether we may sleep. */
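/*
 * Sketch of the PRP scheme used below: the first data pointer goes into the
 * command's prp1; if at most one more device page is needed, its address goes
 * directly into prp2, otherwise prp2 points at a PRP list. Lists of up to 32
 * entries come from prp_small_pool ("prp list 256"), larger ones from the
 * page-sized prp_page_pool, and when a list fills up its final slot is turned
 * into a chain pointer to a freshly allocated list (the old_prp_list shuffle).
 */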
457 int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
460 struct dma_pool *pool;
461 int length = total_len;
462 struct scatterlist *sg = iod->sg;
463 int dma_len = sg_dma_len(sg);
464 u64 dma_addr = sg_dma_address(sg);
465 int offset = offset_in_page(dma_addr);
467 __le64 **list = iod_list(iod);
470 u32 page_size = dev->page_size;
472 length -= (page_size - offset);
476 dma_len -= (page_size - offset);
478 dma_addr += (page_size - offset);
481 dma_addr = sg_dma_address(sg);
482 dma_len = sg_dma_len(sg);
485 if (length <= page_size) {
486 iod->first_dma = dma_addr;
490 nprps = DIV_ROUND_UP(length, page_size);
491 if (nprps <= (256 / 8)) {
492 pool = dev->prp_small_pool;
495 pool = dev->prp_page_pool;
499 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
501 iod->first_dma = dma_addr;
503 return (total_len - length) + page_size;
506 iod->first_dma = prp_dma;
509 if (i == page_size >> 3) {
510 __le64 *old_prp_list = prp_list;
511 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
513 return total_len - length;
514 list[iod->npages++] = prp_list;
515 prp_list[0] = old_prp_list[i - 1];
516 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
519 prp_list[i++] = cpu_to_le64(dma_addr);
520 dma_len -= page_size;
521 dma_addr += page_size;
529 dma_addr = sg_dma_address(sg);
530 dma_len = sg_dma_len(sg);
537 * We reuse the small pool to allocate the 16-byte range here as it is not
538 * worth having a special pool for these or additional cases to handle freeing the iod.
541 static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
542 struct request *req, struct nvme_iod *iod)
544 struct nvme_dsm_range *range =
545 (struct nvme_dsm_range *)iod_list(iod)[0];
546 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
548 range->cattr = cpu_to_le32(0);
549 range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
550 range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
552 memset(cmnd, 0, sizeof(*cmnd));
553 cmnd->dsm.opcode = nvme_cmd_dsm;
554 cmnd->dsm.command_id = req->tag;
555 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
556 cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
558 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
560 if (++nvmeq->sq_tail == nvmeq->q_depth)
562 writel(nvmeq->sq_tail, nvmeq->q_db);
565 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
568 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
570 memset(cmnd, 0, sizeof(*cmnd));
571 cmnd->common.opcode = nvme_cmd_flush;
572 cmnd->common.command_id = cmdid;
573 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
575 if (++nvmeq->sq_tail == nvmeq->q_depth)
577 writel(nvmeq->sq_tail, nvmeq->q_db);
580 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
583 struct request *req = iod->private;
584 struct nvme_command *cmnd;
588 if (req->cmd_flags & REQ_FUA)
589 control |= NVME_RW_FUA;
590 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
591 control |= NVME_RW_LR;
593 if (req->cmd_flags & REQ_RAHEAD)
594 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
596 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
597 memset(cmnd, 0, sizeof(*cmnd));
599 cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
600 cmnd->rw.command_id = req->tag;
601 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
602 cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
603 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
604 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
605 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
606 cmnd->rw.control = cpu_to_le16(control);
607 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
609 if (++nvmeq->sq_tail == nvmeq->q_depth)
611 writel(nvmeq->sq_tail, nvmeq->q_db);
616 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
617 const struct blk_mq_queue_data *bd)
619 struct nvme_ns *ns = hctx->queue->queuedata;
620 struct nvme_queue *nvmeq = hctx->driver_data;
621 struct request *req = bd->rq;
622 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
623 struct nvme_iod *iod;
624 int psegs = req->nr_phys_segments;
625 enum dma_data_direction dma_dir;
626 unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
627 sizeof(struct nvme_dsm_range);
629 iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
631 return BLK_MQ_RQ_QUEUE_BUSY;
635 if (req->cmd_flags & REQ_DISCARD) {
638 * We reuse the small pool to allocate the 16-byte range here
639 * as it is not worth having a special pool for these or
640 * additional cases to handle freeing the iod.
642 range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
647 iod_list(iod)[0] = (__le64 *)range;
650 dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
652 sg_init_table(iod->sg, psegs);
653 iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
657 if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
660 if (blk_rq_bytes(req) !=
661 nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
662 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
663 iod->nents, dma_dir);
668 nvme_set_info(cmd, iod, req_completion);
669 spin_lock_irq(&nvmeq->q_lock);
670 if (req->cmd_flags & REQ_DISCARD)
671 nvme_submit_discard(nvmeq, ns, req, iod);
672 else if (req->cmd_flags & REQ_FLUSH)
673 nvme_submit_flush(nvmeq, ns, req->tag);
675 nvme_submit_iod(nvmeq, iod, ns);
677 nvme_process_cq(nvmeq);
678 spin_unlock_irq(&nvmeq->q_lock);
679 return BLK_MQ_RQ_QUEUE_OK;
682 nvme_free_iod(nvmeq->dev, iod);
683 return BLK_MQ_RQ_QUEUE_ERROR;
685 nvme_free_iod(nvmeq->dev, iod);
686 return BLK_MQ_RQ_QUEUE_BUSY;
689 static int nvme_process_cq(struct nvme_queue *nvmeq)
693 head = nvmeq->cq_head;
694 phase = nvmeq->cq_phase;
698 nvme_completion_fn fn;
699 struct nvme_completion cqe = nvmeq->cqes[head];
700 if ((le16_to_cpu(cqe.status) & 1) != phase)
702 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
703 if (++head == nvmeq->q_depth) {
707 ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
708 fn(nvmeq, ctx, &cqe);
711 /* If the controller ignores the cq head doorbell and continuously
712 * writes to the queue, it is theoretically possible to wrap around
713 * the queue twice and mistakenly return IRQ_NONE. Linux only
714 * requires that 0.1% of your interrupts are handled, so this isn't a big problem.
717 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
720 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
721 nvmeq->cq_head = head;
722 nvmeq->cq_phase = phase;
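/*
 * Phase-tag note: bit 0 of each completion entry's status is the phase bit,
 * which the controller inverts on every pass through the queue, so the
 * "!= phase" test above stops at the first entry not yet written in the
 * current pass. It is also why completion consumers shift the status right
 * by one, and why cancelled requests fake a status of NVME_SC_ABORT_REQ << 1.
 */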
728 /* Admin queue isn't initialized as a request queue. If at some point this
729 * happens anyway, make sure to notify the user */
730 static int nvme_admin_queue_rq(struct blk_mq_hw_ctx *hctx,
731 const struct blk_mq_queue_data *bd)
734 return BLK_MQ_RQ_QUEUE_ERROR;
737 static irqreturn_t nvme_irq(int irq, void *data)
740 struct nvme_queue *nvmeq = data;
741 spin_lock(&nvmeq->q_lock);
742 nvme_process_cq(nvmeq);
743 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
745 spin_unlock(&nvmeq->q_lock);
749 static irqreturn_t nvme_irq_check(int irq, void *data)
751 struct nvme_queue *nvmeq = data;
752 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
753 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
755 return IRQ_WAKE_THREAD;
758 static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
761 spin_lock_irq(&nvmeq->q_lock);
762 cancel_cmd_info(cmd_info, NULL);
763 spin_unlock_irq(&nvmeq->q_lock);
766 struct sync_cmd_info {
767 struct task_struct *task;
772 static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
773 struct nvme_completion *cqe)
775 struct sync_cmd_info *cmdinfo = ctx;
776 cmdinfo->result = le32_to_cpup(&cqe->result);
777 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
778 wake_up_process(cmdinfo->task);
782 * Returns 0 on success. If the result is negative, it's a Linux error code;
783 * if the result is positive, it's an NVM Express status code
785 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
786 u32 *result, unsigned timeout)
789 struct sync_cmd_info cmdinfo;
790 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
791 struct nvme_queue *nvmeq = cmd_rq->nvmeq;
793 cmdinfo.task = current;
794 cmdinfo.status = -EINTR;
796 cmd->common.command_id = req->tag;
798 nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
800 set_current_state(TASK_KILLABLE);
801 ret = nvme_submit_cmd(nvmeq, cmd);
803 nvme_finish_cmd(nvmeq, req->tag, NULL);
804 set_current_state(TASK_RUNNING);
806 ret = schedule_timeout(timeout);
809 * Ensure that sync_completion has either run, or that it will
812 nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
815 * We never got the completion
817 if (cmdinfo.status == -EINTR)
821 *result = cmdinfo.result;
823 return cmdinfo.status;
826 static int nvme_submit_async_admin_req(struct nvme_dev *dev)
828 struct nvme_queue *nvmeq = dev->queues[0];
829 struct nvme_command c;
830 struct nvme_cmd_info *cmd_info;
833 req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
837 req->cmd_flags |= REQ_NO_TIMEOUT;
838 cmd_info = blk_mq_rq_to_pdu(req);
839 nvme_set_info(cmd_info, req, async_req_completion);
841 memset(&c, 0, sizeof(c));
842 c.common.opcode = nvme_admin_async_event;
843 c.common.command_id = req->tag;
845 return __nvme_submit_cmd(nvmeq, &c);
848 static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
849 struct nvme_command *cmd,
850 struct async_cmd_info *cmdinfo, unsigned timeout)
852 struct nvme_queue *nvmeq = dev->queues[0];
854 struct nvme_cmd_info *cmd_rq;
856 req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
860 req->timeout = timeout;
861 cmd_rq = blk_mq_rq_to_pdu(req);
863 nvme_set_info(cmd_rq, cmdinfo, async_completion);
864 cmdinfo->status = -EINTR;
866 cmd->common.command_id = req->tag;
868 return nvme_submit_cmd(nvmeq, cmd);
871 static int __nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
872 u32 *result, unsigned timeout)
877 req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
880 res = nvme_submit_sync_cmd(req, cmd, result, timeout);
881 blk_mq_free_request(req);
885 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
888 return __nvme_submit_admin_cmd(dev, cmd, result, ADMIN_TIMEOUT);
891 int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
892 struct nvme_command *cmd, u32 *result)
897 req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT),
901 res = nvme_submit_sync_cmd(req, cmd, result, NVME_IO_TIMEOUT);
902 blk_mq_free_request(req);
906 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
908 struct nvme_command c;
910 memset(&c, 0, sizeof(c));
911 c.delete_queue.opcode = opcode;
912 c.delete_queue.qid = cpu_to_le16(id);
914 return nvme_submit_admin_cmd(dev, &c, NULL);
917 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
918 struct nvme_queue *nvmeq)
920 struct nvme_command c;
921 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
923 memset(&c, 0, sizeof(c));
924 c.create_cq.opcode = nvme_admin_create_cq;
925 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
926 c.create_cq.cqid = cpu_to_le16(qid);
927 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
928 c.create_cq.cq_flags = cpu_to_le16(flags);
929 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
931 return nvme_submit_admin_cmd(dev, &c, NULL);
934 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
935 struct nvme_queue *nvmeq)
937 struct nvme_command c;
938 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
940 memset(&c, 0, sizeof(c));
941 c.create_sq.opcode = nvme_admin_create_sq;
942 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
943 c.create_sq.sqid = cpu_to_le16(qid);
944 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
945 c.create_sq.sq_flags = cpu_to_le16(flags);
946 c.create_sq.cqid = cpu_to_le16(qid);
948 return nvme_submit_admin_cmd(dev, &c, NULL);
951 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
953 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
956 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
958 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
961 int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
964 struct nvme_command c;
966 memset(&c, 0, sizeof(c));
967 c.identify.opcode = nvme_admin_identify;
968 c.identify.nsid = cpu_to_le32(nsid);
969 c.identify.prp1 = cpu_to_le64(dma_addr);
970 c.identify.cns = cpu_to_le32(cns);
972 return nvme_submit_admin_cmd(dev, &c, NULL);
975 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
976 dma_addr_t dma_addr, u32 *result)
978 struct nvme_command c;
980 memset(&c, 0, sizeof(c));
981 c.features.opcode = nvme_admin_get_features;
982 c.features.nsid = cpu_to_le32(nsid);
983 c.features.prp1 = cpu_to_le64(dma_addr);
984 c.features.fid = cpu_to_le32(fid);
986 return nvme_submit_admin_cmd(dev, &c, result);
989 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
990 dma_addr_t dma_addr, u32 *result)
992 struct nvme_command c;
994 memset(&c, 0, sizeof(c));
995 c.features.opcode = nvme_admin_set_features;
996 c.features.prp1 = cpu_to_le64(dma_addr);
997 c.features.fid = cpu_to_le32(fid);
998 c.features.dword11 = cpu_to_le32(dword11);
1000 return nvme_submit_admin_cmd(dev, &c, result);
1004 * nvme_abort_req - Attempt aborting a request
1006 * Schedule controller reset if the command was already aborted once before and
1007 * still hasn't been returned to the driver, or if this is the admin queue.
1009 static void nvme_abort_req(struct request *req)
1011 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
1012 struct nvme_queue *nvmeq = cmd_rq->nvmeq;
1013 struct nvme_dev *dev = nvmeq->dev;
1014 struct request *abort_req;
1015 struct nvme_cmd_info *abort_cmd;
1016 struct nvme_command cmd;
1018 if (!nvmeq->qid || cmd_rq->aborted) {
1019 if (work_busy(&dev->reset_work))
1021 list_del_init(&dev->node);
1022 dev_warn(&dev->pci_dev->dev,
1023 "I/O %d QID %d timeout, reset controller\n",
1024 req->tag, nvmeq->qid);
1025 dev->reset_workfn = nvme_reset_failed_dev;
1026 queue_work(nvme_workq, &dev->reset_work);
1030 if (!dev->abort_limit)
1033 abort_req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC,
1035 if (IS_ERR(abort_req))
1038 abort_cmd = blk_mq_rq_to_pdu(abort_req);
1039 nvme_set_info(abort_cmd, abort_req, abort_completion);
1041 memset(&cmd, 0, sizeof(cmd));
1042 cmd.abort.opcode = nvme_admin_abort_cmd;
1043 cmd.abort.cid = req->tag;
1044 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1045 cmd.abort.command_id = abort_req->tag;
1048 cmd_rq->aborted = 1;
1050 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
1052 if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
1053 dev_warn(nvmeq->q_dmadev,
1054 "Could not abort I/O %d QID %d",
1055 req->tag, nvmeq->qid);
1056 blk_mq_free_request(abort_req);
1060 static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
1061 struct request *req, void *data, bool reserved)
1063 struct nvme_queue *nvmeq = data;
1065 nvme_completion_fn fn;
1066 struct nvme_cmd_info *cmd;
1067 static struct nvme_completion cqe = {
1068 .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
1071 cmd = blk_mq_rq_to_pdu(req);
1073 if (cmd->ctx == CMD_CTX_CANCELLED)
1076 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
1077 req->tag, nvmeq->qid);
1078 ctx = cancel_cmd_info(cmd, &fn);
1079 fn(nvmeq, ctx, &cqe);
1082 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1084 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
1085 struct nvme_queue *nvmeq = cmd->nvmeq;
1087 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
1090 if (!nvmeq->dev->initialized) {
1092 * Force cancelled command frees the request, which requires we
1093 * return BLK_EH_NOT_HANDLED.
1095 nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
1096 return BLK_EH_NOT_HANDLED;
1098 nvme_abort_req(req);
1101 * The aborted req will be completed when the abort itself completes.
1102 * Re-enable the timer; if it expires a second time, the request triggers
1103 * a controller reset, as the device is then in a faulty state.
1105 return BLK_EH_RESET_TIMER;
1108 static void nvme_free_queue(struct nvme_queue *nvmeq)
1110 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1111 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1112 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1113 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1117 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1120 struct nvme_queue *nvmeq, *next;
1121 struct llist_node *entry;
1124 for (i = dev->queue_count - 1; i >= lowest; i--) {
1125 struct nvme_queue *nvmeq = dev->queues[i];
1126 llist_add(&nvmeq->node, &q_list);
1128 dev->queues[i] = NULL;
1131 entry = llist_del_all(&q_list);
1132 llist_for_each_entry_safe(nvmeq, next, entry, node)
1133 nvme_free_queue(nvmeq);
1137 * nvme_suspend_queue - put queue into suspended state
1138 * @nvmeq: queue to suspend
1140 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1144 spin_lock_irq(&nvmeq->q_lock);
1145 if (nvmeq->cq_vector == -1) {
1146 spin_unlock_irq(&nvmeq->q_lock);
1149 vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
1150 nvmeq->dev->online_queues--;
1151 nvmeq->cq_vector = -1;
1152 spin_unlock_irq(&nvmeq->q_lock);
1154 irq_set_affinity_hint(vector, NULL);
1155 free_irq(vector, nvmeq);
1160 static void nvme_clear_queue(struct nvme_queue *nvmeq)
1162 struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
1164 spin_lock_irq(&nvmeq->q_lock);
1165 nvme_process_cq(nvmeq);
1166 if (hctx && hctx->tags)
1167 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
1168 spin_unlock_irq(&nvmeq->q_lock);
1171 static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1173 struct nvme_queue *nvmeq = dev->queues[qid];
1177 if (nvme_suspend_queue(nvmeq))
1180 /* Don't tell the adapter to delete the admin queue.
1181 * Don't tell a removed adapter to delete IO queues. */
1182 if (qid && readl(&dev->bar->csts) != -1) {
1183 adapter_delete_sq(dev, qid);
1184 adapter_delete_cq(dev, qid);
1186 if (!qid && dev->admin_q)
1187 blk_mq_freeze_queue_start(dev->admin_q);
1188 nvme_clear_queue(nvmeq);
1191 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1194 struct device *dmadev = &dev->pci_dev->dev;
1195 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
1199 nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
1200 &nvmeq->cq_dma_addr, GFP_KERNEL);
1204 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
1205 &nvmeq->sq_dma_addr, GFP_KERNEL);
1206 if (!nvmeq->sq_cmds)
1209 nvmeq->q_dmadev = dmadev;
1211 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1212 dev->instance, qid);
1213 spin_lock_init(&nvmeq->q_lock);
1215 nvmeq->cq_phase = 1;
1216 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1217 nvmeq->q_depth = depth;
1220 dev->queues[qid] = nvmeq;
1225 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1226 nvmeq->cq_dma_addr);
1232 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1235 if (use_threaded_interrupts)
1236 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
1237 nvme_irq_check, nvme_irq, IRQF_SHARED,
1239 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
1240 IRQF_SHARED, name, nvmeq);
1243 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1245 struct nvme_dev *dev = nvmeq->dev;
1247 spin_lock_irq(&nvmeq->q_lock);
1250 nvmeq->cq_phase = 1;
1251 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1252 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1253 dev->online_queues++;
1254 spin_unlock_irq(&nvmeq->q_lock);
1257 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1259 struct nvme_dev *dev = nvmeq->dev;
1262 nvmeq->cq_vector = qid - 1;
1263 result = adapter_alloc_cq(dev, qid, nvmeq);
1267 result = adapter_alloc_sq(dev, qid, nvmeq);
1271 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1275 nvme_init_queue(nvmeq, qid);
1279 adapter_delete_sq(dev, qid);
1281 adapter_delete_cq(dev, qid);
1285 static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
1287 unsigned long timeout;
1288 u32 bit = enabled ? NVME_CSTS_RDY : 0;
1290 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
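/*
 * CAP.TO is expressed in 500 ms units, so the wait above allows
 * (CAP.TO + 1) / 2 seconds; for example a controller reporting TO = 0xf gets
 * up to 8 seconds for CSTS.RDY to follow a CC.EN change.
 */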
1292 while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
1294 if (fatal_signal_pending(current))
1296 if (time_after(jiffies, timeout)) {
1297 dev_err(&dev->pci_dev->dev,
1298 "Device not ready; aborting %s\n", enabled ?
1299 "initialisation" : "reset");
1308 * If the device has been passed off to us in an enabled state, just clear
1309 * the enabled bit. The spec says we should set the 'shutdown notification
1310 * bits', but doing so may cause the device to complete commands to the
1311 * admin queue ... and we don't know what memory that might be pointing at!
1313 static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
1315 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1316 dev->ctrl_config &= ~NVME_CC_ENABLE;
1317 writel(dev->ctrl_config, &dev->bar->cc);
1319 return nvme_wait_ready(dev, cap, false);
1322 static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
1324 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1325 dev->ctrl_config |= NVME_CC_ENABLE;
1326 writel(dev->ctrl_config, &dev->bar->cc);
1328 return nvme_wait_ready(dev, cap, true);
1331 static int nvme_shutdown_ctrl(struct nvme_dev *dev)
1333 unsigned long timeout;
1335 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1336 dev->ctrl_config |= NVME_CC_SHN_NORMAL;
1338 writel(dev->ctrl_config, &dev->bar->cc);
1340 timeout = SHUTDOWN_TIMEOUT + jiffies;
1341 while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
1342 NVME_CSTS_SHST_CMPLT) {
1344 if (fatal_signal_pending(current))
1346 if (time_after(jiffies, timeout)) {
1347 dev_err(&dev->pci_dev->dev,
1348 "Device shutdown incomplete; abort shutdown\n");
1356 static struct blk_mq_ops nvme_mq_admin_ops = {
1357 .queue_rq = nvme_admin_queue_rq,
1358 .map_queue = blk_mq_map_queue,
1359 .init_hctx = nvme_admin_init_hctx,
1360 .exit_hctx = nvme_exit_hctx,
1361 .init_request = nvme_admin_init_request,
1362 .timeout = nvme_timeout,
1365 static struct blk_mq_ops nvme_mq_ops = {
1366 .queue_rq = nvme_queue_rq,
1367 .map_queue = blk_mq_map_queue,
1368 .init_hctx = nvme_init_hctx,
1369 .exit_hctx = nvme_exit_hctx,
1370 .init_request = nvme_init_request,
1371 .timeout = nvme_timeout,
1374 static void nvme_dev_remove_admin(struct nvme_dev *dev)
1376 if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
1377 blk_cleanup_queue(dev->admin_q);
1378 blk_mq_free_tag_set(&dev->admin_tagset);
1382 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1384 if (!dev->admin_q) {
1385 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1386 dev->admin_tagset.nr_hw_queues = 1;
1387 dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
1388 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1389 dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
1390 dev->admin_tagset.cmd_size = sizeof(struct nvme_cmd_info);
1391 dev->admin_tagset.driver_data = dev;
1393 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1396 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
1397 if (IS_ERR(dev->admin_q)) {
1398 blk_mq_free_tag_set(&dev->admin_tagset);
1401 if (!blk_get_queue(dev->admin_q)) {
1402 nvme_dev_remove_admin(dev);
1406 blk_mq_unfreeze_queue(dev->admin_q);
1411 static int nvme_configure_admin_queue(struct nvme_dev *dev)
1415 u64 cap = readq(&dev->bar->cap);
1416 struct nvme_queue *nvmeq;
1417 unsigned page_shift = PAGE_SHIFT;
1418 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
1419 unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
1421 if (page_shift < dev_page_min) {
1422 dev_err(&dev->pci_dev->dev,
1423 "Minimum device page size (%u) too large for "
1424 "host (%u)\n", 1 << dev_page_min,
1428 if (page_shift > dev_page_max) {
1429 dev_info(&dev->pci_dev->dev,
1430 "Device maximum page size (%u) smaller than "
1431 "host (%u); enabling work-around\n",
1432 1 << dev_page_max, 1 << page_shift);
1433 page_shift = dev_page_max;
1436 result = nvme_disable_ctrl(dev, cap);
1440 nvmeq = dev->queues[0];
1442 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1447 aqa = nvmeq->q_depth - 1;
1450 dev->page_size = 1 << page_shift;
1452 dev->ctrl_config = NVME_CC_CSS_NVM;
1453 dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
1454 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1455 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1457 writel(aqa, &dev->bar->aqa);
1458 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1459 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
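/*
 * Admin queue bring-up sketch: AQA holds the zero-based admin SQ and CQ
 * depths (SQ in the low 16 bits, CQ in the high 16 bits), while ASQ and ACQ
 * hold the DMA addresses of the two rings. All three must be programmed
 * before CC.EN is set in nvme_enable_ctrl() below.
 */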
1461 result = nvme_enable_ctrl(dev, cap);
1465 nvmeq->cq_vector = 0;
1466 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1473 nvme_free_queues(dev, 0);
1477 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1478 unsigned long addr, unsigned length)
1480 int i, err, count, nents, offset;
1481 struct scatterlist *sg;
1482 struct page **pages;
1483 struct nvme_iod *iod;
1486 return ERR_PTR(-EINVAL);
1487 if (!length || length > INT_MAX - PAGE_SIZE)
1488 return ERR_PTR(-EINVAL);
1490 offset = offset_in_page(addr);
1491 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1492 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
1494 return ERR_PTR(-ENOMEM);
1496 err = get_user_pages_fast(addr, count, 1, pages);
1504 iod = nvme_alloc_iod(count, length, dev, GFP_KERNEL);
1509 sg_init_table(sg, count);
1510 for (i = 0; i < count; i++) {
1511 sg_set_page(&sg[i], pages[i],
1512 min_t(unsigned, length, PAGE_SIZE - offset),
1514 length -= (PAGE_SIZE - offset);
1517 sg_mark_end(&sg[i - 1]);
1520 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1521 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1531 for (i = 0; i < count; i++)
1534 return ERR_PTR(err);
1537 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1538 struct nvme_iod *iod)
1542 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1543 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1545 for (i = 0; i < iod->nents; i++)
1546 put_page(sg_page(&iod->sg[i]));
1549 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1551 struct nvme_dev *dev = ns->dev;
1552 struct nvme_user_io io;
1553 struct nvme_command c;
1554 unsigned length, meta_len;
1556 struct nvme_iod *iod, *meta_iod = NULL;
1557 dma_addr_t meta_dma_addr;
1558 void *meta, *uninitialized_var(meta_mem);
1560 if (copy_from_user(&io, uio, sizeof(io)))
1562 length = (io.nblocks + 1) << ns->lba_shift;
1563 meta_len = (io.nblocks + 1) * ns->ms;
1565 if (meta_len && ((io.metadata & 3) || !io.metadata))
1568 switch (io.opcode) {
1569 case nvme_cmd_write:
1571 case nvme_cmd_compare:
1572 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
1579 return PTR_ERR(iod);
1581 memset(&c, 0, sizeof(c));
1582 c.rw.opcode = io.opcode;
1583 c.rw.flags = io.flags;
1584 c.rw.nsid = cpu_to_le32(ns->ns_id);
1585 c.rw.slba = cpu_to_le64(io.slba);
1586 c.rw.length = cpu_to_le16(io.nblocks);
1587 c.rw.control = cpu_to_le16(io.control);
1588 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1589 c.rw.reftag = cpu_to_le32(io.reftag);
1590 c.rw.apptag = cpu_to_le16(io.apptag);
1591 c.rw.appmask = cpu_to_le16(io.appmask);
1594 meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
1596 if (IS_ERR(meta_iod)) {
1597 status = PTR_ERR(meta_iod);
1602 meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1603 &meta_dma_addr, GFP_KERNEL);
1609 if (io.opcode & 1) {
1610 int meta_offset = 0;
1612 for (i = 0; i < meta_iod->nents; i++) {
1613 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1614 meta_iod->sg[i].offset;
1615 memcpy(meta_mem + meta_offset, meta,
1616 meta_iod->sg[i].length);
1617 kunmap_atomic(meta);
1618 meta_offset += meta_iod->sg[i].length;
1622 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1625 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1626 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1627 c.rw.prp2 = cpu_to_le64(iod->first_dma);
1629 if (length != (io.nblocks + 1) << ns->lba_shift)
1632 status = nvme_submit_io_cmd(dev, ns, &c, NULL);
1635 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
1636 int meta_offset = 0;
1638 for (i = 0; i < meta_iod->nents; i++) {
1639 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1640 meta_iod->sg[i].offset;
1641 memcpy(meta, meta_mem + meta_offset,
1642 meta_iod->sg[i].length);
1643 kunmap_atomic(meta);
1644 meta_offset += meta_iod->sg[i].length;
1648 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
1653 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1654 nvme_free_iod(dev, iod);
1657 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
1658 nvme_free_iod(dev, meta_iod);
1664 static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
1665 struct nvme_passthru_cmd __user *ucmd)
1667 struct nvme_passthru_cmd cmd;
1668 struct nvme_command c;
1670 struct nvme_iod *uninitialized_var(iod);
1673 if (!capable(CAP_SYS_ADMIN))
1675 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1678 memset(&c, 0, sizeof(c));
1679 c.common.opcode = cmd.opcode;
1680 c.common.flags = cmd.flags;
1681 c.common.nsid = cpu_to_le32(cmd.nsid);
1682 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1683 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1684 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1685 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1686 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1687 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1688 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1689 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1691 length = cmd.data_len;
1693 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1696 return PTR_ERR(iod);
1697 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1698 c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1699 c.common.prp2 = cpu_to_le64(iod->first_dma);
1702 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
1705 if (length != cmd.data_len)
1708 struct request *req;
1710 req = blk_mq_alloc_request(ns->queue, WRITE,
1711 (GFP_KERNEL|__GFP_WAIT), false);
1713 status = PTR_ERR(req);
1715 status = nvme_submit_sync_cmd(req, &c, &cmd.result,
1717 blk_mq_free_request(req);
1720 status = __nvme_submit_admin_cmd(dev, &c, &cmd.result, timeout);
1723 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1724 nvme_free_iod(dev, iod);
1727 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1728 sizeof(cmd.result)))
1734 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1737 struct nvme_ns *ns = bdev->bd_disk->private_data;
1741 force_successful_syscall_return();
1743 case NVME_IOCTL_ADMIN_CMD:
1744 return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
1745 case NVME_IOCTL_IO_CMD:
1746 return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
1747 case NVME_IOCTL_SUBMIT_IO:
1748 return nvme_submit_io(ns, (void __user *)arg);
1749 case SG_GET_VERSION_NUM:
1750 return nvme_sg_get_version_num((void __user *)arg);
1752 return nvme_sg_io(ns, (void __user *)arg);
1758 #ifdef CONFIG_COMPAT
1759 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1760 unsigned int cmd, unsigned long arg)
1764 return -ENOIOCTLCMD;
1766 return nvme_ioctl(bdev, mode, cmd, arg);
1769 #define nvme_compat_ioctl NULL
1772 static int nvme_open(struct block_device *bdev, fmode_t mode)
1777 spin_lock(&dev_list_lock);
1778 ns = bdev->bd_disk->private_data;
1781 else if (!kref_get_unless_zero(&ns->dev->kref))
1783 spin_unlock(&dev_list_lock);
1788 static void nvme_free_dev(struct kref *kref);
1790 static void nvme_release(struct gendisk *disk, fmode_t mode)
1792 struct nvme_ns *ns = disk->private_data;
1793 struct nvme_dev *dev = ns->dev;
1795 kref_put(&dev->kref, nvme_free_dev);
1798 static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
1800 /* some standard values */
1801 geo->heads = 1 << 6;
1802 geo->sectors = 1 << 5;
1803 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1807 static int nvme_revalidate_disk(struct gendisk *disk)
1809 struct nvme_ns *ns = disk->private_data;
1810 struct nvme_dev *dev = ns->dev;
1811 struct nvme_id_ns *id;
1812 dma_addr_t dma_addr;
1815 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
1818 dev_warn(&dev->pci_dev->dev, "%s: Memory alocation failure\n",
1823 if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
1826 lbaf = id->flbas & 0xf;
1827 ns->lba_shift = id->lbaf[lbaf].ds;
1829 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1830 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1832 dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
1836 static const struct block_device_operations nvme_fops = {
1837 .owner = THIS_MODULE,
1838 .ioctl = nvme_ioctl,
1839 .compat_ioctl = nvme_compat_ioctl,
1841 .release = nvme_release,
1842 .getgeo = nvme_getgeo,
1843 .revalidate_disk= nvme_revalidate_disk,
1846 static int nvme_kthread(void *data)
1848 struct nvme_dev *dev, *next;
1850 while (!kthread_should_stop()) {
1851 set_current_state(TASK_INTERRUPTIBLE);
1852 spin_lock(&dev_list_lock);
1853 list_for_each_entry_safe(dev, next, &dev_list, node) {
1855 if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
1857 if (work_busy(&dev->reset_work))
1859 list_del_init(&dev->node);
1860 dev_warn(&dev->pci_dev->dev,
1861 "Failed status: %x, reset controller\n",
1862 readl(&dev->bar->csts));
1863 dev->reset_workfn = nvme_reset_failed_dev;
1864 queue_work(nvme_workq, &dev->reset_work);
1867 for (i = 0; i < dev->queue_count; i++) {
1868 struct nvme_queue *nvmeq = dev->queues[i];
1871 spin_lock_irq(&nvmeq->q_lock);
1872 nvme_process_cq(nvmeq);
1874 while ((i == 0) && (dev->event_limit > 0)) {
1875 if (nvme_submit_async_admin_req(dev))
1879 spin_unlock_irq(&nvmeq->q_lock);
1882 spin_unlock(&dev_list_lock);
1883 schedule_timeout(round_jiffies_relative(HZ));
1888 static void nvme_config_discard(struct nvme_ns *ns)
1890 u32 logical_block_size = queue_logical_block_size(ns->queue);
1891 ns->queue->limits.discard_zeroes_data = 0;
1892 ns->queue->limits.discard_alignment = logical_block_size;
1893 ns->queue->limits.discard_granularity = logical_block_size;
1894 ns->queue->limits.max_discard_sectors = 0xffffffff;
1895 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
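/*
 * REQ_DISCARD requests are translated by nvme_submit_discard() into a single
 * Dataset Management range with the "deallocate" attribute (NVME_DSMGMT_AD);
 * slba/nlb describe the span in logical blocks. nlb is a 32-bit field, which
 * is presumably why max_discard_sectors is simply capped at 0xffffffff above.
 */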
1898 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1899 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1902 struct gendisk *disk;
1903 int node = dev_to_node(&dev->pci_dev->dev);
1906 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1909 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1912 ns->queue = blk_mq_init_queue(&dev->tagset);
1913 if (IS_ERR(ns->queue))
1915 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1916 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1917 queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
1919 ns->queue->queuedata = ns;
1921 disk = alloc_disk_node(0, node);
1923 goto out_free_queue;
1927 lbaf = id->flbas & 0xf;
1928 ns->lba_shift = id->lbaf[lbaf].ds;
1929 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1930 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1931 if (dev->max_hw_sectors)
1932 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
1933 if (dev->stripe_size)
1934 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
1935 if (dev->vwc & NVME_CTRL_VWC_PRESENT)
1936 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1938 disk->major = nvme_major;
1939 disk->first_minor = 0;
1940 disk->fops = &nvme_fops;
1941 disk->private_data = ns;
1942 disk->queue = ns->queue;
1943 disk->driverfs_dev = &dev->pci_dev->dev;
1944 disk->flags = GENHD_FL_EXT_DEVT;
1945 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
1946 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1948 if (dev->oncs & NVME_CTRL_ONCS_DSM)
1949 nvme_config_discard(ns);
1954 blk_cleanup_queue(ns->queue);
1960 static void nvme_create_io_queues(struct nvme_dev *dev)
1964 for (i = dev->queue_count; i <= dev->max_qid; i++)
1965 if (!nvme_alloc_queue(dev, i, dev->q_depth))
1968 for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
1969 if (nvme_create_queue(dev->queues[i], i))
1973 static int set_queue_count(struct nvme_dev *dev, int count)
1977 u32 q_count = (count - 1) | ((count - 1) << 16);
1979 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1984 dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
1988 return min(result & 0xffff, result >> 16) + 1;
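/*
 * The Number of Queues feature reports the allocated counts zero-based, with
 * the submission-queue count in the low 16 bits of the result and the
 * completion-queue count in the high 16 bits; only min(NSQA, NCQA) + 1 queue
 * pairs are usable, hence the expression above.
 */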
1991 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1993 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1996 static int nvme_setup_io_queues(struct nvme_dev *dev)
1998 struct nvme_queue *adminq = dev->queues[0];
1999 struct pci_dev *pdev = dev->pci_dev;
2000 int result, i, vecs, nr_io_queues, size;
2002 nr_io_queues = num_possible_cpus();
2003 result = set_queue_count(dev, nr_io_queues);
2006 if (result < nr_io_queues)
2007 nr_io_queues = result;
2009 size = db_bar_size(dev, nr_io_queues);
2013 dev->bar = ioremap(pci_resource_start(pdev, 0), size);
2016 if (!--nr_io_queues)
2018 size = db_bar_size(dev, nr_io_queues);
2020 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2021 adminq->q_db = dev->dbs;
2024 /* Deregister the admin queue's interrupt */
2025 free_irq(dev->entry[0].vector, adminq);
2028 * If we enable msix early due to not intx, disable it again before
2029 * setting up the full range we need.
2032 pci_disable_msix(pdev);
2034 for (i = 0; i < nr_io_queues; i++)
2035 dev->entry[i].entry = i;
2036 vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
2038 vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
2042 for (i = 0; i < vecs; i++)
2043 dev->entry[i].vector = i + pdev->irq;
2048 * Should investigate if there's a performance win from allocating
2049 * more queues than interrupt vectors; it might allow the submission
2050 * path to scale better, even if the receive path is limited by the
2051 * number of interrupts.
2053 nr_io_queues = vecs;
2054 dev->max_qid = nr_io_queues;
2056 result = queue_request_irq(dev, adminq, adminq->irqname);
2060 /* Free previously allocated queues that are no longer usable */
2061 nvme_free_queues(dev, nr_io_queues + 1);
2062 nvme_create_io_queues(dev);
2067 nvme_free_queues(dev, 1);
2072 * Return: error value if an error occurred setting up the queues or calling
2073 * Identify Device. 0 if these succeeded, even if adding some of the
2074 * namespaces failed. At the moment, these failures are silent. TBD which
2075 * failures should be reported.
2077 static int nvme_dev_add(struct nvme_dev *dev)
2079 struct pci_dev *pdev = dev->pci_dev;
2083 struct nvme_id_ctrl *ctrl;
2084 struct nvme_id_ns *id_ns;
2086 dma_addr_t dma_addr;
2087 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
2089 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
2093 res = nvme_identify(dev, 0, 1, dma_addr);
2095 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
2101 nn = le32_to_cpup(&ctrl->nn);
2102 dev->oncs = le16_to_cpup(&ctrl->oncs);
2103 dev->abort_limit = ctrl->acl + 1;
2104 dev->vwc = ctrl->vwc;
2105 dev->event_limit = min(ctrl->aerl + 1, 8);
2106 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
2107 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
2108 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
2110 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
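/*
 * MDTS is a power of two in units of the controller's minimum page size
 * (CAP.MPSMIN, "shift" above), so converting to 512-byte sectors gives
 * 1 << (mdts + shift - 9); e.g. mdts = 5 with a 4 KiB minimum page allows
 * 32 * 4 KiB = 128 KiB transfers, i.e. 256 sectors. Per the spec, an mdts of
 * zero would mean no transfer size limit.
 */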
2111 if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
2112 (pdev->device == 0x0953) && ctrl->vs[3]) {
2113 unsigned int max_hw_sectors;
2115 dev->stripe_size = 1 << (ctrl->vs[3] + shift);
2116 max_hw_sectors = dev->stripe_size >> (shift - 9);
2117 if (dev->max_hw_sectors) {
2118 dev->max_hw_sectors = min(max_hw_sectors,
2119 dev->max_hw_sectors);
2121 dev->max_hw_sectors = max_hw_sectors;
2124 dev->tagset.ops = &nvme_mq_ops;
2125 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2126 dev->tagset.timeout = NVME_IO_TIMEOUT;
2127 dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
2128 dev->tagset.queue_depth =
2129 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
2130 dev->tagset.cmd_size = sizeof(struct nvme_cmd_info);
2131 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
2132 dev->tagset.driver_data = dev;
2134 if (blk_mq_alloc_tag_set(&dev->tagset))
2138 for (i = 1; i <= nn; i++) {
2139 res = nvme_identify(dev, i, 0, dma_addr);
2143 if (id_ns->ncap == 0)
2146 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
2147 dma_addr + 4096, NULL);
2149 memset(mem + 4096, 0, 4096);
2151 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
2153 list_add_tail(&ns->list, &dev->namespaces);
2155 list_for_each_entry(ns, &dev->namespaces, list)
2160 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
2164 static int nvme_dev_map(struct nvme_dev *dev)
2167 int bars, result = -ENOMEM;
2168 struct pci_dev *pdev = dev->pci_dev;
2170 if (pci_enable_device_mem(pdev))
2173 dev->entry[0].vector = pdev->irq;
2174 pci_set_master(pdev);
2175 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2179 if (pci_request_selected_regions(pdev, bars, "nvme"))
2182 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
2183 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2186 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2190 if (readl(&dev->bar->csts) == -1) {
2196 * Some devices don't advertise INTx interrupts; pre-enable a single
2197 * MSI-X vector for setup. We'll adjust this later.
2200 result = pci_enable_msix(pdev, dev->entry, 1);
2205 cap = readq(&dev->bar->cap);
2206 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2207 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2208 dev->dbs = ((void __iomem *)dev->bar) + 4096;
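/*
 * Controller property sketch: CAP.MQES is the zero-based maximum queue depth,
 * hence the "+ 1" above, and CAP.DSTRD spaces the doorbell registers so each
 * queue pair owns two registers (4 << DSTRD bytes apart) starting at BAR
 * offset 4096. db_bar_size() relies on the same layout when it computes
 * 4096 + (nr_io_queues + 1) * 8 * db_stride bytes to remap.
 */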
2216 pci_release_regions(pdev);
2218 pci_disable_device(pdev);
2222 static void nvme_dev_unmap(struct nvme_dev *dev)
2224 if (dev->pci_dev->msi_enabled)
2225 pci_disable_msi(dev->pci_dev);
2226 else if (dev->pci_dev->msix_enabled)
2227 pci_disable_msix(dev->pci_dev);
2232 pci_release_regions(dev->pci_dev);
2235 if (pci_is_enabled(dev->pci_dev))
2236 pci_disable_device(dev->pci_dev);
2239 struct nvme_delq_ctx {
2240 struct task_struct *waiter;
2241 struct kthread_worker *worker;
2245 static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
2247 dq->waiter = current;
2251 set_current_state(TASK_KILLABLE);
2252 if (!atomic_read(&dq->refcount))
2254 if (!schedule_timeout(ADMIN_TIMEOUT) ||
2255 fatal_signal_pending(current)) {
2257 * Disable the controller first since we can't trust it
2258 * at this point, but leave the admin queue enabled
2259 * until all queue deletion requests are flushed.
2260 * FIXME: This may take a while if there are more h/w
2261 * queues than admin tags.
2263 set_current_state(TASK_RUNNING);
2264 nvme_disable_ctrl(dev, readq(&dev->bar->cap));
2265 nvme_clear_queue(dev->queues[0]);
2266 flush_kthread_worker(dq->worker);
2267 nvme_disable_queue(dev, 0);
2271 set_current_state(TASK_RUNNING);
2274 static void nvme_put_dq(struct nvme_delq_ctx *dq)
2276 atomic_dec(&dq->refcount);
2278 wake_up_process(dq->waiter);
2281 static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
2283 atomic_inc(&dq->refcount);
2287 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2289 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2291 nvme_clear_queue(nvmeq);
2295 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
2296 kthread_work_func_t fn)
2298 struct nvme_command c;
2300 memset(&c, 0, sizeof(c));
2301 c.delete_queue.opcode = opcode;
2302 c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2304 init_kthread_work(&nvmeq->cmdinfo.work, fn);
2305 return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
2309 static void nvme_del_cq_work_handler(struct kthread_work *work)
2311 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2313 nvme_del_queue_end(nvmeq);
2316 static int nvme_delete_cq(struct nvme_queue *nvmeq)
2318 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
2319 nvme_del_cq_work_handler);
2322 static void nvme_del_sq_work_handler(struct kthread_work *work)
2324 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2326 int status = nvmeq->cmdinfo.status;
2329 status = nvme_delete_cq(nvmeq);
2331 nvme_del_queue_end(nvmeq);
2334 static int nvme_delete_sq(struct nvme_queue *nvmeq)
2336 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
2337 nvme_del_sq_work_handler);
2340 static void nvme_del_queue_start(struct kthread_work *work)
2342 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2344 if (nvme_delete_sq(nvmeq))
2345 nvme_del_queue_end(nvmeq);
2348 static void nvme_disable_io_queues(struct nvme_dev *dev)
2351 DEFINE_KTHREAD_WORKER_ONSTACK(worker);
2352 struct nvme_delq_ctx dq;
2353 struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
2354 &worker, "nvme%d", dev->instance);
2356 if (IS_ERR(kworker_task)) {
2357 dev_err(&dev->pci_dev->dev,
2358 "Failed to create queue del task\n");
2359 for (i = dev->queue_count - 1; i > 0; i--)
2360 nvme_disable_queue(dev, i);
2365 atomic_set(&dq.refcount, 0);
2366 dq.worker = &worker;
2367 for (i = dev->queue_count - 1; i > 0; i--) {
2368 struct nvme_queue *nvmeq = dev->queues[i];
2370 if (nvme_suspend_queue(nvmeq))
2372 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
2373 nvmeq->cmdinfo.worker = dq.worker;
2374 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
2375 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
2377 nvme_wait_dq(&dq, dev);
2378 kthread_stop(kworker_task);
2382 * Remove the node from the device list and check
2383 * whether we need to stop the nvme_thread.
2385 static void nvme_dev_list_remove(struct nvme_dev *dev)
2387 struct task_struct *tmp = NULL;
2389 spin_lock(&dev_list_lock);
2390 list_del_init(&dev->node);
2391 if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
2395 spin_unlock(&dev_list_lock);
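
/*
 * Shut the controller down.  If CSTS reports a fatal status, or the
 * controller is no longer ready, the queues are simply reclaimed; otherwise
 * the I/O queues are deleted and an orderly controller shutdown is issued
 * before the device is unmapped.
 */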
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
	int i;
	u32 csts = -1;

	dev->initialized = 0;
	nvme_dev_list_remove(dev);

	if (dev->bar)
		csts = readl(&dev->bar->csts);
	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
		for (i = dev->queue_count - 1; i >= 0; i--) {
			struct nvme_queue *nvmeq = dev->queues[i];
			nvme_suspend_queue(nvmeq);
			nvme_clear_queue(nvmeq);
		}
	} else {
		nvme_disable_io_queues(dev);
		nvme_shutdown_ctrl(dev);
		nvme_disable_queue(dev, 0);
	}
	nvme_dev_unmap(dev);
}

static void nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		if (ns->disk->flags & GENHD_FL_UP)
			del_gendisk(ns->disk);
		if (!blk_queue_dying(ns->queue))
			blk_cleanup_queue(ns->queue);
	}
}
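
/*
 * Two DMA pools back the PRP lists: a page-sized pool for large transfers
 * and a 256-byte pool as an optimisation for smaller I/O.
 */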
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static DEFINE_IDA(nvme_instance_ida);
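
/*
 * Controller instance numbers (the N in nvmeN) come from an IDA; allocation
 * retries while the IDA needs to grow, and dev_list_lock serialises access
 * to it.
 */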
static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}

static void nvme_free_namespaces(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);

		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);

		put_disk(ns->disk);
		kfree(ns);
	}
}

static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);

	pci_dev_put(dev->pci_dev);
	nvme_free_namespaces(dev);
	nvme_release_instance(dev);
	blk_mq_free_tag_set(&dev->tagset);
	blk_put_queue(dev->admin_q);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}
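
/*
 * The per-controller misc device exposes the admin and I/O passthrough
 * ioctls below; openers take a reference on the device so it stays around
 * until the last file handle is closed.
 */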
static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;
	struct nvme_ns *ns;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(dev, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		if (list_empty(&dev->namespaces))
			return -ENOTTY;
		ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
		return nvme_user_cmd(dev, ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
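
/*
 * For illustration only (not part of the driver): userspace reaches these
 * ioctls through the misc device, using struct nvme_admin_cmd and
 * NVME_IOCTL_ADMIN_CMD from the NVMe uapi header.  A hypothetical Identify
 * Controller call (opcode 0x06, CNS=1) might look roughly like this; the
 * device path and field values are example assumptions, not taken from
 * this file:
 *
 *	char buf[4096];
 *	struct nvme_admin_cmd cmd = {
 *		.opcode   = 0x06,
 *		.addr     = (__u64)(uintptr_t)buf,
 *		.data_len = sizeof(buf),
 *		.cdw10    = 1,
 *	};
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	int err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */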

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};
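
/*
 * Point each vector's IRQ affinity hint at the CPUs mapped to the
 * corresponding blk-mq hardware context.
 */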
static void nvme_set_irq_hints(struct nvme_dev *dev)
{
	struct nvme_queue *nvmeq;
	int i;

	for (i = 0; i < dev->online_queues; i++) {
		nvmeq = dev->queues[i];

		if (!nvmeq->hctx)
			continue;

		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
							nvmeq->hctx->cpumask);
	}
}
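
/*
 * Bring the controller up: map the BARs, configure the admin queue, join
 * the polled device list (starting the shared nvme kthread if this is the
 * first controller), allocate admin tags and finally set up the I/O queues.
 */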
static int nvme_dev_start(struct nvme_dev *dev)
{
	int result;
	bool start_thread = false;

	result = nvme_dev_map(dev);
	if (result)
		return result;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;

	spin_lock(&dev_list_lock);
	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
		start_thread = true;
		nvme_thread = NULL;
	}
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	if (start_thread) {
		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
		wake_up_all(&nvme_kthread_wait);
	} else
		wait_event_killable(nvme_kthread_wait, nvme_thread);

	if (IS_ERR_OR_NULL(nvme_thread)) {
		result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
		goto disable;
	}

	nvme_init_queue(dev->queues[0], 0);
	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto disable;

	result = nvme_setup_io_queues(dev);
	if (result)
		goto free_tags;

	nvme_set_irq_hints(dev);

	return result;

 free_tags:
	nvme_dev_remove_admin(dev);
 disable:
	nvme_disable_queue(dev, 0);
	nvme_dev_list_remove(dev);
 unmap:
	nvme_dev_unmap(dev);
	return result;
}

static int nvme_remove_dead_ctrl(void *arg)
{
	struct nvme_dev *dev = (struct nvme_dev *)arg;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_get_drvdata(pdev))
		pci_stop_and_remove_bus_device_locked(pdev);
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static void nvme_remove_disks(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);

	nvme_free_queues(dev, 1);
	nvme_dev_remove(dev);
}

static int nvme_dev_resume(struct nvme_dev *dev)
{
	int ret;

	ret = nvme_dev_start(dev);
	if (ret)
		return ret;
	if (dev->online_queues < 2) {
		spin_lock(&dev_list_lock);
		dev->reset_workfn = nvme_remove_disks;
		queue_work(nvme_workq, &dev->reset_work);
		spin_unlock(&dev_list_lock);
	}
	dev->initialized = 1;
	return 0;
}

static void nvme_dev_reset(struct nvme_dev *dev)
{
	nvme_dev_shutdown(dev);
	if (nvme_dev_resume(dev)) {
		dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
		kref_get(&dev->kref);
		if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
							dev->instance))) {
			dev_err(&dev->pci_dev->dev,
				"Failed to start controller remove task\n");
			kref_put(&dev->kref, nvme_free_dev);
		}
	}
}

static void nvme_reset_failed_dev(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
	nvme_dev_reset(dev);
}

static void nvme_reset_workfn(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
	dev->reset_workfn(work);
}
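
/*
 * PCI probe: allocate the per-controller state, grab an instance number and
 * the PRP pools, start the controller, register the namespaces' block
 * devices and the management misc device.  Errors unwind in reverse order
 * through the labels at the end of the function.
 */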
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;
	dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
							GFP_KERNEL, node);
	if (!dev->entry)
		goto free;
	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
							GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->reset_workfn = nvme_reset_failed_dev;
	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
	dev->pci_dev = pci_dev_get(pdev);
	pci_set_drvdata(pdev, dev);
	result = nvme_set_instance(dev);
	if (result)
		goto put_pci;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto release;

	kref_init(&dev->kref);
	result = nvme_dev_start(dev);
	if (result)
		goto release_pools;

	if (dev->online_queues > 1)
		result = nvme_dev_add(dev);
	if (result)
		goto shutdown;

	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

	nvme_set_irq_hints(dev);

	dev->initialized = 1;
	return 0;

 remove:
	nvme_dev_remove(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_namespaces(dev);
 shutdown:
	nvme_dev_shutdown(dev);
 release_pools:
	nvme_free_queues(dev, 0);
	nvme_release_prp_pools(dev);
 release:
	nvme_release_instance(dev);
 put_pci:
	pci_dev_put(dev->pci_dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (prepare)
		nvme_dev_shutdown(dev);
	else
		nvme_dev_resume(dev);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_shutdown(dev);
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	spin_unlock(&dev_list_lock);

	pci_set_drvdata(pdev, NULL);
	flush_work(&dev->reset_work);
	misc_deregister(&dev->miscdev);
	nvme_dev_remove(dev);
	nvme_dev_shutdown(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_release_prp_pools(dev);
	kref_put(&dev->kref, nvme_free_dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_shutdown(ndev);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
		ndev->reset_workfn = nvme_reset_failed_dev;
		queue_work(nvme_workq, &ndev->reset_work);
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_notify	= nvme_reset_notify,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.err_handler	= &nvme_err_handler,
};
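
/*
 * Module init: create the single-threaded workqueue used for reset work,
 * register the block major and then the PCI driver; module exit tears these
 * down in the reverse order.
 */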
static int __init nvme_init(void)
{
	int result;

	init_waitqueue_head(&nvme_kthread_wait);

	nvme_workq = create_singlethread_workqueue("nvme");
	if (!nvme_workq)
		return -ENOMEM;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_workq;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_workq:
	destroy_workqueue(nvme_workq);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_hotcpu_notifier(&nvme_nb);
	unregister_blkdev(nvme_major, "nvme");
	destroy_workqueue(nvme_workq);
	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);