NVMe: default to 4k device page size
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8187df20469535023ae9966aecfb1542260b4270..d9d6229e9f3f011b18b914c8ae6eb60bcacce266 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -968,7 +968,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;
 
-       writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+       if (likely(nvmeq->cq_vector >= 0))
+               writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
 
@@ -1727,9 +1728,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        u32 aqa;
        u64 cap = lo_hi_readq(&dev->bar->cap);
        struct nvme_queue *nvmeq;
-       unsigned page_shift = PAGE_SHIFT;
+       /*
+        * default to a 4K page size, with the intention to update this
+        * path in the future to accommodate architectures with differing
+        * kernel and IO page sizes.
+        */
+       unsigned page_shift = 12;
        unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-       unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
        if (page_shift < dev_page_min) {
                dev_err(dev->dev,
@@ -1738,13 +1743,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
                                1 << page_shift);
                return -ENODEV;
        }
-       if (page_shift > dev_page_max) {
-               dev_info(dev->dev,
-                               "Device maximum page size (%u) smaller than "
-                               "host (%u); enabling work-around\n",
-                               1 << dev_page_max, 1 << page_shift);
-               page_shift = dev_page_max;
-       }
 
        dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
                                                NVME_CAP_NSSRC(cap) : 0;
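
With page_shift fixed at 12, the only check that still matters is against the controller's advertised minimum: CAP.MPSMIN encodes the smallest supported memory page size as a power of two relative to 4 KiB, i.e. 2^(12 + MPSMIN) bytes, which is why the code above adds 12. A minimal standalone sketch of that arithmetic, assuming the CAP bit layout from the NVMe specification (the cap value and the helper below are illustrative, not the driver's own code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: MPSMIN occupies CAP bits 51:48 per the NVMe spec. */
static unsigned cap_mpsmin(uint64_t cap)
{
        return (cap >> 48) & 0xf;
}

int main(void)
{
        uint64_t cap = 0;               /* hypothetical controller with MPSMIN = 0 */
        unsigned page_shift = 12;       /* the fixed 4 KiB default from this patch */
        unsigned dev_page_min = cap_mpsmin(cap) + 12;

        if (page_shift < dev_page_min)
                printf("device requires pages of at least %u bytes\n",
                       1u << dev_page_min);
        else
                printf("4 KiB host pages meet the device minimum of %u bytes\n",
                       1u << dev_page_min);
        return 0;
}
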
@@ -2268,7 +2266,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
        if (dev->max_hw_sectors) {
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
                blk_queue_max_segments(ns->queue,
-                       ((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+                       (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
        }
        if (dev->stripe_size)
                blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
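
The reworked segment limit above divides the sector count by sectors-per-page instead of scaling sectors to bytes first: max_hw_sectors / (page_size >> 9) equals (max_hw_sectors << 9) / page_size whenever page_size is a multiple of 512, but it avoids left-shifting max_hw_sectors by 9 for large transfer limits. A worked example under the patch's 4 KiB page size (the transfer limit below is hypothetical):

#include <stdio.h>

int main(void)
{
        unsigned max_hw_sectors = 2048; /* hypothetical 1 MiB maximum transfer */
        unsigned page_size = 4096;      /* the 4 KiB default set by this patch */

        /* 4096 >> 9 == 8 sectors per device page */
        unsigned max_segments = max_hw_sectors / (page_size >> 9) + 1;

        /* 2048 / 8 + 1 == 257 */
        printf("max segments: %u\n", max_segments);
        return 0;
}
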
@@ -2787,6 +2785,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
        nvme_put_dq(dq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,