nbd: switch to using blk_queue_write_cache()
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd100a329c67b97e616c9f9037c508e..c903bee43cf85221a5a007d6c3b60b8b0aa81b23 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;
 
-       if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-               max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+       if ((max_hw_sectors << 9) < PAGE_SIZE) {
+               max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
  **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-       if (max_size < PAGE_CACHE_SIZE) {
-               max_size = PAGE_CACHE_SIZE;
+       if (max_size < PAGE_SIZE) {
+               max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
  **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-       if (mask < PAGE_CACHE_SIZE - 1) {
-               mask = PAGE_CACHE_SIZE - 1;
+       if (mask < PAGE_SIZE - 1) {
+               mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:         the request queue for the device
+ * @wc:                write back cache on or off
+ * @fua:       device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+       spin_lock_irq(q->queue_lock);
+       if (wc) {
+               queue_flag_set(QUEUE_FLAG_WC, q);
+               q->flush_flags = REQ_FLUSH;
+       } else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       if (fua) {
+               if (wc)
+                       q->flush_flags |= REQ_FUA;
+               queue_flag_set(QUEUE_FLAG_FUA, q);
+       } else
+               queue_flag_clear(QUEUE_FLAG_FUA, q);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
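
For reference, here is a minimal sketch of how a driver would adopt the new helper instead of poking q->flush_flags directly. The nbd call site and the NBD_FLAG_SEND_FLUSH check are reconstructed from the commit subject and the driver's flush negotiation, so treat the exact expressions as illustrative rather than verbatim:

	/*
	 * Illustrative only: advertise a write-back cache when the nbd
	 * server negotiated flush support. FUA is passed as false here
	 * because the driver does not negotiate it in this sketch.
	 */
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);

Note the asymmetry inside the helper itself: QUEUE_FLAG_FUA is set whenever @fua is true, but REQ_FUA is folded into q->flush_flags only when a write-back cache is also present, since a write-through device needs no flush machinery to make writes durable.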