*error_sector = bio->bi_sector;
ret = 0;
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- else if (!bio_flagged(bio, BIO_UPTODATE))
+ if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
submit_bio(DISCARD_BARRIER, bio);
/* Check if it failed immediately */
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- else if (!bio_flagged(bio, BIO_UPTODATE))
+ if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
}
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+void blk_queue_set_noflush(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (test_and_set_bit(QUEUE_FLAG_NOFLUSH, &q->queue_flags))
+ return;
+
+	printk(KERN_ERR "Device %s does not appear to honor cache flushes. "
+	       "This may mean that file system ordering guarantees "
+	       "are not met.\n", bdevname(bdev, b));
+}
+
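For illustration, a submitter that wants to avoid issuing barriers against a
device already known not to honor flushes could consult the new flag before
picking the request type. The helper below is a hypothetical sketch, not part
of this patch; it relies only on bdev_get_queue() and the QUEUE_FLAG_NOFLUSH
bit added here:

	/* Hypothetical helper (sketch): downgrade barriers on no-flush devices */
	static int pick_write_type(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		if (test_bit(QUEUE_FLAG_NOFLUSH, &q->queue_flags))
			return WRITE;	/* the barrier would be ignored anyway */
		return WRITE_BARRIER;
	}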
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
wait_for_completion(&wait);
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- else if (!bio_flagged(bio, BIO_UPTODATE))
+ if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
}
**/
void bio_endio(struct bio *bio, int error)
{
- if (error)
+	/*
+	 * Special case - hide an -EOPNOTSUPP coming from the driver or
+	 * block layer from the submitter, and print a warning the first
+	 * time it happens so the admin knows that the ordering guarantees
+	 * the file system needs may not be provided. Don't clear the
+	 * uptodate bit.
+	 */
+ if (error == -EOPNOTSUPP && bio_barrier(bio)) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ blk_queue_set_noflush(bio->bi_bdev);
+ error = 0;
+ } else if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
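With this change, a driver that cannot honor barriers can simply complete the
bio with -EOPNOTSUPP; bio_endio() swallows the error, the submitter sees a
successful completion, and the warning is printed once per queue. A
hypothetical ->make_request style rejection path (sketch only; the function
name is made up):

	static int example_make_request(struct request_queue *q, struct bio *bio)
	{
		if (bio_barrier(bio)) {
			/* bio_endio() hides the error and flags the queue */
			bio_endio(bio, -EOPNOTSUPP);
			return 0;
		}
		/* ... normal submission path ... */
		return 0;
	}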
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
- printk(KERN_WARNING "lost page write due to "
- "I/O error on %s\n",
- bdevname(bh->b_bdev, b));
- }
	/* note, we don't set_buffer_write_io_error because we have
	 * our own ways of dealing with the IO errors
*/
static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
unsigned long bio_flags)
{
- int ret = 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private;
bio->bi_private = NULL;
- bio_get(bio);
-
if (tree->ops && tree->ops->submit_bio_hook)
tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
mirror_num, bio_flags);
else
submit_bio(rw, bio);
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- bio_put(bio);
- return ret;
+
+ return 0;
}
static int submit_extent_page(int rw, struct extent_io_tree *tree,
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
- char b[BDEVNAME_SIZE];
-
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
- buffer_io_error(bh);
- printk(KERN_WARNING "lost page write due to "
- "I/O error on %s\n",
- bdevname(bh->b_bdev, b));
- }
set_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
}
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- set_bit(BH_Eopnotsupp, &bh->b_state);
+ err = 0;
}
	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
int submit_bh(int rw, struct buffer_head * bh)
{
struct bio *bio;
- int ret = 0;
BUG_ON(!buffer_locked(bh));
BUG_ON(!buffer_mapped(bh));
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- bio_get(bio);
submit_bio(rw, bio);
-
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
-
- bio_put(bio);
- return ret;
+ return 0;
}
/**
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(WRITE, bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- ret = -EOPNOTSUPP;
- }
if (!ret && !buffer_uptodate(bh))
ret = -EIO;
} else {
ll_rw_block(SWRITE, nr_bhs, bhs);
for (i = 0; i < nr_bhs; i++) {
wait_on_buffer(bhs[i]);
- if (buffer_eopnotsupp(bhs[i])) {
- clear_buffer_eopnotsupp(bhs[i]);
- err = -EOPNOTSUPP;
- } else if (!err && !buffer_uptodate(bhs[i]))
+ if (!err && !buffer_uptodate(bhs[i]))
err = -EIO;
}
return err;
struct gfs2_log_header *lh;
unsigned int tail;
u32 hash;
+ int rw;
bh = sb_getblk(sdp->sd_vfs, blkno);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
- goto skip_barrier;
+		rw = WRITE_SYNC;	/* the write is waited on immediately */
+ else
+ rw = WRITE_BARRIER;
+
get_bh(bh);
- submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh);
+ submit_bh(rw | (1 << BIO_RW_META), bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- set_buffer_uptodate(bh);
- set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- lock_buffer(bh);
-skip_barrier:
- get_bh(bh);
- submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
- wait_on_buffer(bh);
- }
if (!buffer_uptodate(bh))
gfs2_io_error_bh(sdp, bh);
brelse(bh);
{
int ret = 0;
-retry:
clear_buffer_dirty(bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
- printk(KERN_WARNING
- "JBD2: wait_on_commit_record: sync failed on %s - "
- "disabling barriers\n", journal->j_devname);
- spin_lock(&journal->j_state_lock);
- journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
-
- lock_buffer(bh);
- clear_buffer_dirty(bh);
- set_buffer_uptodate(bh);
- bh->b_end_io = journal_end_buffer_io_sync;
-
- ret = submit_bh(WRITE_SYNC, bh);
- if (ret) {
- unlock_buffer(bh);
- return ret;
- }
- goto retry;
- }
-
if (unlikely(!buffer_uptodate(bh)))
ret = -EIO;
put_bh(bh); /* One for getblk() */
return submit_bh(WRITE_BARRIER, bh);
}
-static void check_barrier_completion(struct super_block *s,
- struct buffer_head *bh)
-{
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- disable_barrier(s);
- set_buffer_uptodate(bh);
- set_buffer_dirty(bh);
- sync_dirty_buffer(bh);
- }
-}
-
#define CHUNK_SIZE 32
struct buffer_chunk {
struct buffer_head *bh[CHUNK_SIZE];
} else
wait_on_buffer(jl->j_commit_bh);
- check_barrier_completion(s, jl->j_commit_bh);
-
/* If there was a write error in the journal - we can't commit this
* transaction - it will be invalid and, if successful, will just end
* up propagating the write error out to the filesystem. */
goto sync;
}
wait_on_buffer(journal->j_header_bh);
- check_barrier_completion(sb, journal->j_header_bh);
} else {
sync:
set_buffer_dirty(journal->j_header_bh);
bio->bi_end_io = xfs_end_bio;
submit_bio(WRITE, bio);
- ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
bio_put(bio);
}
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
+#define QUEUE_FLAG_NOFLUSH 16 /* device doesn't do flushes */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);
+extern void blk_queue_set_noflush(struct block_device *bdev);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
BH_Boundary, /* Block is followed by a discontiguity */
BH_Write_EIO, /* I/O error on write */
BH_Ordered, /* ordered write */
- BH_Eopnotsupp, /* operation not supported (barrier) */
BH_Unwritten, /* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer Error Printks to be quiet */
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Ordered, ordered)
-BUFFER_FNS(Eopnotsupp, eopnotsupp)
BUFFER_FNS(Unwritten, unwritten)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)