Commit abf545484d31 changed the ->rw_page() interface from an 'rw'
flags type to the newer ops-based interface, but now we're effectively
leaking some bdev internals to the rest of the kernel. Since we only
care about whether it's a read or a write at that level, just pass in
a bool 'is_write' parameter instead.
Then we can also move op_is_write() and friends back under
CONFIG_BLOCK protection.
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
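
At the driver level the change looks like this (the prototypes are the
ones from the hunks below; foo_* is a made-up example driver, and
foo_do_io() stands in for whatever I/O path such a driver actually has):

    /* before: callers passed a REQ_OP_* value */
    int (*rw_page)(struct block_device *, sector_t, struct page *, int op);

    /* after: callers pass true for a write, false for a read */
    int (*rw_page)(struct block_device *, sector_t, struct page *, bool);

    /* a driver then branches on the bool directly instead of calling
     * op_is_write(), and completes the page the same way: */
    static int foo_rw_page(struct block_device *bdev, sector_t sector,
                           struct page *page, bool is_write)
    {
        struct foo_device *foo = bdev->bd_disk->private_data;
        /* foo_do_io() is hypothetical, standing in for the real I/O path */
        int err = foo_do_io(foo, page, sector, is_write);

        page_endio(page, is_write, err);
        return err;
    }

Callers that still have a REQ_OP_* value in hand just pass
op_is_write(op), as the bio-walking hunks below do.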
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int op,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
void *mem;
int err = 0;
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!op_is_write(op)) {
+ if (!is_write) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
unsigned int len = bvec.bv_len;
int err;
- err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, bio_op(bio), sector);
+ err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, err);
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int op)
+ int offset, bool is_write)
{
unsigned long start_time = jiffies;
+ int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
- generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
- if (!op_is_write(op)) {
+ if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(op, &zram->disk->part0, start_time);
+ generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index + 1, 0,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
} else
if (zram_bvec_rw(zram, &bvec, index, offset,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
update_position(&index, &offset, &bvec);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
int offset, err = -EIO;
u32 index;
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, op);
+ err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
zram_meta_put(zram);
out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, op, 0);
+ page_endio(page, is_write, 0);
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- int op, sector_t sector)
+ bool is_write, sector_t sector)
- if (!op_is_write(op)) {
+ if (!is_write) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
BUG_ON(len % btt->sector_size);
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- bio_op(bio), iter.bi_sector);
+ op_is_write(bio_op(bio)), iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op, 0);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, 0);
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, int op,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
int rc = 0;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!op_is_write(op)) {
+ if (!is_write) {
if (unlikely(bad_pmem))
rc = -EIO;
else {
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_op(bio),
+ bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector);
if (rc) {
bio->bi_error = rc;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, op, 0);
+ page_endio(page, is_write, 0);
result = blk_queue_enter(bdev->bd_queue, false);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
- REQ_OP_READ);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
blk_queue_exit(bdev->bd_queue);
return result;
}
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
- REQ_OP_WRITE);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
if (result)
end_page_writeback(page);
else
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- page_endio(page, bio_op(bio), bio->bi_error);
+ page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
-enum req_op {
- REQ_OP_READ,
- REQ_OP_WRITE,
- REQ_OP_DISCARD, /* request to discard sectors */
- REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
- REQ_OP_WRITE_SAME, /* write same block many times */
- REQ_OP_FLUSH, /* request for cache flush */
-};
-
-#define REQ_OP_BITS 3
-
#ifdef CONFIG_BLOCK
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+enum req_op {
+ REQ_OP_READ,
+ REQ_OP_WRITE,
+ REQ_OP_DISCARD, /* request to discard sectors */
+ REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
+ REQ_OP_WRITE_SAME, /* write same block many times */
+ REQ_OP_FLUSH, /* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-static inline bool op_is_write(unsigned int op)
-{
- return op == REQ_OP_READ ? false : true;
-}
/*
* return data direction, READ or WRITE
*/
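
The hunk above is the removal half of the CONFIG_BLOCK move from the
commit message; the helper itself is unchanged and simply gets re-added
inside the #ifdef CONFIG_BLOCK region. A sketch of the result, assuming
it lands in the same header as the enum req_op definition moved above:

    #ifdef CONFIG_BLOCK
    /* ... */
    /* only visible to code that can see the block op definitions */
    static inline bool op_is_write(unsigned int op)
    {
        return op == REQ_OP_READ ? false : true;
    }
    /* ... */
    #endif /* CONFIG_BLOCK */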
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
-void page_endio(struct page *page, int op, int err);
+void page_endio(struct page *page, bool is_write, int err);
/*
* Add an arbitrary waiter to a page's wait queue
* After completing I/O on a page, call this routine to update the page
* flags appropriately
*/
-void page_endio(struct page *page, int op, int err)
+void page_endio(struct page *page, bool is_write, int err)
- if (!op_is_write(op)) {
+ if (!is_write) {
if (!err) {
SetPageUptodate(page);
} else {