static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *bio, bool partial_io);
+ struct bio *parent);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
atomic64_dec(&zram->stats.bd_count);
}
-static void zram_page_end_io(struct bio *bio)
-{
- struct page *page = bio_first_page_all(bio);
-
- page_endio(page, op_is_write(bio_op(bio)),
- blk_status_to_errno(bio->bi_status));
- bio_put(bio);
-}
-
-/*
- * Returns 1 if the submission is successful.
- */
-static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+static void read_from_bdev_async(struct zram *zram, struct page *page,
unsigned long entry, struct bio *parent)
{
struct bio *bio;
- bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
- GFP_NOIO);
- if (!bio)
- return -ENOMEM;
-
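+ /*
+ * bio_alloc() cannot fail when it is allowed to sleep (GFP_NOIO), and a
+ * single page always fits in a freshly allocated single-vec bio, so no
+ * error handling is needed here.
+ */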
+ bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
- bio_put(bio);
- return -EIO;
- }
-
- if (!parent)
- bio->bi_end_io = zram_page_end_io;
- else
- bio_chain(bio, parent);
-
+ __bio_add_page(bio, page, PAGE_SIZE, 0);
+ bio_chain(bio, parent);
submit_bio(bio);
- return 1;
}
#define PAGE_WB_SIG "page_index="
/* Need for hugepage writeback racing */
zram_set_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL, false)) {
+ if (zram_read_page(zram, page, index, NULL)) {
zram_slot_lock(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
struct work_struct work;
struct zram *zram;
unsigned long entry;
- struct bio *bio;
- struct bio_vec bvec;
+ struct page *page;
+ int error;
};
static void zram_sync_read(struct work_struct *work)
{
struct zram_work *zw = container_of(work, struct zram_work, work);
- struct zram *zram = zw->zram;
- unsigned long entry = zw->entry;
- struct bio *bio = zw->bio;
+ struct bio_vec bv;
+ struct bio bio;
- read_from_bdev_async(zram, &zw->bvec, entry, bio);
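+ /*
+ * submit_bio_wait() does not return until the read has completed, so an
+ * on-stack bio and bio_vec are safe to use here.
+ */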
+ bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
+ bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
+ __bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
+ zw->error = submit_bio_wait(&bio);
}
/*
* The block layer wants one ->submit_bio to be active at a time, so if we use
* chained IO with parent IO in the same context, it's a deadlock. To avoid that,
* use a worker thread context.
*/
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *bio)
+static int read_from_bdev_sync(struct zram *zram, struct page *page,
+ unsigned long entry)
{
struct zram_work work;
- work.bvec = *bvec;
+ work.page = page;
work.zram = zram;
work.entry = entry;
- work.bio = bio;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
queue_work(system_unbound_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
- return 1;
+ return work.error;
}
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *parent, bool sync)
+static int read_from_bdev(struct zram *zram, struct page *page,
+ unsigned long entry, struct bio *parent)
{
atomic64_inc(&zram->stats.bd_reads);
- if (sync) {
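+ /*
+ * A NULL parent bio means the caller needs the data before it can
+ * continue, which only happens for partial IO.
+ */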
+ if (!parent) {
if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
return -EIO;
- return read_from_bdev_sync(zram, bvec, entry, parent);
+ return read_from_bdev_sync(zram, page, entry);
}
- return read_from_bdev_async(zram, bvec, entry, parent);
+ read_from_bdev_async(zram, page, entry, parent);
+ return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *parent, bool sync)
+static int read_from_bdev(struct zram *zram, struct page *page,
+ unsigned long entry, struct bio *parent)
{
return -EIO;
}
~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
-/*
- * Reads a page from the writeback devices. Corresponding ZRAM slot
- * should be unlocked.
- */
-static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
- u32 index, struct bio *bio, bool partial_io)
-{
- struct bio_vec bvec;
-
- bvec_set_page(&bvec, page, PAGE_SIZE, 0);
- return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
- partial_io);
-}
-
/*
* Reads (decompresses if needed) a page from zspool (zsmalloc).
* Corresponding ZRAM slot should be locked.
}
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *bio, bool partial_io)
+ struct bio *parent)
{
int ret;
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
- /* Slot should be unlocked before the function call */
+ /*
+ * The slot should be unlocked before reading from the backing
+ * device.
+ */
zram_slot_unlock(zram, index);
- ret = zram_bvec_read_from_bdev(zram, page, index, bio,
- partial_io);
+ ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ parent);
}
/* Should NEVER happen. Return bio error if it does. */
/*
* Use a temporary buffer to decompress the page, as the decompressor
* always expects a full page for the output.
*/
static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+ u32 index, int offset)
{
struct page *page = alloc_page(GFP_NOIO);
int ret;
if (!page)
return -ENOMEM;
- ret = zram_read_page(zram, page, index, bio, true);
+ ret = zram_read_page(zram, page, index, NULL);
if (likely(!ret))
memcpy_to_bvec(bvec, page_address(page) + offset);
__free_page(page);
u32 index, int offset, struct bio *bio)
{
if (is_partial_io(bvec))
- return zram_bvec_read_partial(zram, bvec, index, offset, bio);
- return zram_read_page(zram, bvec->bv_page, index, bio, false);
+ return zram_bvec_read_partial(zram, bvec, index, offset);
+ return zram_read_page(zram, bvec->bv_page, index, bio);
}
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, struct bio *bio)
+static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
unsigned long alloced_pages;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
- struct page *page = bvec->bv_page;
unsigned long element = 0;
enum zram_pageflags flags = 0;
return ret;
}
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+/*
+ * This is a partial IO. Read the full page before writing the changes.
+ */
+static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
{
+ struct page *page = alloc_page(GFP_NOIO);
int ret;
- struct page *page = NULL;
- struct bio_vec vec;
-
- vec = *bvec;
- if (is_partial_io(bvec)) {
- /*
- * This is a partial IO. We need to read the full page
- * before to write the changes.
- */
- page = alloc_page(GFP_NOIO);
- if (!page)
- return -ENOMEM;
- ret = zram_read_page(zram, page, index, bio, true);
- if (ret)
- goto out;
+ if (!page)
+ return -ENOMEM;
+ ret = zram_read_page(zram, page, index, bio);
+ if (!ret) {
memcpy_from_bvec(page_address(page) + offset, bvec);
-
- bvec_set_page(&vec, page, PAGE_SIZE, 0);
+ ret = zram_write_page(zram, page, index);
}
+ __free_page(page);
+ return ret;
+}
- ret = __zram_bvec_write(zram, &vec, index, bio);
-out:
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
if (is_partial_io(bvec))
- __free_page(page);
- return ret;
+ return zram_bvec_write_partial(zram, bvec, index, offset, bio);
+ return zram_write_page(zram, bvec->bv_page, index);
}
#ifdef CONFIG_ZRAM_MULTI_COMP
/*
* No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in __zram_bvec_write()) since
+ * re-compression attempt (unlike in zram_write_page()) since
* we already have stored that object in zsmalloc. If we cannot
* alloc memory for recompressed object then we bail out and
* simply keep the old (existing) object in zsmalloc.
* creates a new un-initialized zram device and returns back this device's
* device_id (or an error code if it fails to create a new device).
*/
-static ssize_t hot_add_show(struct class *class,
- struct class_attribute *attr,
+static ssize_t hot_add_show(const struct class *class,
+ const struct class_attribute *attr,
char *buf)
{
int ret;
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
__ATTR(hot_add, 0400, hot_add_show, NULL);
-static ssize_t hot_remove_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t hot_remove_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf,
size_t count)
{
static struct class zram_control_class = {
.name = "zram-control",
- .owner = THIS_MODULE,
.class_groups = zram_control_class_groups,
};