unsigned long start_time;
void *data;
struct hlist_node node;
+ struct task_struct *map_task;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
/* last member of dm_target_io is 'struct bio' */
return 1;
}
- dm_submit_bio_remap(io->base_bio, clone, (gfp != CRYPT_MAP_READ_GFP));
+ dm_submit_bio_remap(io->base_bio, clone);
return 0;
}
{
struct bio *clone = io->ctx.bio_out;
- dm_submit_bio_remap(io->base_bio, clone, true);
+ dm_submit_bio_remap(io->base_bio, clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
- dm_submit_bio_remap(io->base_bio, clone, true);
+ dm_submit_bio_remap(io->base_bio, clone);
return;
}
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
bio = n;
}
}
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
return;
}
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
}
}
this_cpu_inc(*md->pending_io);
io->orig_bio = NULL;
io->md = md;
+ io->map_task = current;
spin_lock_init(&io->endio_lock);
io->start_time = jiffies;
/*
* @clone: clone bio that DM core passed to target's .map function
* @tgt_clone: clone of @clone bio that target needs submitted
- * @from_wq: caller is a workqueue thread managed by DM target
*
* Targets should use this interface to submit bios they take
* ownership of when returning DM_MAPIO_SUBMITTED.
*
* Target should also enable ti->accounts_remapped_io
*/
-void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone,
- bool from_wq)
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
struct dm_target_io *tio = clone_to_tio(clone);
struct dm_io *io = tio->io;
 * Account io->orig_bio to DM dev on behalf of target
* that took ownership of IO with DM_MAPIO_SUBMITTED.
*/
- if (!from_wq) {
+ if (io->map_task == current) {
/* Still in target's map function */
io->start_io_acct = true;
} else {
}
error = __split_and_process_bio(&ci);
+ ci.io->map_task = NULL;
if (error || !ci.sector_count)
goto out;
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, bool from_wq);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED