switch (ioprio_class) {
default:
pr_err("bdi %s: bfq: bad prio class %d\n",
- bdi_dev_name(queue_to_disk(bfqq->bfqd->queue)->bdi),
+ bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
ioprio_class);
fallthrough;
case IOPRIO_CLASS_NONE:
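Every hunk in this patch applies the same transformation: the queue_has_disk()/queue_to_disk() helpers, which recovered the gendisk from the queue's sysfs parent kobject, are replaced by a direct q->disk back-pointer (added to struct request_queue in the header hunk further down).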
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- if (!queue_has_disk(blkg->q) || !queue_to_disk(blkg->q)->bdi->dev)
+ if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
return NULL;
- return bdi_dev_name(queue_to_disk(blkg->q)->bdi);
+ return bdi_dev_name(blkg->q->disk->bdi);
}
/**
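blkg_dev_name() keeps its two-step guard, now spelled as plain pointer checks: the queue may have no gendisk attached, and the bdi may not have a registered device yet. A minimal caller sketch, with blkg_print_dev() as a hypothetical name that is not part of the patch:

    /* Hypothetical caller: blkg_dev_name() may return NULL, so fall back. */
    static void blkg_print_dev(struct blkcg_gq *blkg)
    {
            const char *name = blkg_dev_name(blkg);

            pr_info("blkg on %s\n", name ? name : "(no device)");
    }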
__blk_mq_dec_active_requests(hctx);
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
- laptop_io_completion(queue_to_disk(q)->bdi);
+ laptop_io_completion(q->disk->bdi);
rq_qos_done(q, rq);
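Unlike most call sites below, this completion path dereferences q->disk without a NULL check, presumably because the !blk_rq_is_passthrough(rq) condition already excludes the disk-less case: file-system requests only exist on queues with an attached gendisk.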
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;
- if (!queue_has_disk(q))
+ if (!q->disk)
return;
- queue_to_disk(q)->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
+ q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
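Worked unit conversion for io_pages: max_sectors counts 512-byte sectors, so the shift is PAGE_SHIFT - 9. With 4 KiB pages (PAGE_SHIFT == 12) that is a shift of 3, and max_sectors = 2560 (1.25 MiB) yields io_pages = 2560 >> 3 = 320.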
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
- if (!queue_has_disk(q))
+ if (!q->disk)
return;
- queue_to_disk(q)->bdi->ra_pages =
+ q->disk->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
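The readahead window is sized to twice the optimal I/O size, with VM_READAHEAD_PAGES (128 KiB worth of pages) as the floor. For example, io_opt = 512 KiB with 4 KiB pages gives 524288 * 2 / 4096 = 256 pages (1 MiB), well above the 32-page floor.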
{
unsigned long ra_kb;
- if (!queue_has_disk(q))
+ if (!q->disk)
return -EINVAL;
- ra_kb = queue_to_disk(q)->bdi->ra_pages << (PAGE_SHIFT - 10);
+ ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
return queue_var_show(ra_kb, page);
}

unsigned long ra_kb;
ssize_t ret;
- if (!queue_has_disk(q))
+ if (!q->disk)
return -EINVAL;
ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
- queue_to_disk(q)->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+ q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
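Both sysfs handlers keep the disk-less guard (-EINVAL), now a plain q->disk check. The PAGE_SHIFT - 10 shifts convert between KiB and pages: with 4 KiB pages the shift is 2, so writing 128 to read_ahead_kb stores ra_pages = 128 >> 2 = 32, and the show side reports 32 << 2 = 128 back.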
spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
- if (queue_has_disk(q))
- queue_to_disk(q)->bdi->io_pages =
- max_sectors_kb >> (PAGE_SHIFT - 10);
+ if (q->disk)
+ q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(&q->queue_lock);
return ret;

*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &queue_to_disk(rwb->rqos.q)->bdi->wb;
+ struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
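wb_recent_wait() reads the root writeback of the disk's bdi; wb->dirty_sleep records the last time a task slept in balance_dirty_pages(), so the test reports whether that happened within the last second (HZ jiffies).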
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
- struct backing_dev_info *bdi = queue_to_disk(rwb->rqos.q)->bdi;
+ struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat;

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
- struct backing_dev_info *bdi = queue_to_disk(rwb->rqos.q)->bdi;
+ struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,

status = latency_exceeded(rwb, cb->stat);
- trace_wbt_timer(queue_to_disk(rwb->rqos.q)->bdi, status,
- rqd->scale_step, inflight);
+ trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
+ inflight);
/*
* If we exceeded the latency target, step down. If we did not,

disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
+ disk->queue->disk = NULL;
blk_put_queue(disk->queue);
iput(disk->part0->bd_inode); /* frees the disk */
}
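The release side clears the back-pointer before the final blk_put_queue(), since a request_queue can outlive its gendisk. Code holding only a queue reference therefore has to treat q->disk as optional, as in this sketch (queue_bdi_or_null() is a hypothetical helper, not part of the patch):

    /* Hypothetical helper: resolve the bdi only while a disk is attached. */
    static inline struct backing_dev_info *queue_bdi_or_null(struct request_queue *q)
    {
            return q->disk ? q->disk->bdi : NULL;
    }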
device_initialize(disk_to_dev(disk));
inc_diskseq(disk);
disk->queue = q;
+ q->disk = disk;
lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
INIT_LIST_HEAD(&disk->slave_bdevs);
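The link is established in both directions at allocation time (disk->queue = q, q->disk = disk); together with the disk_release() hunk above, that bounds the new pointer's validity to the gendisk's lifetime.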
spinlock_t queue_lock;
+ struct gendisk *disk;
+
/*
* queue kobject
*/
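This one pointer is the only structural change in the patch; every other hunk is a mechanical conversion to it.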
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
-#define queue_has_disk(q) ((q)->kobj.parent != NULL)
-#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
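The removed helpers were the fragile part: queue_to_disk() walked q->kobj.parent, which only resolves to a gendisk while the queue kobject is registered under one in sysfs. A sketch of the two access patterns side by side (not part of the patch):

    /* Before: recover the disk from the sysfs hierarchy; only valid
     * while the queue kobject is parented to the disk. */
    struct gendisk *disk = dev_to_disk(kobj_to_dev(q->kobj.parent));

    /* After: follow the explicit back-pointer, set in __alloc_disk_node()
     * and cleared in disk_release(). */
    struct gendisk *disk = q->disk;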
),
TP_fast_assign(
- __entry->dev = disk_devt(queue_to_disk(q));
+ __entry->dev = disk_devt(q->disk);
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;

),
TP_fast_assign(
- __entry->dev = disk_devt(queue_to_disk(q));
+ __entry->dev = disk_devt(q->disk);
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),

),
TP_fast_assign(
- __entry->dev = disk_devt(queue_to_disk(q));
+ __entry->dev = disk_devt(q->disk);
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
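The tracepoint hunks get the same mechanical conversion: disk_devt() builds the dev_t from the disk's major number and first minor, so the device number no longer has to be derived through the queue's kobject chain.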