summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: 8827184)
Add a pre-sleep checker and workqueue worker ops, so we can push this
decision to the owner of the workqueue.
Signed-off-by: Jens Axboe <axboe@fb.com>
+/*
+ * Pre-sleep flush check for the IO workqueue: return true when the
+ * thread still has IO accounted as queued, pending at depth, or in
+ * flight, meaning the worker must flush before it may go to sleep.
+ */
+static bool io_workqueue_pre_sleep_flush_fn(struct thread_data *td)
+{
+ if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
+ return true;
+
+ return false;
+}
+
+/*
+ * Pre-sleep hook for the IO workqueue: quiesce outstanding IO via
+ * io_u_quiesce(). A positive return is the number of events reaped,
+ * which is subtracted from the current depth accounting.
+ */
+static void io_workqueue_pre_sleep_fn(struct thread_data *td)
+{
+ int ret;
+
+ ret = io_u_quiesce(td);
+ if (ret > 0)
+ td->cur_depth -= ret;
+}
+
+/*
+ * Ops for the rate-offload workqueue: the work function plus the two
+ * pre-sleep hooks defined above, passed to workqueue_init().
+ */
+struct workqueue_ops rated_wq_ops = {
+ .fn = io_workqueue_fn,
+ .pre_sleep_flush_fn = io_workqueue_pre_sleep_flush_fn,
+ .pre_sleep_fn = io_workqueue_pre_sleep_fn,
+};
+
/*
* Entry point for the thread based jobs. The process based jobs end up
* here as well, after a little setup.
/*
* Entry point for the thread based jobs. The process based jobs end up
* here as well, after a little setup.
fio_verify_init(td);
if ((o->io_submit_mode == IO_MODE_OFFLOAD) &&
fio_verify_init(td);
if ((o->io_submit_mode == IO_MODE_OFFLOAD) &&
- workqueue_init(td, &td->io_wq, io_workqueue_fn, td->o.iodepth))
+ workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth))
goto err;
fio_gettime(&td->epoch, NULL);
goto err;
fio_gettime(&td->epoch, NULL);
while (!flist_empty(list)) {
work = flist_first_entry(list, struct workqueue_work, list);
flist_del_init(&work->list);
while (!flist_empty(list)) {
work = flist_first_entry(list, struct workqueue_work, list);
flist_del_init(&work->list);
+ wq->ops.fn(&sw->td, work);
{
struct submit_worker *sw = data;
struct workqueue *wq = sw->wq;
{
struct submit_worker *sw = data;
struct workqueue *wq = sw->wq;
- struct thread_data *td = &sw->td;
unsigned int eflags = 0, ret;
FLIST_HEAD(local_list);
unsigned int eflags = 0, ret;
FLIST_HEAD(local_list);
- if (td->io_u_queued || td->cur_depth ||
- td->io_u_in_flight) {
- int ret;
-
+ if (workqueue_pre_sleep_check(wq)) {
pthread_mutex_unlock(&sw->lock);
pthread_mutex_unlock(&sw->lock);
- ret = io_u_quiesce(td);
- if (ret > 0)
- td->cur_depth -= ret;
+ workqueue_pre_sleep(wq);
pthread_mutex_lock(&sw->lock);
}
pthread_mutex_lock(&sw->lock);
}
}
int workqueue_init(struct thread_data *td, struct workqueue *wq,
}
int workqueue_init(struct thread_data *td, struct workqueue *wq,
- workqueue_fn *fn, unsigned max_pending)
+ struct workqueue_ops *ops, unsigned max_pending)
{
unsigned int running;
int i, error;
wq->max_workers = max_pending;
wq->td = td;
{
unsigned int running;
int i, error;
wq->max_workers = max_pending;
wq->td = td;
wq->work_seq = 0;
wq->next_free_worker = 0;
pthread_cond_init(&wq->flush_cond, NULL);
wq->work_seq = 0;
wq->next_free_worker = 0;
pthread_cond_init(&wq->flush_cond, NULL);
struct flist_head list;
};
struct flist_head list;
};
-typedef void (workqueue_fn)(struct thread_data *, struct workqueue_work *);
+/* Process one unit of queued work */
+typedef void (workqueue_work_fn)(struct thread_data *, struct workqueue_work *);
+/* Return true if work remains that must be flushed before sleeping */
+typedef bool (workqueue_pre_sleep_flush_fn)(struct thread_data *);
+/* Quiesce pending work before the worker goes to sleep */
+typedef void (workqueue_pre_sleep_fn)(struct thread_data *);
+
+/*
+ * Callbacks supplied by the workqueue owner at workqueue_init() time;
+ * the pre-sleep hooks are optional and checked for NULL before use.
+ */
+struct workqueue_ops {
+ workqueue_work_fn *fn;
+ workqueue_pre_sleep_flush_fn *pre_sleep_flush_fn;
+ workqueue_pre_sleep_fn *pre_sleep_fn;
+};
struct workqueue {
unsigned int max_workers;
struct thread_data *td;
struct workqueue {
unsigned int max_workers;
struct thread_data *td;
+ struct workqueue_ops ops;
uint64_t work_seq;
struct submit_worker *workers;
uint64_t work_seq;
struct submit_worker *workers;
volatile int wake_idle;
};
volatile int wake_idle;
};
-int workqueue_init(struct thread_data *td, struct workqueue *wq, workqueue_fn *fn, unsigned int max_workers);
+int workqueue_init(struct thread_data *td, struct workqueue *wq, struct workqueue_ops *ops, unsigned int max_workers);
void workqueue_exit(struct workqueue *wq);
bool workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work);
void workqueue_flush(struct workqueue *wq);
void workqueue_exit(struct workqueue *wq);
bool workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work);
void workqueue_flush(struct workqueue *wq);
+/*
+ * Ask the owner whether a pre-sleep flush is needed. Returns false
+ * when no pre_sleep_flush_fn hook was provided.
+ */
+static inline bool workqueue_pre_sleep_check(struct workqueue *wq)
+{
+ if (!wq->ops.pre_sleep_flush_fn)
+ return false;
+
+ return wq->ops.pre_sleep_flush_fn(wq->td);
+}
+
+/* Invoke the owner's optional pre-sleep hook, if one was provided */
+static inline void workqueue_pre_sleep(struct workqueue *wq)
+{
+ if (wq->ops.pre_sleep_fn)
+ wq->ops.pre_sleep_fn(wq->td);
+}
+