[fio.git] / flow.c
9e684a49 1#include "fio.h"
971caeb1 2#include "fio_sem.h"
9e684a49
DE
3#include "smalloc.h"
4#include "flist.h"
5
6struct fio_flow {
7 unsigned int refs;
9e684a49 8 unsigned int id;
2b7968d3 9 struct flist_head list;
5200b0f8 10 unsigned long flow_counter;
d4e74fda 11 unsigned int total_weight;
9e684a49
DE
12};
13
14static struct flist_head *flow_list;
971caeb1 15static struct fio_sem *flow_lock;
9e684a49
DE
16
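
/*
 * Usage sketch (illustrative, not part of this file): jobs opt into
 * flow control with the flow_id, flow and flow_sleep job options. A
 * hypothetical job file such as:
 *
 *	[reader]
 *	flow_id=1
 *	flow=1
 *
 *	[writer]
 *	flow_id=1
 *	flow=3
 *
 * would give "reader" roughly 1/4 and "writer" roughly 3/4 of the
 * group's I/O, per the proportional-weight logic below.
 */

/*
 * Return 1 if this job is ahead of its assigned share of its flow_id
 * group's I/O and should stall, 0 otherwise. Callers in the I/O
 * submission path (e.g. backend.c) are expected to check this before
 * issuing each I/O.
 */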
int flow_threshold_exceeded(struct thread_data *td)
{
	struct fio_flow *flow = td->flow;
	double flow_counter_ratio, flow_weight_ratio;

	if (!flow)
		return 0;

	flow_counter_ratio = (double)td->flow_counter /
		atomic_load_relaxed(&flow->flow_counter);
	flow_weight_ratio = (double)td->o.flow /
		atomic_load_relaxed(&flow->total_weight);

	/*
	 * Each thread/process running a fio job stalls according to the
	 * share the user assigned to it within its flow_id group. The
	 * idea is to keep two counters, a shared per-flow counter and a
	 * job-local one, and test whether the ratio between them stays
	 * proportional to the job's weight within the group.
	 */
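	/*
	 * Worked example (hypothetical numbers): two jobs share
	 * flow_id=1 with flow=1 and flow=3, so total_weight is 4. For
	 * the flow=1 job, flow_weight_ratio is 1/4 = 0.25; once that
	 * job has issued more than a quarter of the group's I/Os,
	 * flow_counter_ratio exceeds 0.25 and the job stalls until the
	 * others catch up.
	 */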
	if (flow_counter_ratio > flow_weight_ratio) {
		if (td->o.flow_sleep) {
			io_u_quiesce(td);
			usleep(td->o.flow_sleep);
		}

		return 1;
	}

	/*
	 * Increment the flow counter (shared, hence atomically) and the
	 * job-specific counter.
	 */
	atomic_add(&flow->flow_counter, 1);
	++td->flow_counter;

	return 0;
}
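
/*
 * Look up the fio_flow for @id under flow_lock, allocating and linking
 * a new one on first use. The returned flow carries an extra
 * reference, dropped later via flow_put().
 */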
static struct fio_flow *flow_get(unsigned int id)
{
	struct fio_flow *flow = NULL;
	struct flist_head *n;

	if (!flow_lock)
		return NULL;

	fio_sem_down(flow_lock);

	flist_for_each(n, flow_list) {
		flow = flist_entry(n, struct fio_flow, list);
		if (flow->id == id)
			break;

		flow = NULL;
	}

	if (!flow) {
		flow = smalloc(sizeof(*flow));
		if (!flow) {
			fio_sem_up(flow_lock);
			return NULL;
		}

		flow->refs = 0;
		INIT_FLIST_HEAD(&flow->list);
		flow->id = id;
		flow->flow_counter = 1;
		flow->total_weight = 0;

		flist_add_tail(&flow->list, flow_list);
	}

	flow->refs++;
	fio_sem_up(flow_lock);
	return flow;
}
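
/*
 * Drop the reference taken by flow_get(), subtracting this job's
 * accumulated I/O count and weight from the shared flow. The last
 * reference unlinks the flow from flow_list and frees it.
 */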
static void flow_put(struct fio_flow *flow, unsigned long flow_counter,
		     unsigned int weight)
{
	if (!flow_lock)
		return;

	fio_sem_down(flow_lock);

	atomic_sub(&flow->flow_counter, flow_counter);
	atomic_sub(&flow->total_weight, weight);

	if (!--flow->refs) {
		assert(flow->flow_counter == 1);
		flist_del(&flow->list);
		sfree(flow);
	}

	fio_sem_up(flow_lock);
}
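
/*
 * Attach a job to its flow at init time: look up (or create) the flow
 * for the job's flow_id, reset the job-local counter and add the
 * job's weight to the group total.
 */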
void flow_init_job(struct thread_data *td)
{
	if (td->o.flow) {
		td->flow = flow_get(td->o.flow_id);
		td->flow_counter = 0;
		atomic_add(&td->flow->total_weight, td->o.flow);
	}
}
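
/*
 * Detach a job from its flow at teardown, returning its weight and
 * counted I/Os to the group.
 */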
void flow_exit_job(struct thread_data *td)
{
	if (td->flow) {
		flow_put(td->flow, td->flow_counter, td->o.flow);
		td->flow = NULL;
	}
}
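
/*
 * Global setup: allocate the shared flow list and its semaphore from
 * the smalloc shared-memory pool, so that forked job processes see
 * the same flow state.
 */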
void flow_init(void)
{
	flow_list = smalloc(sizeof(*flow_list));
	if (!flow_list) {
		log_err("fio: smalloc pool exhausted\n");
		return;
	}

	flow_lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!flow_lock) {
		log_err("fio: failed to allocate flow lock\n");
		sfree(flow_list);
		return;
	}

	INIT_FLIST_HEAD(flow_list);
}
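
/*
 * Global teardown counterpart of flow_init().
 */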
void flow_exit(void)
{
	if (flow_lock)
		fio_sem_remove(flow_lock);
	if (flow_list)
		sfree(flow_list);
}