flow.c
#include "fio.h"
#include "fio_sem.h"
#include "smalloc.h"
#include "flist.h"

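/*
 * One fio_flow is shared by all jobs that use the same flow_id: "refs"
 * counts the jobs attached to it, "flow_counter" is the shared I/O counter
 * they all bump, and "total_weight" is the sum of the attached jobs' flow
 * weights.
 */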
struct fio_flow {
	unsigned int refs;
	unsigned int id;
	struct flist_head list;
	unsigned long flow_counter;
	unsigned int total_weight;
};

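/* All flows, and the semaphore serializing lookups and teardown */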
static struct flist_head *flow_list;
static struct fio_sem *flow_lock;

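/*
 * Return 1 (after optionally sleeping or quiescing) if this job has already
 * used more than its weighted share of the flow's I/O, 0 if it may proceed.
 */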
int flow_threshold_exceeded(struct thread_data *td)
{
	struct fio_flow *flow = td->flow;
	double flow_counter_ratio, flow_weight_ratio;

	if (!flow)
		return 0;

	flow_counter_ratio = (double)td->flow_counter /
		atomic_load_relaxed(&flow->flow_counter);
	flow_weight_ratio = (double)td->o.flow /
		atomic_load_relaxed(&flow->total_weight);

	/*
	 * Each thread/process executing a fio job will stall based on the
	 * expected user ratio for a given flow_id group. The idea is to keep
	 * two counters, a shared per-flow counter and a job-specific one, and
	 * to test whether their ratio stays in proportion to the weights of
	 * the other jobs in the same flow_id group.
	 */
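	/*
	 * Illustrative example (not part of the original source): with two
	 * jobs sharing a flow_id and weights of 8 and 2, the first job keeps
	 * issuing I/O while it accounts for at most 8/10 of the shared
	 * counter; past that point it is throttled here until the other job
	 * catches up.
	 */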
	if (flow_counter_ratio > flow_weight_ratio) {
		if (td->o.flow_sleep) {
			io_u_quiesce(td);
			usleep(td->o.flow_sleep);
		} else if (td->o.zone_mode == ZONE_MODE_ZBD) {
			io_u_quiesce(td);
		}

		return 1;
	}

	/*
	 * Increment the flow counter (shared, therefore atomically)
	 * and the job-specific counter.
	 */
	atomic_add(&flow->flow_counter, 1);
	++td->flow_counter;

	return 0;
}

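/*
 * Look up the flow for "id", creating it on first use, and take a reference
 * on it. Returns NULL if the flow infrastructure was never set up or if the
 * allocation fails.
 */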
static struct fio_flow *flow_get(unsigned int id)
{
	struct fio_flow *flow = NULL;
	struct flist_head *n;

	if (!flow_lock)
		return NULL;

	fio_sem_down(flow_lock);

	flist_for_each(n, flow_list) {
		flow = flist_entry(n, struct fio_flow, list);
		if (flow->id == id)
			break;

		flow = NULL;
	}

	if (!flow) {
		flow = smalloc(sizeof(*flow));
		if (!flow) {
			fio_sem_up(flow_lock);
			return NULL;
		}
		flow->refs = 0;
		INIT_FLIST_HEAD(&flow->list);
		flow->id = id;
		flow->flow_counter = 1;
		flow->total_weight = 0;

		flist_add_tail(&flow->list, flow_list);
	}

	flow->refs++;
	fio_sem_up(flow_lock);
	return flow;
}

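/*
 * Remove a job's contribution (its I/O count and weight) from the flow and
 * drop its reference; the flow itself is freed once the last job leaves.
 */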
static void flow_put(struct fio_flow *flow, unsigned long flow_counter,
		     unsigned int weight)
{
	if (!flow_lock)
		return;

	fio_sem_down(flow_lock);

	atomic_sub(&flow->flow_counter, flow_counter);
	atomic_sub(&flow->total_weight, weight);

	if (!--flow->refs) {
		assert(flow->flow_counter == 1);
		flist_del(&flow->list);
		sfree(flow);
	}

	fio_sem_up(flow_lock);
}

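/*
 * Attach a job to its flow group: take a reference on the flow matching
 * td->o.flow_id and add the job's weight to the group total.
 */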
void flow_init_job(struct thread_data *td)
{
	if (td->o.flow) {
		td->flow = flow_get(td->o.flow_id);
		td->flow_counter = 0;
		atomic_add(&td->flow->total_weight, td->o.flow);
	}
}

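/* Detach a job from its flow group, undoing flow_init_job(). */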
void flow_exit_job(struct thread_data *td)
{
	if (td->flow) {
		flow_put(td->flow, td->flow_counter, td->o.flow);
		td->flow = NULL;
	}
}

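/* Set up the global flow list and the semaphore that protects it. */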
void flow_init(void)
{
	flow_list = smalloc(sizeof(*flow_list));
	if (!flow_list) {
		log_err("fio: smalloc pool exhausted\n");
		return;
	}

	flow_lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!flow_lock) {
		log_err("fio: failed to allocate flow lock\n");
		sfree(flow_list);
		return;
	}

	INIT_FLIST_HEAD(flow_list);
}

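/* Tear down the global flow list and its lock. */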
void flow_exit(void)
{
	if (flow_lock)
		fio_sem_remove(flow_lock);
	if (flow_list)
		sfree(flow_list);
}
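
/*
 * Illustrative usage (a sketch, not part of flow.c): the I/O submission path
 * is expected to call flow_threshold_exceeded() before each I/O and skip the
 * submission while it returns nonzero. In a job file the corresponding
 * options would look roughly like:
 *
 *	[writer]
 *	flow_id=1
 *	flow=8
 *
 *	[reader]
 *	flow_id=1
 *	flow=2
 *	flow_sleep=100
 *
 * which, per the ratio check above, aims for an 8:2 split of issued I/O
 * between the two jobs.
 */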