/* os/os-windows-7.h */
#define FIO_MAX_CPUS 512 /* From Hyper-V 2016's max logical processors */
#define FIO_CPU_MASK_STRIDE 64
#define FIO_CPU_MASK_ROWS (FIO_MAX_CPUS / FIO_CPU_MASK_STRIDE)

typedef struct {
	uint64_t row[FIO_CPU_MASK_ROWS];
} os_cpu_mask_t;
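/*
 * Each CPU n maps to bit (n % FIO_CPU_MASK_STRIDE) of
 * row[n / FIO_CPU_MASK_STRIDE]. With the 64-bit stride above, CPU 70 is
 * bit 6 of row[1]; a minimal sketch of setting it by hand:
 *
 *	os_cpu_mask_t m = { { 0 } };
 *
 *	m.row[70 / 64] |= 1ULL << (70 % 64);	(i.e. m.row[1] becomes 0x40)
 *
 * The fio_cpu_set()/fio_cpu_clear() helpers below do this via
 * cpu_to_row_offset().
 */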

#define FIO_HAVE_CPU_ONLINE_SYSCONF
/* Return all processors regardless of processor group */
static inline unsigned int cpus_online(void)
{
	return GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
}

static inline void print_mask(os_cpu_mask_t *cpumask)
{
	for (int i = 0; i < FIO_CPU_MASK_ROWS; i++)
		dprint(FD_PROCESS, "cpumask[%d]=%" PRIu64 "\n", i, cpumask->row[i]);
}

/* Return the index of the least significant set CPU in cpumask or -1 if no
 * CPUs are set */
static inline int first_set_cpu(os_cpu_mask_t *cpumask)
{
	int cpus_offset, mask_first_cpu, row;

	cpus_offset = 0;
	row = 0;
	mask_first_cpu = -1;
	while (mask_first_cpu < 0 && row < FIO_CPU_MASK_ROWS) {
		int row_first_cpu;

		row_first_cpu = __builtin_ffsll(cpumask->row[row]) - 1;
		dprint(FD_PROCESS, "row_first_cpu=%d cpumask->row[%d]=%" PRIu64 "\n",
		       row_first_cpu, row, cpumask->row[row]);
		if (row_first_cpu > -1) {
			mask_first_cpu = cpus_offset + row_first_cpu;
			dprint(FD_PROCESS, "first set cpu in mask is at index %d\n",
			       mask_first_cpu);
		} else {
			cpus_offset += FIO_CPU_MASK_STRIDE;
			row++;
		}
	}

	return mask_first_cpu;
}
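/*
 * For example, with only row[0] == 0x9 (CPUs 0 and 3) and row[1] == 0x10
 * (CPU 68) set, __builtin_ffsll(0x9) returns 1 and first_set_cpu() reports
 * CPU 0.
 */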

/* Return the index of the most significant set CPU in cpumask or -1 if no
 * CPUs are set */
static inline int last_set_cpu(os_cpu_mask_t *cpumask)
{
	int cpus_offset, mask_last_cpu, row;

	cpus_offset = (FIO_CPU_MASK_ROWS - 1) * FIO_CPU_MASK_STRIDE;
	row = FIO_CPU_MASK_ROWS - 1;
	mask_last_cpu = -1;
	while (mask_last_cpu < 0 && row >= 0) {
		int row_last_cpu;

		if (cpumask->row[row] == 0)
			row_last_cpu = -1;
		else {
			uint64_t tmp = cpumask->row[row];

			row_last_cpu = 0;
			while (tmp >>= 1)
				row_last_cpu++;
		}

		dprint(FD_PROCESS, "row_last_cpu=%d cpumask->row[%d]=%" PRIu64 "\n",
		       row_last_cpu, row, cpumask->row[row]);
		if (row_last_cpu > -1) {
			mask_last_cpu = cpus_offset + row_last_cpu;
			dprint(FD_PROCESS, "last set cpu in mask is at index %d\n",
			       mask_last_cpu);
		} else {
			cpus_offset -= FIO_CPU_MASK_STRIDE;
			row--;
		}
	}

	return mask_last_cpu;
}
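/*
 * With the same example mask (row[0] == 0x9, row[1] == 0x10), rows 7 down to
 * 2 are skipped, row 1 yields row_last_cpu == 4 and last_set_cpu() reports
 * CPU 64 + 4 == 68.
 */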

static inline int mask_to_group_mask(os_cpu_mask_t *cpumask, int *processor_group,
				     uint64_t *affinity_mask)
{
	WORD online_groups, group, group_size;
	bool found;
	int cpus_offset, search_cpu, last_cpu, bit_offset, row, end;
	uint64_t group_cpumask;

	search_cpu = first_set_cpu(cpumask);
	if (search_cpu < 0) {
		log_info("CPU mask doesn't set any CPUs\n");
		return 1;
	}

	/* Find the processor group that the first set CPU belongs to */
	online_groups = GetActiveProcessorGroupCount();
	group = 0;
	found = false;
	cpus_offset = 0;
	group_size = 0;
	while (!found && group < online_groups) {
		group_size = GetActiveProcessorCount(group);
		dprint(FD_PROCESS, "group=%d group_start=%d group_size=%u search_cpu=%d\n",
		       group, cpus_offset, group_size, search_cpu);
		if (cpus_offset + group_size > search_cpu)
			found = true;
		else {
			cpus_offset += group_size;
			group++;
		}
	}

	if (!found) {
		log_err("CPU mask contains processor beyond last active processor index (%d)\n",
			cpus_offset - 1);
		print_mask(cpumask);
		return 1;
	}

	/* Check that ALL the CPUs in the mask fall within that one processor group */
	last_cpu = last_set_cpu(cpumask);
	if (last_cpu > (cpus_offset + group_size - 1)) {
		log_info("CPU mask cannot bind CPUs (e.g. %d, %d) that are "
			 "in different processor groups\n", search_cpu,
			 last_cpu);
		print_mask(cpumask);
		return 1;
	}

	/* Extract the current processor group's mask from the cpumask */
	row = cpus_offset / FIO_CPU_MASK_STRIDE;
	bit_offset = cpus_offset % FIO_CPU_MASK_STRIDE;
	group_cpumask = cpumask->row[row] >> bit_offset;
	end = bit_offset + group_size;
	if (end > FIO_CPU_MASK_STRIDE && (row + 1 < FIO_CPU_MASK_ROWS)) {
		/* Some of the next row needs to be part of the mask */
		int needed, needed_shift, needed_mask_shift;
		uint64_t needed_mask;

		needed = end - FIO_CPU_MASK_STRIDE;
		needed_shift = FIO_CPU_MASK_STRIDE - bit_offset;
		needed_mask_shift = FIO_CPU_MASK_STRIDE - needed;
		needed_mask = (uint64_t)-1 >> needed_mask_shift;
		dprint(FD_PROCESS, "bit_offset=%d end=%d needed=%d needed_shift=%d "
		       "needed_mask=%" PRIu64 " needed_mask_shift=%d\n",
		       bit_offset, end, needed, needed_shift, needed_mask,
		       needed_mask_shift);
		group_cpumask |= (cpumask->row[row + 1] & needed_mask) << needed_shift;
	}
	group_cpumask &= (uint64_t)-1 >> (FIO_CPU_MASK_STRIDE - group_size);

	/* Return the group and the group-relative mask */
	dprint(FD_PROCESS, "Returning group=%d group_mask=%" PRIu64 "\n",
	       group, group_cpumask);
	*processor_group = group;
	*affinity_mask = group_cpumask;

	return 0;
}
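/*
 * As a sketch of the translation above: assume a hypothetical host with two
 * 40-CPU processor groups, so group 1 covers CPUs 40-79. A cpumask with only
 * CPUs 41 and 45 set has bits 41 and 45 of row[0] set, so group 1 is chosen,
 * bit_offset is 40 and the returned group-relative affinity mask is
 * row[0] >> 40 == 0x22, i.e. group CPUs 1 and 5.
 */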

static inline int fio_setaffinity(int pid, os_cpu_mask_t cpumask)
{
	HANDLE handle = NULL;
	int group, ret;
	uint64_t group_mask = 0;
	GROUP_AFFINITY new_group_affinity;

	ret = -1;

	if (mask_to_group_mask(&cpumask, &group, &group_mask) != 0)
		goto err;

	handle = OpenThread(THREAD_QUERY_INFORMATION | THREAD_SET_INFORMATION,
			    TRUE, pid);
	if (handle == NULL) {
		log_err("fio_setaffinity: failed to get handle for pid %d\n", pid);
		goto err;
	}

	/* Set group and mask.
	 * Note: if the GROUP_AFFINITY struct's Reserved members are not
	 * initialised to 0 then SetThreadGroupAffinity will fail with
	 * GetLastError() set to ERROR_INVALID_PARAMETER */
	new_group_affinity.Mask = (KAFFINITY) group_mask;
	new_group_affinity.Group = group;
	new_group_affinity.Reserved[0] = 0;
	new_group_affinity.Reserved[1] = 0;
	new_group_affinity.Reserved[2] = 0;
	if (SetThreadGroupAffinity(handle, &new_group_affinity, NULL) != 0)
		ret = 0;
	else {
		log_err("fio_setaffinity: failed to set thread affinity "
			"(pid %d, group %d, mask %" PRIx64 ", "
			"GetLastError=%d)\n", pid, group, group_mask,
			GetLastError());
		goto err;
	}

err:
	if (handle)
		CloseHandle(handle);
	return ret;
}
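/*
 * Minimal usage sketch (note that the "pid" argument is really a Windows
 * thread id, as the OpenThread() call above implies), pinning the calling
 * thread to CPU 3:
 *
 *	os_cpu_mask_t mask;
 *
 *	fio_cpuset_init(&mask);
 *	fio_cpu_set(&mask, 3);
 *	if (fio_setaffinity(GetCurrentThreadId(), mask))
 *		log_err("failed to set affinity\n");
 */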

static inline void cpu_to_row_offset(int cpu, int *row, int *offset)
{
	*row = cpu / FIO_CPU_MASK_STRIDE;
	*offset = cpu % FIO_CPU_MASK_STRIDE;
}
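/*
 * For example, cpu_to_row_offset(70, &row, &offset) yields row == 1 and
 * offset == 6, matching the layout described above os_cpu_mask_t.
 */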

static inline int fio_cpuset_init(os_cpu_mask_t *mask)
{
	for (int i = 0; i < FIO_CPU_MASK_ROWS; i++)
		mask->row[i] = 0;
	return 0;
}

/*
 * fio_getaffinity() should not be called once a fio_setaffinity() call has
 * been made because fio_setaffinity() may put the process into multiple
 * processor groups
 */
static inline int fio_getaffinity(int pid, os_cpu_mask_t *mask)
{
	int ret;
	int row, offset, end, group, group_size, group_start_cpu;
	DWORD_PTR process_mask, system_mask;
	HANDLE handle;
	PUSHORT current_groups;
	USHORT group_count;
	WORD online_groups;

	ret = -1;
	current_groups = NULL;
	handle = OpenProcess(PROCESS_QUERY_INFORMATION, TRUE, pid);
	if (handle == NULL) {
		log_err("fio_getaffinity: failed to get handle for pid %d\n",
			pid);
		goto err;
	}

	group_count = 1;
	/*
	 * GetProcessGroupAffinity() seems to expect more than the natural
	 * alignment for a USHORT from the area pointed to by current_groups so
	 * arrange for maximum alignment by allocating via malloc()
	 */
	current_groups = malloc(sizeof(USHORT));
	if (!current_groups) {
		log_err("fio_getaffinity: malloc failed\n");
		goto err;
	}
	if (GetProcessGroupAffinity(handle, &group_count, current_groups) == 0) {
		/* NB: we also fail here if we are a multi-group process */
		log_err("fio_getaffinity: failed to get single group affinity for pid %d\n", pid);
		goto err;
	}
	GetProcessAffinityMask(handle, &process_mask, &system_mask);

	/* Convert group and group relative mask to full CPU mask */
	online_groups = GetActiveProcessorGroupCount();
	if (online_groups == 0) {
		log_err("fio_getaffinity: error retrieving total processor groups\n");
		goto err;
	}

	group = 0;
	group_start_cpu = 0;
	group_size = 0;
	dprint(FD_PROCESS, "current_groups=%d group_count=%d\n",
	       current_groups[0], group_count);
	while (true) {
		group_size = GetActiveProcessorCount(group);
		if (group_size == 0) {
			log_err("fio_getaffinity: error retrieving size of "
				"processor group %d\n", group);
			goto err;
		} else if (group >= current_groups[0] || group >= online_groups)
			break;
		else {
			group_start_cpu += group_size;
			group++;
		}
	}

	if (group != current_groups[0]) {
		log_err("fio_getaffinity: could not find processor group %d\n",
			current_groups[0]);
		goto err;
	}

	dprint(FD_PROCESS, "group_start_cpu=%d, group size=%u\n",
	       group_start_cpu, group_size);
	if ((group_start_cpu + group_size) > FIO_MAX_CPUS) {
		log_err("fio_getaffinity failed: current CPU affinity (group "
			"%d, group_start_cpu %d, group_size %d) extends "
			"beyond mask's highest CPU (%d)\n", group,
			group_start_cpu, group_size, FIO_MAX_CPUS - 1);
		goto err;
	}

	fio_cpuset_init(mask);
	cpu_to_row_offset(group_start_cpu, &row, &offset);
	mask->row[row] = process_mask;
	mask->row[row] <<= offset;
	end = offset + group_size;
	if (end > FIO_CPU_MASK_STRIDE) {
		/* The high bits of the group's mask spill into the next row */
		int needed;
		uint64_t needed_mask;

		needed = end - FIO_CPU_MASK_STRIDE;
		needed_mask = (uint64_t)-1 >> (FIO_CPU_MASK_STRIDE - needed);
		row++;
		mask->row[row] = process_mask;
		mask->row[row] >>= FIO_CPU_MASK_STRIDE - offset;
		mask->row[row] &= needed_mask;
	}
	ret = 0;

err:
	if (handle)
		CloseHandle(handle);
	if (current_groups)
		free(current_groups);

	return ret;
}
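/*
 * Minimal usage sketch, assuming "pid" identifies a process that has not yet
 * been spread across processor groups (see the comment above
 * fio_getaffinity()):
 *
 *	os_cpu_mask_t mask;
 *
 *	if (!fio_getaffinity(pid, &mask))
 *		dprint(FD_PROCESS, "bound to %d CPUs\n", fio_cpu_count(&mask));
 */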

static inline void fio_cpu_clear(os_cpu_mask_t *mask, int cpu)
{
	int row, offset;
	cpu_to_row_offset(cpu, &row, &offset);

	mask->row[row] &= ~(1ULL << offset);
}

static inline void fio_cpu_set(os_cpu_mask_t *mask, int cpu)
{
	int row, offset;
	cpu_to_row_offset(cpu, &row, &offset);

	mask->row[row] |= 1ULL << offset;
}

static inline int fio_cpu_isset(os_cpu_mask_t *mask, int cpu)
{
	int row, offset;
	cpu_to_row_offset(cpu, &row, &offset);

	return (mask->row[row] & (1ULL << offset)) != 0;
}

static inline int fio_cpu_count(os_cpu_mask_t *mask)
{
	int count = 0;

	for (int i = 0; i < FIO_CPU_MASK_ROWS; i++)
		count += hweight64(mask->row[i]);

	return count;
}
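/*
 * For example, after fio_cpu_set() for CPUs 1, 65 and 130 the mask has one
 * bit set in each of rows 0, 1 and 2, fio_cpu_isset() returns 1 for those
 * CPUs and fio_cpu_count() returns 3.
 */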

static inline int fio_cpuset_exit(os_cpu_mask_t *mask)
{
	return 0;
}