};
+/*
+ * One shared-memory segment of the global thread_data table.  'threads'
+ * is the shm- (or, under CONFIG_NO_SHM, malloc-) backed array of
+ * thread_data slots, and 'shm_id' is the SysV shm id (-1 once detached).
+ * NOTE(review): this patch only ever allocates and frees segments[0];
+ * the remaining REAL_MAX_SEG-1 entries stay zeroed — confirm follow-up
+ * work populates them before relying on multi-segment iteration.
+ */
+struct thread_segment {
+ struct thread_data *threads;
+ int shm_id;
+};
+
/*
* when should interactive ETA output be generated
*/
#define __fio_stringify_1(x) #x
#define __fio_stringify(x) __fio_stringify_1(x)
+/*
+ * Hard cap on the number of jobs, and the segment bookkeeping derived
+ * from it.  REAL_MAX_SEG sizes the segments[] array; JOBS_PER_SEG is
+ * presumably the number of thread_data slots carried by one segment —
+ * TODO confirm once per-segment allocation lands (only segments[0] is
+ * allocated in this change).
+ */
+#define REAL_MAX_JOBS 4096
+#define JOBS_PER_SEG 8
+#define REAL_MAX_SEG (REAL_MAX_JOBS / JOBS_PER_SEG)
+
extern bool exitall_on_terminate;
extern unsigned int thread_number;
extern unsigned int stat_number;
-extern int shm_id;
extern int groupid;
extern int output_format;
extern int append_terse_output;
extern long long trigger_timeout;
extern char *aux_path;
-extern struct thread_data *threads;
+extern struct thread_segment segments[REAL_MAX_SEG];
static inline bool is_running_backend(void)
{
!(io_u->ddir == DDIR_TRIM && !td_trim(td)));
}
-#define REAL_MAX_JOBS 4096
-
static inline bool should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
 * Iterates all threads/processes within all the defined jobs
 */
#define for_each_td(td, i) \
- for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
+ for ((i) = 0, (td) = &segments[0].threads[0]; (i) < (int) thread_number; (i)++, (td)++) /* NOTE(review): linear (td)++ assumes all thread_number tds live contiguously in segments[0]; must become a nested per-segment walk if any other segment is ever populated */
#define for_each_file(td, f, i) \
if ((td)->files_index) \
for ((i) = 0, (f) = (td)->files[0]; \
static bool merge_blktrace_only;
static struct thread_data def_thread;
-struct thread_data *threads = NULL;
+struct thread_segment segments[REAL_MAX_SEG];
static char **job_sections;
static int nr_job_sections;
void free_threads_shm(void)
{
- if (threads) {
- void *tp = threads;
+ if (segments[0].threads) {
+ void *tp = segments[0].threads;
#ifndef CONFIG_NO_SHM
struct shmid_ds sbuf;
- threads = NULL;
+ segments[0].threads = NULL;
shmdt(tp);
- shmctl(shm_id, IPC_RMID, &sbuf);
- shm_id = -1;
+ shmctl(segments[0].shm_id, IPC_RMID, &sbuf);
+ segments[0].shm_id = -1;
#else
- threads = NULL;
+ segments[0].threads = NULL;
free(tp);
#endif
}
static void free_shm(void)
{
- if (threads) {
+ if (segments[0].threads) {
flow_exit();
fio_debug_jobp = NULL;
fio_warned = NULL;
*/
static int setup_thread_area(void)
{
+ struct thread_segment *seg = &segments[0];
int i;
- if (threads)
+ if (seg->threads)
return 0;
/*
size += 2 * sizeof(unsigned int);
#ifndef CONFIG_NO_SHM
- shm_id = shmget(0, size, IPC_CREAT | 0600);
- if (shm_id != -1)
+ seg->shm_id = shmget(0, size, IPC_CREAT | 0600);
+ if (seg->shm_id != -1)
break;
if (errno != EINVAL && errno != ENOMEM && errno != ENOSPC) {
perror("shmget");
break;
}
#else
- threads = malloc(size);
- if (threads)
+ seg->threads = malloc(size);
+ if (seg->threads)
break;
#endif
} while (max_jobs);
#ifndef CONFIG_NO_SHM
- if (shm_id == -1)
+ if (seg->shm_id == -1)
return 1;
- threads = shmat(shm_id, NULL, 0);
- if (threads == (void *) -1) {
+ seg->threads = shmat(seg->shm_id, NULL, 0);
+ if (seg->threads == (void *) -1) {
perror("shmat");
return 1;
}
if (shm_attach_to_open_removed())
- shmctl(shm_id, IPC_RMID, NULL);
+ shmctl(seg->shm_id, IPC_RMID, NULL);
#endif
- memset(threads, 0, max_jobs * sizeof(struct thread_data));
+ memset(seg->threads, 0, max_jobs * sizeof(struct thread_data));
for (i = 0; i < max_jobs; i++)
- DRD_IGNORE_VAR(threads[i]);
- fio_debug_jobp = (unsigned int *)(threads + max_jobs);
+ DRD_IGNORE_VAR(seg->threads[i]);
+ fio_debug_jobp = (unsigned int *)(seg->threads + max_jobs);
*fio_debug_jobp = -1;
fio_warned = fio_debug_jobp + 1;
*fio_warned = 0;
return NULL;
}
- td = &threads[thread_number++];
+ td = &segments[0].threads[thread_number++];
*td = *parent;
INIT_FLIST_HEAD(&td->opt_list);
if (td->o.name)
free(td->o.name);
- memset(&threads[td->thread_number - 1], 0, sizeof(*td));
+ memset(td, 0, sizeof(*td));
thread_number--;
}