struct thread_segment {
	struct thread_data *threads;
	int shm_id;
+	int nr_threads;
};
extern bool exitall_on_terminate;
extern unsigned int thread_number;
extern unsigned int stat_number;
+extern unsigned int nr_segments;
+extern unsigned int cur_segment;
extern int groupid;
extern int output_format;
extern int append_terse_output;
extern struct thread_segment segments[REAL_MAX_SEG];
+static inline struct thread_data *tnumber_to_td(unsigned int tnumber)
+{
+	struct thread_segment *seg;
+
+	seg = &segments[tnumber / JOBS_PER_SEG];
+	return &seg->threads[tnumber & (JOBS_PER_SEG - 1)];
+}
+
static inline bool is_running_backend(void)
{
	return is_backend || is_local_backend;
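
The new tnumber_to_td() helper finds the segment by division and the slot within it by masking, which only works if JOBS_PER_SEG is a power of two. A standalone sketch of the mapping, assuming a hypothetical JOBS_PER_SEG of 4096 (the excerpt above does not show the actual value):

/* Illustration only, not fio code: where a 0-based global thread
 * index lands in the segmented pool. */
#include <stdio.h>

#define JOBS_PER_SEG 4096 /* assumed; must be a power of two for the mask */

int main(void)
{
	unsigned int tnumber = 5000;
	unsigned int seg  = tnumber / JOBS_PER_SEG;       /* -> 1 */
	unsigned int slot = tnumber & (JOBS_PER_SEG - 1); /* -> 904 */

	printf("thread %u -> segment %u, slot %u\n", tnumber, seg, slot);
	return 0;
}
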
/*
 * Iterates all threads/processes within all the defined jobs
 */
#define for_each_td(td, i) \
-	for ((i) = 0, (td) = &segments[0].threads[0]; (i) < (int) thread_number; (i)++, (td)++)
+	for ((i) = 0, (td) = &segments[0].threads[0]; (i) < (int) thread_number; (i)++, (td) = tnumber_to_td((i)))
#define for_each_file(td, f, i) \
	if ((td)->files_index) \
		for ((i) = 0, (f) = (td)->files[0]; \
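
For callers nothing changes: for_each_td() still walks a dense 0..thread_number-1 index space, with tnumber_to_td() hopping across segments behind the macro. A hedged usage sketch (count_running_jobs is hypothetical; runstate and TD_EXITED are fio's run-state fields, assumed unchanged by this patch):

/* Hypothetical caller: count jobs that have not exited yet. */
static int count_running_jobs(void)
{
	struct thread_data *td;
	int i, nr = 0;

	for_each_td(td, i) {
		if (td->runstate < TD_EXITED)
			nr++;
	}
	return nr;
}
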
void free_threads_shm(void)
{
-	if (segments[0].threads) {
-		void *tp = segments[0].threads;
+	int i;
+
+	for (i = 0; i < nr_segments; i++) {
+		struct thread_segment *seg = &segments[i];
+
+		if (seg->threads) {
+			void *tp = seg->threads;
#ifndef CONFIG_NO_SHM
-		struct shmid_ds sbuf;
+			struct shmid_ds sbuf;
-		segments[0].threads = NULL;
-		shmdt(tp);
-		shmctl(segments[0].shm_id, IPC_RMID, &sbuf);
-		segments[0].shm_id = -1;
+			seg->threads = NULL;
+			shmdt(tp);
+			shmctl(seg->shm_id, IPC_RMID, &sbuf);
+			seg->shm_id = -1;
#else
-		segments[0].threads = NULL;
-		free(tp);
+			seg->threads = NULL;
+			free(tp);
#endif
+		}
	}
+
+	nr_segments = 0;
+	cur_segment = 0;
}
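
The per-segment teardown above follows the usual SysV shm pattern: detach the mapping with shmdt(), then mark the segment for destruction with shmctl(IPC_RMID). A minimal standalone sketch of that lifecycle for one segment (key and size are arbitrary here, not fio's):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	struct shmid_ds sbuf;
	int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	if (shm_id == -1) {
		perror("shmget");
		return 1;
	}

	void *p = shmat(shm_id, NULL, 0); /* attach */
	if (p == (void *) -1) {
		perror("shmat");
		shmctl(shm_id, IPC_RMID, NULL);
		return 1;
	}

	/* ... use the memory ... */

	shmdt(p);                        /* detach our mapping */
	shmctl(shm_id, IPC_RMID, &sbuf); /* destroyed once all users detach */
	return 0;
}
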
static void free_shm(void)
{
-	if (segments[0].threads) {
+	if (nr_segments) {
		flow_exit();
		fio_debug_jobp = NULL;
		fio_warned = NULL;
		scleanup();
	}
-/*
- * The thread area is shared between the main process and the job
- * threads/processes. So setup a shared memory segment that will hold
- * all the job info. We use the end of the region for keeping track of
- * open files across jobs, for file sharing.
- */
-static int setup_thread_area(void)
+static int add_thread_segment(void)
{
-	struct thread_segment *seg = &segments[0];
+	struct thread_segment *seg = &segments[nr_segments];
+	size_t size = JOBS_PER_SEG * sizeof(struct thread_data);
	int i;
-	if (seg->threads)
-		return 0;
-
-	/*
-	 * 1024 is too much on some machines, scale max_jobs if
-	 * we get a failure that looks like too large a shm segment
-	 */
-	do {
-		size_t size = max_jobs * sizeof(struct thread_data);
+	if (nr_segments + 1 >= REAL_MAX_SEG)
+		return -1;
-		size += 2 * sizeof(unsigned int);
+	size += 2 * sizeof(unsigned int);
#ifndef CONFIG_NO_SHM
-		seg->shm_id = shmget(0, size, IPC_CREAT | 0600);
-		if (seg->shm_id != -1)
-			break;
-		if (errno != EINVAL && errno != ENOMEM && errno != ENOSPC) {
+	seg->shm_id = shmget(0, size, IPC_CREAT | 0600);
+	if (seg->shm_id == -1) {
+		if (errno != EINVAL && errno != ENOMEM && errno != ENOSPC)
			perror("shmget");
-			break;
-		}
+		return -1;
+	}
#else
-		seg->threads = malloc(size);
-		if (seg->threads)
-			break;
+	seg->threads = malloc(size);
+	if (!seg->threads)
+		return -1;
#endif
-		max_jobs >>= 1;
-	} while (max_jobs);
-
#ifndef CONFIG_NO_SHM
-	if (seg->shm_id == -1)
-		return 1;
-
	seg->threads = shmat(seg->shm_id, NULL, 0);
	if (seg->threads == (void *) -1) {
		perror("shmat");
		shmctl(seg->shm_id, IPC_RMID, NULL);
#endif
-	memset(seg->threads, 0, max_jobs * sizeof(struct thread_data));
-	for (i = 0; i < max_jobs; i++)
+	nr_segments++;
+
+	memset(seg->threads, 0, JOBS_PER_SEG * sizeof(struct thread_data));
+	for (i = 0; i < JOBS_PER_SEG; i++)
		DRD_IGNORE_VAR(seg->threads[i]);
-	fio_debug_jobp = (unsigned int *)(seg->threads + max_jobs);
+	seg->nr_threads = 0;
+
+	/* Not first segment, we're done */
+	if (nr_segments != 1) {
+		cur_segment++;
+		return 0;
+	}
+
+	fio_debug_jobp = (unsigned int *)(seg->threads + JOBS_PER_SEG);
	*fio_debug_jobp = -1;
	fio_warned = fio_debug_jobp + 1;
	*fio_warned = 0;
	flow_init();
-
	return 0;
}
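
Only the first segment carries the two trailing control words, which is why size includes 2 * sizeof(unsigned int) and why fio_debug_jobp/fio_warned are wired up only when nr_segments == 1. A sketch of that single-allocation layout, with a stubbed thread_data and an assumed JOBS_PER_SEG of 8:

#include <stdio.h>
#include <stdlib.h>

#define JOBS_PER_SEG 8 /* assumed small value for illustration */

struct thread_data { char pad[256]; }; /* stand-in for the real struct */

int main(void)
{
	size_t size = JOBS_PER_SEG * sizeof(struct thread_data);
	struct thread_data *threads;
	unsigned int *debug_jobp, *warned;

	size += 2 * sizeof(unsigned int); /* room for the control words */
	threads = calloc(1, size);
	if (!threads)
		return 1;

	/* same pointer arithmetic as the patch above */
	debug_jobp = (unsigned int *)(threads + JOBS_PER_SEG);
	warned = debug_jobp + 1;
	*debug_jobp = -1;
	*warned = 0;

	printf("jobs at %p, control words at %p\n",
	       (void *)threads, (void *)debug_jobp);
	free(threads);
	return 0;
}
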
+/*
+ * The thread areas are shared between the main process and the job
+ * threads/processes, and are split into chunks of JOBS_PER_SEG. If the
+ * current segment has no more room, add a new chunk.
+ */
+static int expand_thread_area(void)
+{
+	struct thread_segment *seg = &segments[cur_segment];
+
+	if (nr_segments && seg->nr_threads < JOBS_PER_SEG)
+		return 0;
+
+	return add_thread_segment();
+}
+
static void dump_print_option(struct print_option *p)
{
	const char *delim;
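
expand_thread_area() is the only entry point callers need: it is a no-op while the current segment has free slots, and otherwise appends a segment, advancing cur_segment for every segment after the first. A compact standalone model of that growth policy (all names are stand-ins, with plain calloc() in place of the shm setup):

#include <stdlib.h>

#define JOBS_PER_SEG 8
#define REAL_MAX_SEG 4

struct seg { int *slots; int nr_used; };

static struct seg segs[REAL_MAX_SEG];
static unsigned int nr_segs, cur_seg;

static int add_seg(void)
{
	if (nr_segs + 1 >= REAL_MAX_SEG)
		return -1;
	segs[nr_segs].slots = calloc(JOBS_PER_SEG, sizeof(int));
	if (!segs[nr_segs].slots)
		return -1;
	segs[nr_segs].nr_used = 0;
	nr_segs++;
	if (nr_segs != 1)	/* first segment stays at cursor 0 */
		cur_seg++;
	return 0;
}

/* mirrors expand_thread_area(): grow only when the tail segment is full */
static int expand(void)
{
	if (nr_segs && segs[cur_seg].nr_used < JOBS_PER_SEG)
		return 0;
	return add_seg();
}
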
static struct thread_data *get_new_job(bool global, struct thread_data *parent,
				       bool preserve_eo, const char *jobname)
{
+	struct thread_segment *seg;
	struct thread_data *td;
	if (global)
		return &def_thread;
-	if (setup_thread_area()) {
+	if (expand_thread_area()) {
		log_err("error: failed to setup shm segment\n");
		return NULL;
	}
-	td = &segments[0].threads[thread_number++];
+	seg = &segments[cur_segment];
+	td = &seg->threads[seg->nr_threads++];
+	thread_number++;
	*td = *parent;
	INIT_FLIST_HEAD(&td->opt_list);
	free(td->o.name);
	memset(td, 0, sizeof(*td));
+	segments[cur_segment].nr_threads--;
	thread_number--;
}
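
Note how the put_job() hunk at the end mirrors the allocation: get_new_job() bumps both seg->nr_threads and the global thread_number, so discarding a job must decrement both. This is safe because jobs are discarded in LIFO order during parsing, so the slot being released is always the tail of segments[cur_segment]. A tiny standalone model of the pairing (names hypothetical):

#define JOBS_PER_SEG 8

struct pool { int nr_used; int total; };

static int *grab_tail_slot(struct pool *p, int *slots)
{
	if (p->nr_used >= JOBS_PER_SEG)
		return NULL;
	p->total++;			/* mirrors thread_number++ */
	return &slots[p->nr_used++];	/* mirrors seg->nr_threads++ */
}

static void drop_tail_slot(struct pool *p)
{
	p->nr_used--;	/* mirrors segments[cur_segment].nr_threads-- */
	p->total--;	/* mirrors thread_number-- */
}
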