#define FIO_RANDSEED (0xb1899bedUL)
static char **ini_file;
-static int max_jobs = FIO_MAX_JOBS;
static bool dump_cmdline;
static bool parse_only;
static bool merge_blktrace_only;
static struct thread_data def_thread;
-struct thread_data *threads = NULL;
+struct thread_segment segments[REAL_MAX_SEG];
static char **job_sections;
static int nr_job_sections;
.has_arg = optional_argument,
.val = 'S',
},
+#ifdef WIN32
+ {
+ .name = (char *) "server-internal",
+ .has_arg = required_argument,
+ .val = 'N',
+ },
+#endif
{ .name = (char *) "daemonize",
.has_arg = required_argument,
.val = 'D',
void free_threads_shm(void)
{
- if (threads) {
- void *tp = threads;
+ int i;
+
+ for (i = 0; i < nr_segments; i++) {
+ struct thread_segment *seg = &segments[i];
+
+ if (seg->threads) {
+ void *tp = seg->threads;
#ifndef CONFIG_NO_SHM
- struct shmid_ds sbuf;
+ struct shmid_ds sbuf;
- threads = NULL;
- shmdt(tp);
- shmctl(shm_id, IPC_RMID, &sbuf);
- shm_id = -1;
+ seg->threads = NULL;
+ shmdt(tp);
+ shmctl(seg->shm_id, IPC_RMID, &sbuf);
+ seg->shm_id = -1;
#else
- threads = NULL;
- free(tp);
+ seg->threads = NULL;
+ free(tp);
#endif
+ }
}
+
+ nr_segments = 0;
+ cur_segment = 0;
}
static void free_shm(void)
{
- if (threads) {
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (nr_segments) {
flow_exit();
fio_debug_jobp = NULL;
fio_warned = NULL;
fio_filelock_exit();
file_hash_exit();
scleanup();
+#endif
}
-/*
- * The thread area is shared between the main process and the job
- * threads/processes. So setup a shared memory segment that will hold
- * all the job info. We use the end of the region for keeping track of
- * open files across jobs, for file sharing.
- */
-static int setup_thread_area(void)
+static int add_thread_segment(void)
{
+ struct thread_segment *seg = &segments[nr_segments];
+ size_t size = JOBS_PER_SEG * sizeof(struct thread_data);
int i;
- if (threads)
- return 0;
-
- /*
- * 1024 is too much on some machines, scale max_jobs if
- * we get a failure that looks like too large a shm segment
- */
- do {
- size_t size = max_jobs * sizeof(struct thread_data);
+ if (nr_segments + 1 >= REAL_MAX_SEG) {
+ log_err("error: maximum number of jobs reached.\n");
+ return -1;
+ }
- size += 2 * sizeof(unsigned int);
+ size += 2 * sizeof(unsigned int);
#ifndef CONFIG_NO_SHM
- shm_id = shmget(0, size, IPC_CREAT | 0600);
- if (shm_id != -1)
- break;
- if (errno != EINVAL && errno != ENOMEM && errno != ENOSPC) {
+ seg->shm_id = shmget(0, size, IPC_CREAT | 0600);
+ if (seg->shm_id == -1) {
+ if (errno != EINVAL && errno != ENOMEM && errno != ENOSPC)
perror("shmget");
- break;
- }
+ return -1;
+ }
#else
- threads = malloc(size);
- if (threads)
- break;
+ seg->threads = malloc(size);
+ if (!seg->threads)
+ return -1;
#endif
- max_jobs >>= 1;
- } while (max_jobs);
-
#ifndef CONFIG_NO_SHM
- if (shm_id == -1)
- return 1;
-
- threads = shmat(shm_id, NULL, 0);
- if (threads == (void *) -1) {
+ seg->threads = shmat(seg->shm_id, NULL, 0);
+ if (seg->threads == (void *) -1) {
perror("shmat");
return 1;
}
if (shm_attach_to_open_removed())
- shmctl(shm_id, IPC_RMID, NULL);
+ shmctl(seg->shm_id, IPC_RMID, NULL);
#endif
- memset(threads, 0, max_jobs * sizeof(struct thread_data));
- for (i = 0; i < max_jobs; i++)
- DRD_IGNORE_VAR(threads[i]);
- fio_debug_jobp = (unsigned int *)(threads + max_jobs);
+ nr_segments++;
+
+ memset(seg->threads, 0, JOBS_PER_SEG * sizeof(struct thread_data));
+ for (i = 0; i < JOBS_PER_SEG; i++)
+ DRD_IGNORE_VAR(seg->threads[i]);
+ seg->nr_threads = 0;
+
+ /* Not first segment, we're done */
+ if (nr_segments != 1) {
+ cur_segment++;
+ return 0;
+ }
+
+ fio_debug_jobp = (unsigned int *)(seg->threads + JOBS_PER_SEG);
*fio_debug_jobp = -1;
fio_warned = fio_debug_jobp + 1;
*fio_warned = 0;
flow_init();
-
return 0;
}
+/*
+ * The thread areas are shared between the main process and the job
+ * threads/processes, and are split into chunks of JOBS_PER_SEG. If the current
+ * segment has no more room, add a new chunk.
+ */
+static int expand_thread_area(void)
+{
+ struct thread_segment *seg = &segments[cur_segment];
+
+ if (nr_segments && seg->nr_threads < JOBS_PER_SEG)
+ return 0;
+
+ return add_thread_segment();
+}
+
static void dump_print_option(struct print_option *p)
{
const char *delim;
}
}
-static void fio_dump_options_free(struct thread_data *td)
-{
- while (!flist_empty(&td->opt_list)) {
- struct print_option *p;
-
- p = flist_first_entry(&td->opt_list, struct print_option, list);
- flist_del_init(&p->list);
- free(p->name);
- free(p->value);
- free(p);
- }
-}
-
static void copy_opt_list(struct thread_data *dst, struct thread_data *src)
{
struct flist_head *entry;
static struct thread_data *get_new_job(bool global, struct thread_data *parent,
bool preserve_eo, const char *jobname)
{
+ struct thread_segment *seg;
struct thread_data *td;
if (global)
return &def_thread;
- if (setup_thread_area()) {
+ if (expand_thread_area()) {
log_err("error: failed to setup shm segment\n");
return NULL;
}
- if (thread_number >= max_jobs) {
- log_err("error: maximum number of jobs (%d) reached.\n",
- max_jobs);
- return NULL;
- }
- td = &threads[thread_number++];
+ seg = &segments[cur_segment];
+ td = &seg->threads[seg->nr_threads++];
+ thread_number++;
*td = *parent;
INIT_FLIST_HEAD(&td->opt_list);
if (td->o.name)
free(td->o.name);
- memset(&threads[td->thread_number - 1], 0, sizeof(*td));
+ memset(td, 0, sizeof(*td));
+ segments[cur_segment].nr_threads--;
thread_number--;
}
{
int ret = 0;
- if (td->o.rate[DDIR_READ] || td->o.rate_iops[DDIR_READ])
- ret = __setup_rate(td, DDIR_READ);
- if (td->o.rate[DDIR_WRITE] || td->o.rate_iops[DDIR_WRITE])
- ret |= __setup_rate(td, DDIR_WRITE);
- if (td->o.rate[DDIR_TRIM] || td->o.rate_iops[DDIR_TRIM])
- ret |= __setup_rate(td, DDIR_TRIM);
-
+ for_each_rw_ddir(ddir) {
+ if (td->o.rate[ddir] || td->o.rate_iops[ddir]) {
+ ret |= __setup_rate(td, ddir);
+ }
+ }
return ret;
}
ret |= 1;
}
+ if (o->zone_mode == ZONE_MODE_ZBD && !o->create_serialize) {
+ log_err("fio: --zonemode=zbd and --create_serialize=0 are not compatible.\n");
+ ret |= 1;
+ }
+
if (o->zone_mode == ZONE_MODE_STRIDED && !o->zone_size) {
log_err("fio: --zonesize must be specified when using --zonemode=strided.\n");
ret |= 1;
if (td_read(td))
o->overwrite = 1;
- if (!o->min_bs[DDIR_READ])
- o->min_bs[DDIR_READ] = o->bs[DDIR_READ];
- if (!o->max_bs[DDIR_READ])
- o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
- if (!o->min_bs[DDIR_WRITE])
- o->min_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- if (!o->max_bs[DDIR_WRITE])
- o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- if (!o->min_bs[DDIR_TRIM])
- o->min_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
- if (!o->max_bs[DDIR_TRIM])
- o->max_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-
- o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
- o->rw_min_bs = min(o->min_bs[DDIR_TRIM], o->rw_min_bs);
+ for_each_rw_ddir(ddir) {
+ if (!o->min_bs[ddir])
+ o->min_bs[ddir] = o->bs[ddir];
+ if (!o->max_bs[ddir])
+ o->max_bs[ddir] = o->bs[ddir];
+ }
+
+ o->rw_min_bs = -1;
+ for_each_rw_ddir(ddir) {
+ o->rw_min_bs = min(o->rw_min_bs, o->min_bs[ddir]);
+ }
/*
* For random IO, allow blockalign offset other than min_bs.
*/
- if (!o->ba[DDIR_READ] || !td_random(td))
- o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
- if (!o->ba[DDIR_WRITE] || !td_random(td))
- o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
- if (!o->ba[DDIR_TRIM] || !td_random(td))
- o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
+ for_each_rw_ddir(ddir) {
+ if (!o->ba[ddir] || !td_random(td))
+ o->ba[ddir] = o->min_bs[ddir];
+ }
if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
log_err("fio: rate and rate_iops are mutually exclusive\n");
ret |= 1;
}
- if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
- (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
- (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
- (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
- (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
- (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
- log_err("fio: minimum rate exceeds rate\n");
- ret |= 1;
+ for_each_rw_ddir(ddir) {
+ if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
+ (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
+ log_err("fio: minimum rate exceeds rate, ddir %d\n", +ddir);
+ ret |= 1;
+ }
}
if (!o->timeout && o->time_based) {
ret |= 1;
}
- if (fio_option_is_set(o, clat_percentiles) &&
- !fio_option_is_set(o, lat_percentiles)) {
- o->lat_percentiles = !o->clat_percentiles;
- } else if (fio_option_is_set(o, lat_percentiles) &&
- !fio_option_is_set(o, clat_percentiles)) {
- o->clat_percentiles = !o->lat_percentiles;
- } else if (fio_option_is_set(o, lat_percentiles) &&
- fio_option_is_set(o, clat_percentiles) &&
- o->lat_percentiles && o->clat_percentiles) {
- log_err("fio: lat_percentiles and clat_percentiles are "
- "mutually exclusive\n");
- ret |= 1;
- }
-
if (o->disable_lat)
o->lat_percentiles = 0;
if (o->disable_clat)
o->clat_percentiles = 0;
+ if (o->disable_slat)
+ o->slat_percentiles = 0;
/*
* Fix these up to be nsec internally
*/
- o->max_latency *= 1000ULL;
+ for_each_rw_ddir(ddir)
+ o->max_latency[ddir] *= 1000ULL;
+
o->latency_target *= 1000ULL;
- o->latency_window *= 1000ULL;
+
+ /*
+ * Dedupe working set verifications
+ */
+ if (o->dedupe_percentage && o->dedupe_mode == DEDUPE_MODE_WORKING_SET) {
+ if (!fio_option_is_set(o, size)) {
+ log_err("fio: pregenerated dedupe working set "
+ "requires size to be set\n");
+ ret |= 1;
+ } else if (o->nr_files != 1) {
+ log_err("fio: dedupe working set mode supported with "
+ "single file per job, but %d files "
+ "provided\n", o->nr_files);
+ ret |= 1;
+ } else if (o->dedupe_working_set_percentage + o->dedupe_percentage > 100) {
+ log_err("fio: impossible to reach expected dedupe percentage %u "
+ "since %u percentage of size is reserved to dedupe working set "
+ "(those are unique pages)\n",
+ o->dedupe_percentage, o->dedupe_working_set_percentage);
+ ret |= 1;
+ }
+ }
return ret;
}
const unsigned int seed = td->rand_seeds[FIO_RAND_FILE_OFF];
if (td->o.file_service_type == FIO_FSERVICE_ZIPF) {
- zipf_init(&td->next_file_zipf, nranges, td->zipf_theta, seed);
+ zipf_init(&td->next_file_zipf, nranges, td->zipf_theta, td->random_center, seed);
zipf_disable_hash(&td->next_file_zipf);
} else if (td->o.file_service_type == FIO_FSERVICE_PARETO) {
- pareto_init(&td->next_file_zipf, nranges, td->pareto_h, seed);
+ pareto_init(&td->next_file_zipf, nranges, td->pareto_h, td->random_center, seed);
zipf_disable_hash(&td->next_file_zipf);
} else if (td->o.file_service_type == FIO_FSERVICE_GAUSS) {
- gauss_init(&td->next_file_gauss, nranges, td->gauss_dev, seed);
+ gauss_init(&td->next_file_gauss, nranges, td->gauss_dev, td->random_center, seed);
gauss_disable_hash(&td->next_file_gauss);
}
}
static void td_fill_rand_seeds_internal(struct thread_data *td, bool use64)
{
- unsigned int read_seed = td->rand_seeds[FIO_RAND_BS_OFF];
- unsigned int write_seed = td->rand_seeds[FIO_RAND_BS1_OFF];
- unsigned int trim_seed = td->rand_seeds[FIO_RAND_BS2_OFF];
+ uint64_t read_seed = td->rand_seeds[FIO_RAND_BS_OFF];
+ uint64_t write_seed = td->rand_seeds[FIO_RAND_BS1_OFF];
+ uint64_t trim_seed = td->rand_seeds[FIO_RAND_BS2_OFF];
int i;
/*
init_rand_seed(&td->poisson_state[2], td->rand_seeds[FIO_RAND_POISSON3_OFF], 0);
init_rand_seed(&td->dedupe_state, td->rand_seeds[FIO_DEDUPE_OFF], false);
init_rand_seed(&td->zone_state, td->rand_seeds[FIO_RAND_ZONE_OFF], false);
+ init_rand_seed(&td->prio_state, td->rand_seeds[FIO_RAND_PRIO_CMDS], false);
+ init_rand_seed(&td->dedupe_working_set_index_state, td->rand_seeds[FIO_RAND_DEDUPE_WORKING_SET_IX], use64);
if (!td_random(td))
return;
* for this name and see if they match. If they do, then
* the engine is unchanged.
*/
- dlhandle = td->io_ops_dlhandle;
+ dlhandle = td->io_ops->dlhandle;
ops = load_ioengine(td);
- if (ops == td->io_ops && dlhandle == td->io_ops_dlhandle) {
- if (dlhandle)
- dlclose(dlhandle);
+ if (!ops)
+ goto fail;
+
+ if (ops == td->io_ops && dlhandle == td->io_ops->dlhandle)
return 0;
- }
- if (dlhandle && dlhandle != td->io_ops_dlhandle)
+ if (dlhandle && dlhandle != td->io_ops->dlhandle)
dlclose(dlhandle);
/* Unload the old engine. */
}
td->io_ops = load_ioengine(td);
- if (!td->io_ops) {
- log_err("fio: failed to load engine\n");
- return 1;
- }
+ if (!td->io_ops)
+ goto fail;
if (td->io_ops->option_struct_size && td->io_ops->options) {
/*
td_set_ioengine_flags(td);
return 0;
+
+fail:
+ log_err("fio: failed to load engine\n");
+ return 1;
+
}
static void init_flags(struct thread_data *td)
FPRE_NONE = 0,
FPRE_JOBNAME,
FPRE_JOBNUM,
- FPRE_FILENUM
+ FPRE_FILENUM,
+ FPRE_CLIENTUID
};
static struct fpre_keyword {
{ .keyword = "$jobname", .key = FPRE_JOBNAME, },
{ .keyword = "$jobnum", .key = FPRE_JOBNUM, },
{ .keyword = "$filenum", .key = FPRE_FILENUM, },
+ { .keyword = "$clientuid", .key = FPRE_CLIENTUID, },
{ .keyword = NULL, },
};
}
break;
}
+ case FPRE_CLIENTUID: {
+ int ret;
+ ret = snprintf(dst, dst_left, "%s", client_sockaddr_str);
+ if (ret < 0)
+ break;
+ else if (ret > dst_left) {
+ log_err("fio: truncated filename\n");
+ dst += dst_left;
+ dst_left = 0;
+ } else {
+ dst += ret;
+ dst_left -= ret;
+ }
+ break;
+ }
default:
assert(0);
break;
return true;
}
+static int verify_per_group_options(struct thread_data *td, const char *jobname)
+{
+ struct thread_data *td2;
+ int i;
+
+ for_each_td(td2, i) {
+ if (td->groupid != td2->groupid)
+ continue;
+
+ if (td->o.stats &&
+ td->o.lat_percentiles != td2->o.lat_percentiles) {
+ log_err("fio: lat_percentiles in job: %s differs from group\n",
+ jobname);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/*
* Treat an empty log file name the same as a one not given
*/
if (fixup_options(td))
goto err;
+ if (init_dedupe_working_set_seeds(td))
+ goto err;
+
/*
* Belongs to fixup_options, but o->name is not necessarily set as yet
*/
td->ts.clat_percentiles = o->clat_percentiles;
td->ts.lat_percentiles = o->lat_percentiles;
+ td->ts.slat_percentiles = o->slat_percentiles;
td->ts.percentile_precision = o->percentile_precision;
memcpy(td->ts.percentile_list, o->percentile_list, sizeof(o->percentile_list));
td->ts.sig_figs = o->sig_figs;
- for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- td->ts.clat_stat[i].min_val = ULONG_MAX;
- td->ts.slat_stat[i].min_val = ULONG_MAX;
- td->ts.lat_stat[i].min_val = ULONG_MAX;
- td->ts.bw_stat[i].min_val = ULONG_MAX;
- td->ts.iops_stat[i].min_val = ULONG_MAX;
- }
- td->ts.sync_stat.min_val = ULONG_MAX;
+ init_thread_stat_min_vals(&td->ts);
td->ddir_seq_nr = o->ddir_seq_nr;
if ((o->stonewall || o->new_group) && prev_group_jobs) {
td->groupid = groupid;
prev_group_jobs++;
+ if (td->o.group_reporting && prev_group_jobs > 1 &&
+ verify_per_group_options(td, jobname))
+ goto err;
+
if (setup_rate(td))
goto err;
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_LAT,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
else
suf = "log";
- gen_log_name(logname, sizeof(logname), "lat", pre,
- td->thread_number, suf, o->per_job_logs);
- setup_log(&td->lat_log, &p, logname);
+ if (!o->disable_lat) {
+ gen_log_name(logname, sizeof(logname), "lat", pre,
+ td->thread_number, suf, o->per_job_logs);
+ setup_log(&td->lat_log, &p, logname);
+ }
- gen_log_name(logname, sizeof(logname), "slat", pre,
- td->thread_number, suf, o->per_job_logs);
- setup_log(&td->slat_log, &p, logname);
+ if (!o->disable_slat) {
+ gen_log_name(logname, sizeof(logname), "slat", pre,
+ td->thread_number, suf, o->per_job_logs);
+ setup_log(&td->slat_log, &p, logname);
+ }
- gen_log_name(logname, sizeof(logname), "clat", pre,
- td->thread_number, suf, o->per_job_logs);
- setup_log(&td->clat_log, &p, logname);
+ if (!o->disable_clat) {
+ gen_log_name(logname, sizeof(logname), "clat", pre,
+ td->thread_number, suf, o->per_job_logs);
+ setup_log(&td->clat_log, &p, logname);
+ }
}
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_HIST,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_BW,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_IOPS,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
if (file_alloced) {
if (td_new->files) {
struct fio_file *f;
- for_each_file(td_new, f, i) {
- if (f->file_name)
- sfree(f->file_name);
- sfree(f);
- }
+ for_each_file(td_new, f, i)
+ fio_file_free(f);
free(td_new->files);
td_new->files = NULL;
}
int nested, char *name, char ***popts, int *aopts, int *nopts)
{
bool global = false;
+ bool stdin_occupied = false;
char *string;
FILE *f;
char *p;
if (is_buf)
f = NULL;
else {
- if (!strcmp(file, "-"))
+ if (!strcmp(file, "-")) {
f = stdin;
- else
+ stdin_occupied = true;
+ } else
f = fopen(file, "r");
if (!f) {
}
ret = fio_options_parse(td, opts, num_opts);
+
+ if (!ret && td->o.read_iolog_file != NULL) {
+ char *fname = get_name_by_idx(td->o.read_iolog_file,
+ td->subjob_number);
+ if (!strcmp(fname, "-")) {
+ if (stdin_occupied) {
+ log_err("fio: only one user (read_iolog_file/job "
+ "file) of stdin is permitted at once but "
+ "more than one was found.\n");
+ ret = 1;
+ }
+ stdin_occupied = true;
+ }
+ }
if (!ret) {
if (dump_cmdline)
dump_opt_list(td);
case 'i':
did_arg = true;
if (!cur_client) {
- fio_show_ioengine_help(optarg);
+ exit_val = fio_show_ioengine_help(optarg);
do_exit++;
}
break;
warnings_fatal = 1;
break;
case 'j':
- max_jobs = atoi(optarg);
- if (!max_jobs || max_jobs > REAL_MAX_JOBS) {
- log_err("fio: invalid max jobs: %d\n", max_jobs);
- do_exit++;
- exit_val = 1;
- }
+ /* we don't track/need this anymore, ignore it */
break;
case 'S':
did_arg = true;
exit_val = 1;
#endif
break;
+#ifdef WIN32
+ case 'N':
+ did_arg = true;
+ fio_server_internal_set(optarg);
+ break;
+#endif
case 'D':
if (pid_file)
free(pid_file);
log_err("%s: unrecognized option '%s'\n", argv[0],
argv[optind - 1]);
show_closest_option(argv[optind - 1]);
- /* fall through */
+ fallthrough;
default:
do_exit++;
exit_val = 1;