OOM fixes
author		Jens Axboe <jens.axboe@oracle.com>
		Mon, 12 Mar 2007 10:21:48 +0000 (11:21 +0100)
committer	Jens Axboe <jens.axboe@oracle.com>
		Mon, 12 Mar 2007 10:21:48 +0000 (11:21 +0100)
Hit this with many threads running: with enough jobs the per-file random map allocation can fail, and any error taken in thread_main() before fio_sem_up(startup_sem) left the parent blocked on the startup semaphore. Route those early error paths through a new err_sem label that raises the semaphore before the usual cleanup, and check the file_map allocation in init_random_state() instead of assuming it succeeds.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
fio.c
init.c

diff --git a/fio.c b/fio.c
index b0afdcc85f303303b2513194b20bfc7212651c83..7110e87dfac36cfeec1cac0b87a8eb3018eac68b 100644
--- a/fio.c
+++ b/fio.c
@@ -692,33 +692,33 @@ static void *thread_main(void *data)
        INIT_LIST_HEAD(&td->io_log_list);
 
        if (init_io_u(td))
-               goto err;
+               goto err_sem;
 
        if (fio_setaffinity(td) == -1) {
                td_verror(td, errno, "cpu_set_affinity");
-               goto err;
+               goto err_sem;
        }
 
        if (init_iolog(td))
-               goto err;
+               goto err_sem;
 
        if (td->ioprio) {
                if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
                        td_verror(td, errno, "ioprio_set");
-                       goto err;
+                       goto err_sem;
                }
        }
 
        if (nice(td->nice) == -1) {
                td_verror(td, errno, "nice");
-               goto err;
+               goto err_sem;
        }
 
        if (init_random_state(td))
-               goto err;
+               goto err_sem;
 
        if (td->ioscheduler && switch_ioscheduler(td))
-               goto err;
+               goto err_sem;
 
        td_set_runstate(td, TD_INITIALIZED);
        fio_sem_up(startup_sem);
@@ -821,6 +821,9 @@ err:
        cleanup_io_u(td);
        td_set_runstate(td, TD_EXITED);
        return (void *) (unsigned long) td->error;
+err_sem:
+       fio_sem_up(startup_sem);
+       goto err;
 }
 
 /*
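The fio.c change, in short: every failure path in thread_main() that can be taken before the thread announces itself now goes through err_sem, which raises the startup semaphore before falling into the normal err cleanup, so whoever waits on startup_sem is never left blocked by a thread that died during setup. A minimal, self-contained sketch of the same pattern, using plain POSIX semaphores rather than fio's fio_sem_* wrappers (the names and the parent-side wait are illustrative assumptions, not fio code):

	/* demo.c: why an early error must still raise the startup semaphore.
	 * Hypothetical example; fio uses its own fio_sem_* wrappers instead. */
	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>
	#include <stdlib.h>

	static sem_t startup_sem;

	static int init_resources(void)
	{
		/* Stand-in for the allocations that can fail under memory pressure. */
		void *p = malloc(1024);

		if (!p)
			return 1;
		free(p);
		return 0;
	}

	static void *thread_main(void *data)
	{
		(void) data;

		if (init_resources())
			goto err_sem;	/* failed before the parent was signalled */

		sem_post(&startup_sem);	/* normal path: tell the parent we are up */
		/* ... real work would run here ... */
		return NULL;

	err_sem:
		sem_post(&startup_sem);	/* raise the semaphore first, then bail out */
		return (void *) 1UL;
	}

	int main(void)
	{
		pthread_t thread;

		sem_init(&startup_sem, 0, 0);
		pthread_create(&thread, NULL, thread_main, NULL);

		/* Without the err_sem path, an early failure would hang right here. */
		sem_wait(&startup_sem);
		pthread_join(thread, NULL);
		sem_destroy(&startup_sem);
		return 0;
	}

Compile with -pthread; if the err_sem path is removed, a failing init_resources() leaves main() stuck in sem_wait(), which is the hang the patch addresses.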
diff --git a/init.c b/init.c
index 62080f74949f09a9abbdb1ce2fa40ee2b9a7731d..bca8424a9e49348c05b99a272c9d96789b469559 100644
--- a/init.c
+++ b/init.c
@@ -997,6 +997,10 @@ int init_random_state(struct thread_data *td)
                        blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
                        num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
                        f->file_map = malloc(num_maps * sizeof(long));
+                       if (!f->file_map) {
+                               log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
+                               return 1;
+                       }
                        f->num_maps = num_maps;
                        memset(f->file_map, 0, num_maps * sizeof(long));
                }
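The init.c side guards the random-map allocation itself. The map is sized from the file size and the minimum block size and stored as an array of longs, so a run with many jobs over large files can add up to a large total allocation; a failed malloc() is now reported (with a pointer to the norandommap option) instead of being written to. A rough standalone sketch of that sizing and check, where BLOCKS_PER_MAP and the sample file and block sizes are assumptions for illustration:

	/* Standalone sketch of the random-map sizing guarded in the hunk above. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define BLOCKS_PER_MAP	(8 * sizeof(unsigned long))

	static unsigned long *alloc_file_map(unsigned long long file_size,
					     unsigned int min_bs,
					     unsigned long *num_maps_out)
	{
		unsigned long long blocks;
		unsigned long num_maps;
		unsigned long *map;

		/* Same arithmetic as the hunk: round blocks and map words up. */
		blocks = (file_size + min_bs - 1) / min_bs;
		num_maps = (blocks + BLOCKS_PER_MAP - 1) / BLOCKS_PER_MAP;

		map = malloc(num_maps * sizeof(long));
		if (!map) {
			fprintf(stderr, "failed allocating random map (%lu longs)\n",
				num_maps);
			return NULL;
		}

		memset(map, 0, num_maps * sizeof(long));
		*num_maps_out = num_maps;
		return map;
	}

	int main(void)
	{
		unsigned long num_maps;
		unsigned long *map;

		/* 1 GiB file, 4 KiB minimum block size. */
		map = alloc_file_map(1ULL << 30, 4096, &num_maps);
		if (!map)
			return 1;

		printf("random map uses %lu longs (%zu bytes)\n",
		       num_maps, num_maps * sizeof(long));
		free(map);
		return 0;
	}

Returning failure from the helper mirrors what the patch does: init_random_state() now returns 1, and thread_main() takes the new err_sem path instead of dereferencing a NULL file_map.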