int r, new_layout = 0, unlink_file = 0, flags;
unsigned long long left;
unsigned int bs;
- char *b;
+ char *b = NULL;
if (read_only) {
log_err("fio: refusing extend of file due to read-only\n");
if (new_layout)
flags |= O_TRUNC;
+#ifdef WIN32
+ flags |= _O_BINARY;
+#endif
+
dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
f->fd = open(f->file_name, flags, 0644);
if (f->fd < 0) {
err:
close(f->fd);
f->fd = -1;
+ if (b)
+ free(b);
return 1;
}
{
int ret = 0;
+#ifdef CONFIG_ESX
+ return 0;
+#endif
+
if (len == -1ULL)
len = f->io_size;
if (off == -1ULL)
dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
len);
- /*
- * FIXME: add blockdev flushing too
- */
- if (f->mmap_ptr) {
+ if (td->io_ops->invalidate)
+ ret = td->io_ops->invalidate(td, f);
+ else if (f->mmap_ptr) {
ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
#ifdef FIO_MADV_FREE
if (f->filetype == FIO_TYPE_BD)
} else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
ret = 0;
- if (ret < 0) {
- td_verror(td, errno, "invalidate_cache");
- return 1;
- } else if (ret > 0) {
- td_verror(td, ret, "invalidate_cache");
- return 1;
+ /*
+ * Cache flushing isn't a fatal condition, and we know it will
+ * happen on some platforms where we don't have the proper
+ * function to flush eg block device caches. So just warn and
+ * continue on our way.
+ */
+ if (ret) {
+ log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errno));
+ ret = 0;
}
- return ret;
+ return 0;
}
from_hash = 0;
}
+#ifdef WIN32
+ flags |= _O_BINARY;
+#endif
+
f->fd = open(f->file_name, flags, 0600);
return from_hash;
}
} else if (f->filetype != FIO_TYPE_FILE)
continue;
- strcpy(buf, f->file_name);
+ buf[255] = '\0';
+ strncpy(buf, f->file_name, 255);
if (stat(buf, &sb) < 0) {
if (errno != ENOENT)
if (fm)
continue;
- fm = malloc(sizeof(*fm));
- strcpy(fm->__base, buf);
+ fm = calloc(1, sizeof(*fm));
+ strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
fm->base = basename(fm->__base);
fm->key = sb.st_dev;
flist_add(&fm->list, &list);
if (!o->size || o->size > total_size)
o->size = total_size;
+ if (o->size < td_min_bs(td)) {
+ log_err("fio: blocksize too large for data set\n");
+ goto err_out;
+ }
+
/*
* See if we need to extend some files
*/
err = __file_invalidate_cache(td, f, old_len,
extend_len);
- close(f->fd);
+
+ /*
+ * Shut up static checker
+ */
+ if (f->fd != -1)
+ close(f->fd);
+
f->fd = -1;
if (err)
break;
* iolog already set the total io size, if we read back
* stored entries.
*/
- if (!o->read_iolog_file)
- td->total_io_size = o->size * o->loops;
+ if (!o->read_iolog_file) {
+ if (o->io_limit)
+ td->total_io_size = o->io_limit * o->loops;
+ else
+ td->total_io_size = o->size * o->loops;
+ }
done:
if (o->create_only)
unsigned long seed;
seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
-
+
if (!lfsr_init(&f->lfsr, blocks, seed, 0))
continue;
} else if (!td->o.norandommap) {
set_already_allocated(file_name);
- /*
- * For adding files after the fact - if openfiles= isn't
- * given as an option, ensure we allow at least one file open
- */
- if (!td->o.open_files)
- td->o.open_files = 1;
-
if (inc)
td->o.nr_files++;
if (--f->references)
return 0;
- if (should_fsync(td) && td->o.fsync_on_close)
+ if (should_fsync(td) && td->o.fsync_on_close) {
f_ret = fsync(f->fd);
+ if (f_ret < 0)
+ f_ret = errno;
+ }
if (td->io_ops->close_file)
ret = td->io_ops->close_file(td, f);
if (lstat(full_path, &sb) == -1) {
if (errno != ENOENT) {
td_verror(td, errno, "stat");
- return 1;
+ ret = 1;
+ break;
}
}
/*
 * Close all of this thread's files and reset the thread's file
 * accounting so the file table can be rebuilt from scratch.
 * NOTE(review): the '+'-prefixed lines are additions from the patch
 * under review — they additionally zero o.nr_files and o.open_files,
 * presumably so a later re-add of files starts from a clean count
 * (TODO: confirm against callers of free_release_files).
 */
void free_release_files(struct thread_data *td)
{
	close_files(td);
+	td->o.nr_files = 0;
+	td->o.open_files = 0;
	td->files_index = 0;
	td->nr_normal_files = 0;
}