+static void free_mem_mmap(struct thread_data *td, size_t total_mem)
+{
+ dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
+ td->orig_buffer);
+ munmap(td->orig_buffer, td->orig_buffer_size);
+ if (td->o.mmapfile) {
+ close(td->mmapfd);
+ unlink(td->o.mmapfile);
+ free(td->o.mmapfile);
+ }
+}
+
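+/*
+ * Plain malloc() backing for the io buffer; returns non-zero on failure
+ * so the caller can report ENOMEM.
+ */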
+static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
+{
+	td->orig_buffer = malloc(total_mem);
+	dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
+			td->orig_buffer);
+
+	return td->orig_buffer == NULL;
+}
+
+static void free_mem_malloc(struct thread_data *td)
+{
+ dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer);
+ free(td->orig_buffer);
+}
+
+/*
+ * Set up the buffer area we need for io.
+ */
+int allocate_io_mem(struct thread_data *td)
+{
+	size_t total_mem;
+	int ret = 0;
+
+	if (td->io_ops->flags & FIO_NOIO)
+		return 0;
+
+	total_mem = td->orig_buffer_size;
+
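+	/*
+	 * O_DIRECT, atomic IO and an explicit mem_align all require an
+	 * aligned buffer start, so pad the allocation by page_mask (expected
+	 * to be page_size - 1 here) so the start can later be rounded up to
+	 * a page boundary. If mem_align exceeds the page size, add the
+	 * difference as well so alignment to mem_align is still possible.
+	 */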
+	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+	    (td->io_ops->flags & FIO_MEMALIGN)) {
+		total_mem += page_mask;
+		if (td->o.mem_align && td->o.mem_align > page_size)
+			total_mem += td->o.mem_align - page_size;
+	}
+
+	dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem);
+
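+	/*
+	 * Hand off to the allocator for the configured backing type; the
+	 * shm and mmap variants (including their huge page flavours) are
+	 * handled by their own helpers.
+	 */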
+	if (td->o.mem_type == MEM_MALLOC)
+		ret = alloc_mem_malloc(td, total_mem);
+	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
+		ret = alloc_mem_shm(td, total_mem);
+	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
+		ret = alloc_mem_mmap(td, total_mem);
+	else {
+		log_err("fio: bad mem type: %d\n", td->o.mem_type);
+		ret = 1;
+	}
+
+	if (ret)
+		td_verror(td, ENOMEM, "iomem allocation");
+
+	return ret;
+}
+