projects
/
fio.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fix early termination of cpu id string
[fio.git]
/
memory.c
diff --git a/memory.c b/memory.c
index 5293af96b226240117d312f24875f0406ceff5fd..b208320c5d88cc619f7167626e3b7502ded07799 100644
(file)
--- a/memory.c
+++ b/memory.c
@@ -5,12 +5,12 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
-#ifndef FIO_NO_HAVE_SHM_H
-#include <sys/shm.h>
-#endif
#include <sys/mman.h>
#include "fio.h"
#include <sys/mman.h>
#include "fio.h"
+#ifndef FIO_NO_HAVE_SHM_H
+#include <sys/shm.h>
+#endif
void fio_unpin_memory(struct thread_data *td)
{
void fio_unpin_memory(struct thread_data *td)
{
@@ -117,14 +117,16 @@ static void free_mem_shm(struct thread_data *td)
static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
-	int flags = MAP_PRIVATE;
+	int flags = 0;
td->mmapfd = 1;
if (td->o.mem_type == MEM_MMAPHUGE) {
unsigned long mask = td->o.hugepage_size - 1;
td->mmapfd = 1;
if (td->o.mem_type == MEM_MMAPHUGE) {
unsigned long mask = td->o.hugepage_size - 1;
- flags |= MAP_HUGETLB;
+ /* TODO: make sure the file is a real hugetlbfs file */
+ if (!td->o.mmapfile)
+ flags |= MAP_HUGETLB;
total_mem = (total_mem + mask) & ~mask;
}
total_mem = (total_mem + mask) & ~mask;
}
@@ -136,18 +138,23 @@ static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
td->orig_buffer = NULL;
return 1;
}
td->orig_buffer = NULL;
return 1;
}
- if (ftruncate(td->mmapfd, total_mem) < 0) {
+ if (td->o.mem_type != MEM_MMAPHUGE &&
+ ftruncate(td->mmapfd, total_mem) < 0) {
td_verror(td, errno, "truncate mmap file");
td->orig_buffer = NULL;
return 1;
}
td_verror(td, errno, "truncate mmap file");
td->orig_buffer = NULL;
return 1;
}
+ if (td->o.mem_type == MEM_MMAPHUGE)
+ flags |= MAP_SHARED;
+ else
+ flags |= MAP_PRIVATE;
} else
} else
-		flags |= OS_MAP_ANON;
+		flags |= OS_MAP_ANON | MAP_PRIVATE;
td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
td->mmapfd, 0);
td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
td->mmapfd, 0);
-	dprint(FD_MEM, "mmap %u/%d %p\n", total_mem, td->mmapfd,
-		td->orig_buffer);
+	dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
+		td->mmapfd, td->orig_buffer);
if (td->orig_buffer == MAP_FAILED) {
td_verror(td, errno, "mmap");
td->orig_buffer = NULL;
if (td->orig_buffer == MAP_FAILED) {
td_verror(td, errno, "mmap");
td->orig_buffer = NULL;
@@ -164,7 +171,8 @@ static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
- dprint(FD_MEM, "munmap %u %p\n", total_mem, td->orig_buffer);
+ dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
+ td->orig_buffer);
munmap(td->orig_buffer, td->orig_buffer_size);
if (td->o.mmapfile) {
close(td->mmapfd);
munmap(td->orig_buffer, td->orig_buffer_size);
if (td->o.mmapfile) {
close(td->mmapfd);
@@ -176,7 +184,8 @@ static void free_mem_mmap(struct thread_data *td, size_t total_mem)
static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
td->orig_buffer = malloc(total_mem);
static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
td->orig_buffer = malloc(total_mem);
- dprint(FD_MEM, "malloc %u %p\n", total_mem, td->orig_buffer);
+ dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
+ td->orig_buffer);
return td->orig_buffer == NULL;
}
return td->orig_buffer == NULL;
}
@@ -200,14 +209,14 @@ int allocate_io_mem(struct thread_data *td)
total_mem = td->orig_buffer_size;
total_mem = td->orig_buffer_size;
-	if (td->o.odirect || td->o.mem_align ||
+	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
(td->io_ops->flags & FIO_MEMALIGN)) {
total_mem += page_mask;
if (td->o.mem_align && td->o.mem_align > page_size)
total_mem += td->o.mem_align - page_size;
}
(td->io_ops->flags & FIO_MEMALIGN)) {
total_mem += page_mask;
if (td->o.mem_align && td->o.mem_align > page_size)
total_mem += td->o.mem_align - page_size;
}
-	dprint(FD_MEM, "Alloc %lu for buffers\n", (size_t) total_mem);
+	dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem);
if (td->o.mem_type == MEM_MALLOC)
ret = alloc_mem_malloc(td, total_mem);
if (td->o.mem_type == MEM_MALLOC)
ret = alloc_mem_malloc(td, total_mem);
@@ -231,7 +240,7 @@ void free_io_mem(struct thread_data *td)
unsigned int total_mem;
total_mem = td->orig_buffer_size;
unsigned int total_mem;
total_mem = td->orig_buffer_size;
-	if (td->o.odirect)
+	if (td->o.odirect || td->o.oatomic)
total_mem += page_mask;
if (td->o.mem_type == MEM_MALLOC)
total_mem += page_mask;
if (td->o.mem_type == MEM_MALLOC)