summaryrefslogtreecommitdiff
path: root/memory.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2006-10-20 11:25:52 +0200
committerJens Axboe <jens.axboe@oracle.com>2006-10-20 11:25:52 +0200
commit2f9ade3cb72218eb260f4f5e6914218bdcaa2bcc (patch)
treed999fc2709638809daa11ad16ded18dd7878cb77 /memory.c
parent2e7964b8f84782afe665d8ae549c2910320e247f (diff)
downloadfio-2f9ade3cb72218eb260f4f5e6914218bdcaa2bcc.tar.gz
fio-2f9ade3cb72218eb260f4f5e6914218bdcaa2bcc.tar.bz2
[PATCH] Split out the memory handling from fio.c
In the process also fix some bugs in the memory pinning. Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'memory.c')
-rw-r--r--memory.c108
1 file changed, 108 insertions, 0 deletions
diff --git a/memory.c b/memory.c
new file mode 100644
index 00000000..d8924a85
--- /dev/null
+++ b/memory.c
@@ -0,0 +1,108 @@
+/*
+ * Memory helpers
+ */
+#include <unistd.h>
+#include <sys/shm.h>
+#include <sys/mman.h>
+
+#include "fio.h"
+#include "os.h"
+
+extern unsigned long long mlock_size;
+static void *pinned_mem;
+
+void fio_unpin_memory(void)
+{
+ if (pinned_mem) {
+ if (munlock(pinned_mem, mlock_size) < 0)
+ perror("munlock");
+ munmap(pinned_mem, mlock_size);
+ pinned_mem = NULL;
+ }
+}
+
+int fio_pin_memory(void)
+{
+ unsigned long long phys_mem;
+
+ if (!mlock_size)
+ return 0;
+
+ /*
+ * Don't allow mlock of more than real_mem-128MB
+ */
+ phys_mem = os_phys_mem();
+ if (phys_mem) {
+ if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
+ mlock_size = phys_mem - 128 * 1024 * 1024;
+ fprintf(f_out, "fio: limiting mlocked memory to %lluMiB\n", mlock_size >> 20);
+ }
+ }
+
+ pinned_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
+ if (pinned_mem == MAP_FAILED) {
+ perror("malloc locked mem");
+ pinned_mem = NULL;
+ return 1;
+ }
+ if (mlock(pinned_mem, mlock_size) < 0) {
+ perror("mlock");
+ munmap(pinned_mem, mlock_size);
+ pinned_mem = NULL;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup the buffer area we need for io.
+ */
+int allocate_io_mem(struct thread_data *td)
+{
+ if (td->mem_type == MEM_MALLOC)
+ td->orig_buffer = malloc(td->orig_buffer_size);
+ else if (td->mem_type == MEM_SHM) {
+ td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
+ if (td->shm_id < 0) {
+ td_verror(td, errno);
+ perror("shmget");
+ return 1;
+ }
+
+ td->orig_buffer = shmat(td->shm_id, NULL, 0);
+ if (td->orig_buffer == (void *) -1) {
+ td_verror(td, errno);
+ perror("shmat");
+ td->orig_buffer = NULL;
+ return 1;
+ }
+ } else if (td->mem_type == MEM_MMAP) {
+ td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
+ if (td->orig_buffer == MAP_FAILED) {
+ td_verror(td, errno);
+ perror("mmap");
+ td->orig_buffer = NULL;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+void free_io_mem(struct thread_data *td)
+{
+ if (td->mem_type == MEM_MALLOC)
+ free(td->orig_buffer);
+ else if (td->mem_type == MEM_SHM) {
+ struct shmid_ds sbuf;
+
+ shmdt(td->orig_buffer);
+ shmctl(td->shm_id, IPC_RMID, &sbuf);
+ } else if (td->mem_type == MEM_MMAP)
+ munmap(td->orig_buffer, td->orig_buffer_size);
+ else
+ log_err("Bad memory type %d\n", td->mem_type);
+
+ td->orig_buffer = NULL;
+}