#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif

static void *pinned_mem;
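
/*
 * Release a previously pinned region: drop the mlock() and unmap the
 * anonymous mapping that backed it.
 */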
void fio_unpin_memory(void)
{
	if (pinned_mem) {
		dprint(FD_MEM, "unpinning %llu bytes\n", mlock_size);
		if (munlock(pinned_mem, mlock_size) < 0)
			perror("munlock");
		munmap(pinned_mem, mlock_size);
		pinned_mem = NULL;
	}
}
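
/*
 * Pin mlock_size bytes of memory for the duration of the run. The locked
 * amount is capped so that roughly 128MB of physical memory stays unlocked.
 */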
int fio_pin_memory(void)
{
	unsigned long long phys_mem;

	if (!mlock_size)
		return 0;

	dprint(FD_MEM, "pinning %llu bytes\n", mlock_size);

	/*
	 * Don't allow mlock of more than real_mem-128MB
	 */
	phys_mem = os_phys_mem();
	if (phys_mem) {
		if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
			mlock_size = phys_mem - 128 * 1024 * 1024;
			log_info("fio: limiting mlocked memory to %lluMB\n",
							mlock_size >> 20);
		}
	}

	pinned_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (pinned_mem == MAP_FAILED) {
		perror("malloc locked mem");
		pinned_mem = NULL;
		return 1;
	}
	if (mlock(pinned_mem, mlock_size) < 0) {
		perror("mlock");
		munmap(pinned_mem, mlock_size);
		pinned_mem = NULL;
		return 1;
	}

	return 0;
}
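
/*
 * Back the io buffer with a SysV shared memory segment (MEM_SHM /
 * MEM_SHMHUGE). For MEM_SHMHUGE the request is rounded up to a multiple
 * of the configured hugepage size.
 */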
static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem)
{
	int flags = IPC_CREAT | S_IRUSR | S_IWUSR;

	if (td->o.mem_type == MEM_SHMHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		flags |= SHM_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	td->shm_id = shmget(IPC_PRIVATE, total_mem, flags);
	dprint(FD_MEM, "shmget %u, %d\n", total_mem, td->shm_id);
	if (td->shm_id < 0) {
		td_verror(td, errno, "shmget");
		if (geteuid() != 0 && (errno == ENOMEM || errno == EPERM))
			log_err("fio: you may need to run this job as root\n");
		if (td->o.mem_type == MEM_SHMHUGE) {
			if (errno == EINVAL) {
				log_err("fio: check that you have free huge"
					" pages and that hugepage-size is"
					" correct.\n");
			} else if (errno == ENOSYS) {
				log_err("fio: your system does not appear to"
					" support huge pages.\n");
			} else if (errno == ENOMEM) {
				log_err("fio: no huge pages available, do you"
					" need to allocate some? See HOWTO.\n");
			}
		}

		return 1;
	}

	td->orig_buffer = shmat(td->shm_id, NULL, 0);
	dprint(FD_MEM, "shmat %d, %p\n", td->shm_id, td->orig_buffer);
	if (td->orig_buffer == (void *) -1) {
		td_verror(td, errno, "shmat");
		td->orig_buffer = NULL;
		return 1;
	}

	return 0;
}
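
/*
 * Detach the shared memory segment and mark it for removal (IPC_RMID),
 * so it does not linger after fio exits.
 */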
static void free_mem_shm(struct thread_data *td)
{
	struct shmid_ds sbuf;

	dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer);
	shmdt(td->orig_buffer);
	shmctl(td->shm_id, IPC_RMID, &sbuf);
}
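
/*
 * mmap() backed buffer (MEM_MMAP / MEM_MMAPHUGE): anonymous by default,
 * or file-backed when td->mmapfile is set. MEM_MMAPHUGE rounds the size
 * up to a hugepage multiple.
 */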
static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
	int flags = 0;

	td->mmapfd = -1;

	if (td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		/* TODO: make sure the file is a real hugetlbfs file */
		if (!td->mmapfile)
			flags |= MAP_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	if (td->mmapfile) {
		td->mmapfd = open(td->mmapfile, O_RDWR|O_CREAT, 0644);

		if (td->mmapfd < 0) {
			td_verror(td, errno, "open mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
		if (td->o.mem_type != MEM_MMAPHUGE &&
		    ftruncate(td->mmapfd, total_mem) < 0) {
			td_verror(td, errno, "truncate mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
		if (td->o.mem_type == MEM_MMAPHUGE)
			flags |= MAP_SHARED;
		else
			flags |= MAP_PRIVATE;
	} else
		flags |= OS_MAP_ANON | MAP_PRIVATE;

	td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
				td->mmapfd, 0);
	dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
				td->mmapfd, td->orig_buffer);
	if (td->orig_buffer == MAP_FAILED) {
		td_verror(td, errno, "mmap");
		td->orig_buffer = NULL;
		if (td->mmapfile) {
			close(td->mmapfd);
			unlink(td->mmapfile);
		}
		return 1;
	}

	return 0;
}
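
/*
 * Undo alloc_mem_mmap(): unmap the buffer and, for file-backed mappings,
 * close and remove the backing file.
 */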
static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
	dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
				td->orig_buffer);
	munmap(td->orig_buffer, td->orig_buffer_size);
	if (td->mmapfile) {
		close(td->mmapfd);
		unlink(td->mmapfile);
	}
}
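
/*
 * Plain malloc() backed buffer (MEM_MALLOC), the default memory type.
 */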
static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
	td->orig_buffer = malloc(total_mem);
	dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
				td->orig_buffer);

	return td->orig_buffer == NULL;
}
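
/*
 * Release a buffer obtained from alloc_mem_malloc().
 */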
static void free_mem_malloc(struct thread_data *td)
{
	dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer);
	free(td->orig_buffer);
}
/*
 * Set up the buffer area we need for io.
 */
int allocate_io_mem(struct thread_data *td)
{
	size_t total_mem;
	int ret = 0;

	if (td->io_ops->flags & FIO_NOIO)
		return 0;

	total_mem = td->orig_buffer_size;

	if (td->o.odirect || td->o.mem_align ||
	    (td->io_ops->flags & FIO_MEMALIGN)) {
		total_mem += page_mask;
		if (td->o.mem_align && td->o.mem_align > page_size)
			total_mem += td->o.mem_align - page_size;
	}

	dprint(FD_MEM, "Alloc %llu for buffers\n",
			(unsigned long long) total_mem);

	if (td->o.mem_type == MEM_MALLOC)
		ret = alloc_mem_malloc(td, total_mem);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		ret = alloc_mem_shm(td, total_mem);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		ret = alloc_mem_mmap(td, total_mem);
	else {
		log_err("fio: bad mem type: %d\n", td->o.mem_type);
		ret = 1;
	}

	if (ret)
		td_verror(td, ENOMEM, "iomem allocation");

	return ret;
}
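
/*
 * Free the io buffer through the backend that allocated it, then reset
 * the thread's buffer state.
 */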
void free_io_mem(struct thread_data *td)
{
	unsigned int total_mem;

	total_mem = td->orig_buffer_size;
	if (td->o.odirect)
		total_mem += page_mask;

	if (td->o.mem_type == MEM_MALLOC)
		free_mem_malloc(td);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		free_mem_shm(td);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		free_mem_mmap(td, total_mem);
	else
		log_err("Bad memory type %u\n", td->o.mem_type);

	td->orig_buffer = NULL;
	td->orig_buffer_size = 0;
}