/*
 * Memory helpers
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"

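/*
 * Undo fio_pin_memory(): munlock() and unmap the region that was pinned
 * to satisfy td->o.lockmem.
 */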
void fio_unpin_memory(struct thread_data *td)
{
	if (td->pinned_mem) {
		dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
		if (munlock(td->pinned_mem, td->o.lockmem) < 0)
			perror("munlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
	}
}

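/*
 * If td->o.lockmem is set, map an anonymous region of (at most) that size
 * and mlock() it so the pages stay resident while the job runs. Returns 0
 * on success, 1 on failure.
 */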
int fio_pin_memory(struct thread_data *td)
{
	unsigned long long phys_mem;

	if (!td->o.lockmem)
		return 0;

	dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);

	/*
	 * Don't allow mlock of more than real_mem-128MB
	 */
	phys_mem = os_phys_mem();
	if (phys_mem) {
		if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
			td->o.lockmem = phys_mem - 128 * 1024 * 1024;
			log_info("fio: limiting mlocked memory to %lluMB\n",
							td->o.lockmem >> 20);
		}
	}

	td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (td->pinned_mem == MAP_FAILED) {
		perror("mmap locked mem");
		td->pinned_mem = NULL;
		return 1;
	}
	if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
		perror("mlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
		return 1;
	}

	return 0;
}

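/*
 * Back the io buffer with a SysV shared memory segment (MEM_SHM or
 * MEM_SHMHUGE). For MEM_SHMHUGE the size is aligned up to
 * td->o.hugepage_size and SHM_HUGETLB is requested.
 */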
static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem)
{
	int flags = IPC_CREAT | S_IRUSR | S_IWUSR;

	if (td->o.mem_type == MEM_SHMHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		flags |= SHM_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	td->shm_id = shmget(IPC_PRIVATE, total_mem, flags);
	dprint(FD_MEM, "shmget %u, %d\n", total_mem, td->shm_id);
	if (td->shm_id < 0) {
		td_verror(td, errno, "shmget");
		if (geteuid() != 0 && (errno == ENOMEM || errno == EPERM))
			log_err("fio: you may need to run this job as root\n");
		if (td->o.mem_type == MEM_SHMHUGE) {
			if (errno == EINVAL) {
				log_err("fio: check that you have free huge"
					" pages and that hugepage-size is"
					" correct.\n");
			} else if (errno == ENOSYS) {
				log_err("fio: your system does not appear to"
					" support huge pages.\n");
			} else if (errno == ENOMEM) {
				log_err("fio: no huge pages available, do you"
					" need to allocate some? See HOWTO.\n");
			}
		}

		return 1;
	}

	td->orig_buffer = shmat(td->shm_id, NULL, 0);
	dprint(FD_MEM, "shmat %d, %p\n", td->shm_id, td->orig_buffer);
	if (td->orig_buffer == (void *) -1) {
		td_verror(td, errno, "shmat");
		td->orig_buffer = NULL;
		return 1;
	}

	return 0;
}

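/*
 * Detach and mark for removal the segment allocated by alloc_mem_shm().
 */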
static void free_mem_shm(struct thread_data *td)
{
	struct shmid_ds sbuf;

	dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer);
	shmdt(td->orig_buffer);
	shmctl(td->shm_id, IPC_RMID, &sbuf);
}

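/*
 * Back the io buffer with an mmap'ed region (MEM_MMAP or MEM_MMAPHUGE).
 * If td->o.mmapfile is set, the mapping is backed by that file, which is
 * created and sized as needed; otherwise an anonymous private mapping is
 * used.
 */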
static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
	int flags = MAP_PRIVATE;

	td->mmapfd = -1;

	if (td->o.mmapfile) {
		td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644);

		if (td->mmapfd < 0) {
			td_verror(td, errno, "open mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
		if (ftruncate(td->mmapfd, total_mem) < 0) {
			td_verror(td, errno, "truncate mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
	} else
		flags |= OS_MAP_ANON;

	td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
				td->mmapfd, 0);
	dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
				td->mmapfd, td->orig_buffer);
	if (td->orig_buffer == MAP_FAILED) {
		td_verror(td, errno, "mmap");
		td->orig_buffer = NULL;
		if (td->o.mmapfile) {
			close(td->mmapfd);
			unlink(td->o.mmapfile);
		}

		return 1;
	}

	return 0;
}

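/*
 * Unmap the buffer set up by alloc_mem_mmap() and, if it was file backed,
 * close and remove the backing file.
 */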
static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
	dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
				td->orig_buffer);
	munmap(td->orig_buffer, total_mem);
	if (td->o.mmapfile) {
		close(td->mmapfd);
		unlink(td->o.mmapfile);
		free(td->o.mmapfile);
	}
}

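/*
 * Back the io buffer with plain malloc() (MEM_MALLOC).
 */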
static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
	td->orig_buffer = malloc(total_mem);
	dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
				td->orig_buffer);

	return td->orig_buffer == NULL;
}

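/*
 * Free the buffer allocated by alloc_mem_malloc().
 */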
static void free_mem_malloc(struct thread_data *td)
{
	dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer);
	free(td->orig_buffer);
}

/*
 * Set up the buffer area we need for io. The allocation is padded so the
 * buffers can be aligned for O_DIRECT or an explicit mem_align setting.
 */
int allocate_io_mem(struct thread_data *td)
{
	size_t total_mem;
	int ret = 0;

	if (td->io_ops->flags & FIO_NOIO)
		return 0;

	total_mem = td->orig_buffer_size;

	if (td->o.odirect || td->o.mem_align ||
	    (td->io_ops->flags & FIO_MEMALIGN)) {
		total_mem += page_mask;
		if (td->o.mem_align && td->o.mem_align > page_size)
			total_mem += td->o.mem_align - page_size;
	}

	dprint(FD_MEM, "Alloc %llu for buffers\n",
				(unsigned long long) total_mem);

	if (td->o.mem_type == MEM_MALLOC)
		ret = alloc_mem_malloc(td, total_mem);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		ret = alloc_mem_shm(td, total_mem);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		ret = alloc_mem_mmap(td, total_mem);
	else {
		log_err("fio: bad mem type: %d\n", td->o.mem_type);
		ret = 1;
	}

	if (ret)
		td_verror(td, ENOMEM, "iomem allocation");

	return ret;
}

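/*
 * Release the io buffer, dispatching on td->o.mem_type just like
 * allocate_io_mem().
 */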
void free_io_mem(struct thread_data *td)
{
	size_t total_mem;

	total_mem = td->orig_buffer_size;
	if (td->o.odirect)
		total_mem += page_mask;

	if (td->o.mem_type == MEM_MALLOC)
		free_mem_malloc(td);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		free_mem_shm(td);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		free_mem_mmap(td, total_mem);
	else
		log_err("Bad memory type %d\n", td->o.mem_type);

	td->orig_buffer = NULL;
	td->orig_buffer_size = 0;
}