// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define PGS_IN_2MB_PAGE	(PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG	0

/*
 * The va ranges in the context object contain a list with the available
 * chunks of device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range, which is half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as the result and a remainder that stays in the
 * list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. If its edges match the edges of the adjacent chunks
 * (i.e. a contiguous chunk can be created), the chunks are merged.
 *
 * On teardown, the list is checked to hold only one chunk covering the whole
 * relevant virtual range (which is half of the device's total virtual range).
 * If not (i.e. not all mappings were unmapped), a warning is printed.
 */
41 | ||
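/*
 * Illustrative example (editor's sketch, not part of the original source;
 * addresses are made up): a fresh range covering 0x1000000 - 0x2000000 holds
 * a single chunk. Mapping 0x200000 bytes carves the block 0x1000000 -
 * 0x11fffff out of it and leaves the remainder 0x1200000 - 0x2000000 on the
 * list. Unmapping that block returns it to the list, and since its end
 * touches the remainder's start, the two are merged back into the original
 * single chunk.
 */
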
42 | /* | |
43 | * alloc_device_memory - allocate device memory | |
44 | * | |
45 | * @ctx : current context | |
46 | * @args : host parameters containing the requested size | |
47 | * @ret_handle : result handle | |
48 | * | |
49 | * This function does the following: | |
50 | * - Allocate the requested size rounded up to 2MB pages | |
51 | * - Return unique handle | |
52 | */ | |
53 | static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, | |
54 | u32 *ret_handle) | |
55 | { | |
56 | struct hl_device *hdev = ctx->hdev; | |
57 | struct hl_vm *vm = &hdev->vm; | |
58 | struct hl_vm_phys_pg_pack *phys_pg_pack; | |
59 | u64 paddr = 0; | |
60 | u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; | |
61 | int handle, rc, i; | |
62 | bool contiguous; | |
63 | ||
64 | num_curr_pgs = 0; | |
65 | page_size = hdev->asic_prop.dram_page_size; | |
66 | page_shift = __ffs(page_size); | |
67 | num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; | |
68 | total_size = num_pgs << page_shift; | |
69 | ||
70 | contiguous = args->flags & HL_MEM_CONTIGUOUS; | |
71 | ||
72 | if (contiguous) { | |
73 | paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); | |
74 | if (!paddr) { | |
75 | dev_err(hdev->dev, | |
76 | "failed to allocate %u huge contiguous pages\n", | |
77 | num_pgs); | |
78 | return -ENOMEM; | |
79 | } | |
80 | } | |
81 | ||
82 | phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); | |
83 | if (!phys_pg_pack) { | |
84 | rc = -ENOMEM; | |
85 | goto pages_pack_err; | |
86 | } | |
87 | ||
88 | phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; | |
89 | phys_pg_pack->asid = ctx->asid; | |
90 | phys_pg_pack->npages = num_pgs; | |
91 | phys_pg_pack->page_size = page_size; | |
92 | phys_pg_pack->total_size = total_size; | |
93 | phys_pg_pack->flags = args->flags; | |
94 | phys_pg_pack->contiguous = contiguous; | |
95 | ||
96 | phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); | |
97 | if (!phys_pg_pack->pages) { | |
98 | rc = -ENOMEM; | |
99 | goto pages_arr_err; | |
100 | } | |
101 | ||
102 | if (phys_pg_pack->contiguous) { | |
103 | for (i = 0 ; i < num_pgs ; i++) | |
104 | phys_pg_pack->pages[i] = paddr + i * page_size; | |
105 | } else { | |
106 | for (i = 0 ; i < num_pgs ; i++) { | |
107 | phys_pg_pack->pages[i] = (u64) gen_pool_alloc( | |
108 | vm->dram_pg_pool, | |
109 | page_size); | |
110 | if (!phys_pg_pack->pages[i]) { | |
111 | dev_err(hdev->dev, | |
112 | "ioctl failed to allocate page\n"); | |
113 | rc = -ENOMEM; | |
114 | goto page_err; | |
115 | } | |
116 | ||
117 | num_curr_pgs++; | |
118 | } | |
119 | } | |
120 | ||
121 | spin_lock(&vm->idr_lock); | |
122 | handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, | |
123 | GFP_KERNEL); | |
124 | spin_unlock(&vm->idr_lock); | |
125 | ||
126 | if (handle < 0) { | |
127 | dev_err(hdev->dev, "Failed to get handle for page\n"); | |
128 | rc = -EFAULT; | |
129 | goto idr_err; | |
130 | } | |
131 | ||
132 | for (i = 0 ; i < num_pgs ; i++) | |
133 | kref_get(&vm->dram_pg_pool_refcount); | |
134 | ||
135 | phys_pg_pack->handle = handle; | |
136 | ||
137 | atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); | |
138 | atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); | |
139 | ||
140 | *ret_handle = handle; | |
141 | ||
142 | return 0; | |
143 | ||
144 | idr_err: | |
145 | page_err: | |
146 | if (!phys_pg_pack->contiguous) | |
147 | for (i = 0 ; i < num_curr_pgs ; i++) | |
148 | gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], | |
149 | page_size); | |
150 | ||
151 | kfree(phys_pg_pack->pages); | |
152 | pages_arr_err: | |
153 | kfree(phys_pg_pack); | |
154 | pages_pack_err: | |
155 | if (contiguous) | |
156 | gen_pool_free(vm->dram_pg_pool, paddr, total_size); | |
157 | ||
158 | return rc; | |
159 | } | |
160 | ||
161 | /* | |
162 | * get_userptr_from_host_va - initialize userptr structure from given host | |
163 | * virtual address | |
164 | * | |
165 | * @hdev : habanalabs device structure | |
166 | * @args : parameters containing the virtual address and size | |
167 | * @p_userptr : pointer to result userptr structure | |
168 | * | |
169 | * This function does the following: | |
170 | * - Allocate userptr structure | |
171 | * - Pin the given host memory using the userptr structure | |
172 | * - Perform DMA mapping to have the DMA addresses of the pages | |
173 | */ | |
174 | static int get_userptr_from_host_va(struct hl_device *hdev, | |
175 | struct hl_mem_in *args, struct hl_userptr **p_userptr) | |
176 | { | |
177 | struct hl_userptr *userptr; | |
178 | int rc; | |
179 | ||
180 | userptr = kzalloc(sizeof(*userptr), GFP_KERNEL); | |
181 | if (!userptr) { | |
182 | rc = -ENOMEM; | |
183 | goto userptr_err; | |
184 | } | |
185 | ||
186 | rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr, | |
187 | args->map_host.mem_size, userptr); | |
188 | if (rc) { | |
189 | dev_err(hdev->dev, "Failed to pin host memory\n"); | |
190 | goto pin_err; | |
191 | } | |
192 | ||
193 | rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, | |
194 | userptr->sgt->nents, DMA_BIDIRECTIONAL); | |
195 | if (rc) { | |
196 | dev_err(hdev->dev, "failed to map sgt with DMA region\n"); | |
197 | goto dma_map_err; | |
198 | } | |
199 | ||
200 | userptr->dma_mapped = true; | |
201 | userptr->dir = DMA_BIDIRECTIONAL; | |
202 | userptr->vm_type = VM_TYPE_USERPTR; | |
203 | ||
204 | *p_userptr = userptr; | |
205 | ||
206 | return 0; | |
207 | ||
208 | dma_map_err: | |
209 | hl_unpin_host_memory(hdev, userptr); | |
210 | pin_err: | |
211 | kfree(userptr); | |
212 | userptr_err: | |
213 | ||
214 | return rc; | |
215 | } | |
216 | ||
217 | /* | |
218 | * free_userptr - free userptr structure | |
219 | * | |
220 | * @hdev : habanalabs device structure | |
221 | * @userptr : userptr to free | |
222 | * | |
223 | * This function does the following: | |
224 | * - Unpins the physical pages | |
225 | * - Frees the userptr structure | |
226 | */ | |
227 | static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr) | |
228 | { | |
229 | hl_unpin_host_memory(hdev, userptr); | |
230 | kfree(userptr); | |
231 | } | |
232 | ||
233 | /* | |
234 | * dram_pg_pool_do_release - free DRAM pages pool | |
235 | * | |
236 | * @ref : pointer to reference object | |
237 | * | |
238 | * This function does the following: | |
239 | * - Frees the idr structure of physical pages handles | |
240 | * - Frees the generic pool of DRAM physical pages | |
241 | */ | |
242 | static void dram_pg_pool_do_release(struct kref *ref) | |
243 | { | |
244 | struct hl_vm *vm = container_of(ref, struct hl_vm, | |
245 | dram_pg_pool_refcount); | |
246 | ||
247 | /* | |
248 | * free the idr here as only here we know for sure that there are no | |
249 | * allocated physical pages and hence there are no handles in use | |
250 | */ | |
251 | idr_destroy(&vm->phys_pg_pack_handles); | |
252 | gen_pool_destroy(vm->dram_pg_pool); | |
253 | } | |
254 | ||
255 | /* | |
256 | * free_phys_pg_pack - free physical page pack | |
257 | * | |
258 | * @hdev : habanalabs device structure | |
259 | * @phys_pg_pack : physical page pack to free | |
260 | * | |
261 | * This function does the following: | |
262 | * - For DRAM memory only, iterate over the pack and free each physical block | |
263 | * structure by returning it to the general pool | |
264 | * - Free the hl_vm_phys_pg_pack structure | |
265 | */ | |
266 | static void free_phys_pg_pack(struct hl_device *hdev, | |
267 | struct hl_vm_phys_pg_pack *phys_pg_pack) | |
268 | { | |
269 | struct hl_vm *vm = &hdev->vm; | |
270 | int i; | |
271 | ||
272 | if (!phys_pg_pack->created_from_userptr) { | |
273 | if (phys_pg_pack->contiguous) { | |
274 | gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], | |
275 | phys_pg_pack->total_size); | |
276 | ||
277 | for (i = 0; i < phys_pg_pack->npages ; i++) | |
278 | kref_put(&vm->dram_pg_pool_refcount, | |
279 | dram_pg_pool_do_release); | |
280 | } else { | |
281 | for (i = 0 ; i < phys_pg_pack->npages ; i++) { | |
282 | gen_pool_free(vm->dram_pg_pool, | |
283 | phys_pg_pack->pages[i], | |
284 | phys_pg_pack->page_size); | |
285 | kref_put(&vm->dram_pg_pool_refcount, | |
286 | dram_pg_pool_do_release); | |
287 | } | |
288 | } | |
289 | } | |
290 | ||
291 | kfree(phys_pg_pack->pages); | |
292 | kfree(phys_pg_pack); | |
293 | } | |
294 | ||
295 | /* | |
296 | * free_device_memory - free device memory | |
297 | * | |
298 | * @ctx : current context | |
299 | * @handle : handle of the memory chunk to free | |
300 | * | |
301 | * This function does the following: | |
302 | * - Free the device memory related to the given handle | |
303 | */ | |
304 | static int free_device_memory(struct hl_ctx *ctx, u32 handle) | |
305 | { | |
306 | struct hl_device *hdev = ctx->hdev; | |
307 | struct hl_vm *vm = &hdev->vm; | |
308 | struct hl_vm_phys_pg_pack *phys_pg_pack; | |
309 | ||
310 | spin_lock(&vm->idr_lock); | |
311 | phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); | |
312 | if (phys_pg_pack) { | |
313 | if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { | |
314 | dev_err(hdev->dev, "handle %u is mapped, cannot free\n", | |
315 | handle); | |
316 | spin_unlock(&vm->idr_lock); | |
317 | return -EINVAL; | |
318 | } | |
319 | ||
320 | /* | |
321 | * must remove from idr before the freeing of the physical | |
322 | * pages as the refcount of the pool is also the trigger of the | |
323 | * idr destroy | |
324 | */ | |
325 | idr_remove(&vm->phys_pg_pack_handles, handle); | |
326 | spin_unlock(&vm->idr_lock); | |
327 | ||
328 | atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); | |
329 | atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); | |
330 | ||
331 | free_phys_pg_pack(hdev, phys_pg_pack); | |
332 | } else { | |
333 | spin_unlock(&vm->idr_lock); | |
334 | dev_err(hdev->dev, | |
335 | "free device memory failed, no match for handle %u\n", | |
336 | handle); | |
337 | return -EINVAL; | |
338 | } | |
339 | ||
340 | return 0; | |
341 | } | |
342 | ||
/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual address block
 *
 * This function should be called only when va_list lock is taken
 */
354 | static void clear_va_list_locked(struct hl_device *hdev, | |
355 | struct list_head *va_list) | |
356 | { | |
357 | struct hl_vm_va_block *va_block, *tmp; | |
358 | ||
359 | list_for_each_entry_safe(va_block, tmp, va_list, node) { | |
360 | list_del(&va_block->node); | |
361 | kfree(va_block); | |
362 | } | |
363 | } | |
364 | ||
/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual address block
 *
 * This function should be called only when va_list lock is taken
 */
376 | static void print_va_list_locked(struct hl_device *hdev, | |
377 | struct list_head *va_list) | |
378 | { | |
379 | #if HL_MMU_DEBUG | |
380 | struct hl_vm_va_block *va_block; | |
381 | ||
382 | dev_dbg(hdev->dev, "print va list:\n"); | |
383 | ||
384 | list_for_each_entry(va_block, va_list, node) | |
385 | dev_dbg(hdev->dev, | |
386 | "va block, start: 0x%llx, end: 0x%llx, size: %llu\n", | |
387 | va_block->start, va_block->end, va_block->size); | |
388 | #endif | |
389 | } | |
390 | ||
/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @va_block : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given block with its adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
404 | static void merge_va_blocks_locked(struct hl_device *hdev, | |
405 | struct list_head *va_list, struct hl_vm_va_block *va_block) | |
406 | { | |
407 | struct hl_vm_va_block *prev, *next; | |
408 | ||
409 | prev = list_prev_entry(va_block, node); | |
410 | if (&prev->node != va_list && prev->end + 1 == va_block->start) { | |
411 | prev->end = va_block->end; | |
412 | prev->size = prev->end - prev->start; | |
413 | list_del(&va_block->node); | |
414 | kfree(va_block); | |
415 | va_block = prev; | |
416 | } | |
417 | ||
418 | next = list_next_entry(va_block, node); | |
419 | if (&next->node != va_list && va_block->end + 1 == next->start) { | |
420 | next->start = va_block->start; | |
421 | next->size = next->end - next->start; | |
422 | list_del(&va_block->node); | |
423 | kfree(va_block); | |
424 | } | |
425 | } | |
426 | ||
/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @start : start virtual address
 * @end : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other blocks
 *   if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
441 | static int add_va_block_locked(struct hl_device *hdev, | |
442 | struct list_head *va_list, u64 start, u64 end) | |
443 | { | |
444 | struct hl_vm_va_block *va_block, *res = NULL; | |
445 | u64 size = end - start; | |
446 | ||
447 | print_va_list_locked(hdev, va_list); | |
448 | ||
449 | list_for_each_entry(va_block, va_list, node) { | |
		/* TODO: remove this check once the driver matures */
451 | if (hl_mem_area_crosses_range(start, size, va_block->start, | |
452 | va_block->end)) { | |
453 | dev_err(hdev->dev, | |
454 | "block crossing ranges at start 0x%llx, end 0x%llx\n", | |
455 | va_block->start, va_block->end); | |
456 | return -EINVAL; | |
457 | } | |
458 | ||
459 | if (va_block->end < start) | |
460 | res = va_block; | |
461 | } | |
462 | ||
463 | va_block = kmalloc(sizeof(*va_block), GFP_KERNEL); | |
464 | if (!va_block) | |
465 | return -ENOMEM; | |
466 | ||
467 | va_block->start = start; | |
468 | va_block->end = end; | |
469 | va_block->size = size; | |
470 | ||
471 | if (!res) | |
472 | list_add(&va_block->node, va_list); | |
473 | else | |
474 | list_add(&va_block->node, &res->node); | |
475 | ||
476 | merge_va_blocks_locked(hdev, va_list, va_block); | |
477 | ||
478 | print_va_list_locked(hdev, va_list); | |
479 | ||
480 | return 0; | |
481 | } | |
482 | ||
483 | /* | |
484 | * add_va_block - wrapper for add_va_block_locked | |
485 | * | |
486 | * @hdev : pointer to the habanalabs device structure | |
487 | * @va_list : pointer to the virtual addresses block list | |
488 | * @start : start virtual address | |
489 | * @end : end virtual address | |
490 | * | |
491 | * This function does the following: | |
492 | * - Takes the list lock and calls add_va_block_locked | |
493 | */ | |
494 | static inline int add_va_block(struct hl_device *hdev, | |
495 | struct hl_va_range *va_range, u64 start, u64 end) | |
496 | { | |
497 | int rc; | |
498 | ||
499 | mutex_lock(&va_range->lock); | |
500 | rc = add_va_block_locked(hdev, &va_range->list, start, end); | |
501 | mutex_unlock(&va_range->lock); | |
502 | ||
503 | return rc; | |
504 | } | |
505 | ||
/*
 * get_va_block - get a virtual block with the requested size
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @size : requested block size
 * @hint_addr : hint for the requested address from the user
 * @is_userptr : is the memory host (userptr) memory or DRAM memory
 *
 * This function does the following:
 * - Iterate over the virtual block list to find a suitable virtual block for
 *   the requested size
 * - Reserve the requested block and update the list
 * - Return the start address of the virtual block
 */
521 | static u64 get_va_block(struct hl_device *hdev, | |
522 | struct hl_va_range *va_range, u32 size, u64 hint_addr, | |
523 | bool is_userptr) | |
524 | { | |
525 | struct hl_vm_va_block *va_block, *new_va_block = NULL; | |
526 | u64 valid_start, valid_size, prev_start, prev_end, page_mask, | |
527 | res_valid_start = 0, res_valid_size = 0; | |
528 | u32 page_size; | |
529 | bool add_prev = false; | |
530 | ||
531 | if (is_userptr) { | |
532 | /* | |
533 | * We cannot know if the user allocated memory with huge pages | |
534 | * or not, hence we continue with the biggest possible | |
535 | * granularity. | |
536 | */ | |
537 | page_size = PAGE_SIZE_2MB; | |
538 | page_mask = PAGE_MASK_2MB; | |
539 | } else { | |
540 | page_size = hdev->asic_prop.dram_page_size; | |
541 | page_mask = ~((u64)page_size - 1); | |
542 | } | |
543 | ||
544 | mutex_lock(&va_range->lock); | |
545 | ||
546 | print_va_list_locked(hdev, &va_range->list); | |
547 | ||
548 | list_for_each_entry(va_block, &va_range->list, node) { | |
549 | /* calc the first possible aligned addr */ | |
550 | valid_start = va_block->start; | |
551 | ||
552 | ||
553 | if (valid_start & (page_size - 1)) { | |
554 | valid_start &= page_mask; | |
555 | valid_start += page_size; | |
556 | if (valid_start > va_block->end) | |
557 | continue; | |
558 | } | |
559 | ||
560 | valid_size = va_block->end - valid_start; | |
561 | ||
562 | if (valid_size >= size && | |
563 | (!new_va_block || valid_size < res_valid_size)) { | |
564 | ||
565 | new_va_block = va_block; | |
566 | res_valid_start = valid_start; | |
567 | res_valid_size = valid_size; | |
568 | } | |
569 | ||
570 | if (hint_addr && hint_addr >= valid_start && | |
571 | ((hint_addr + size) <= va_block->end)) { | |
572 | new_va_block = va_block; | |
573 | res_valid_start = hint_addr; | |
574 | res_valid_size = valid_size; | |
575 | break; | |
576 | } | |
577 | } | |
578 | ||
579 | if (!new_va_block) { | |
580 | dev_err(hdev->dev, "no available va block for size %u\n", size); | |
581 | goto out; | |
582 | } | |
583 | ||
584 | if (res_valid_start > new_va_block->start) { | |
585 | prev_start = new_va_block->start; | |
586 | prev_end = res_valid_start - 1; | |
587 | ||
588 | new_va_block->start = res_valid_start; | |
589 | new_va_block->size = res_valid_size; | |
590 | ||
591 | add_prev = true; | |
592 | } | |
593 | ||
594 | if (new_va_block->size > size) { | |
595 | new_va_block->start += size; | |
596 | new_va_block->size = new_va_block->end - new_va_block->start; | |
597 | } else { | |
598 | list_del(&new_va_block->node); | |
599 | kfree(new_va_block); | |
600 | } | |
601 | ||
602 | if (add_prev) | |
603 | add_va_block_locked(hdev, &va_range->list, prev_start, | |
604 | prev_end); | |
605 | ||
606 | print_va_list_locked(hdev, &va_range->list); | |
607 | out: | |
608 | mutex_unlock(&va_range->lock); | |
609 | ||
610 | return res_valid_start; | |
611 | } | |
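
/*
 * Illustrative example (editor's sketch, not from the original source): with
 * free chunks of 3MB and 8MB on the list, a 2MB request picks the 3MB chunk,
 * i.e. the smallest chunk that still fits after page alignment, returns its
 * aligned start address and leaves the remaining ~1MB on the list. A valid
 * hint_addr that falls inside a free chunk takes precedence over this
 * best-fit choice.
 */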
612 | ||
/*
 * get_sg_info - get the number of pages and the DMA address from an SG entry
 *
 * @sg : the SG entry
 * @dma_addr : pointer to the DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG entry: take
 * the offset of the address within the first page, add the length to it and
 * round up to get the number of pages needed.
 */
623 | static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) | |
624 | { | |
625 | *dma_addr = sg_dma_address(sg); | |
626 | ||
627 | return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) + | |
628 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | |
629 | } | |
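
/*
 * Worked example (editor's note, assuming a 4KB PAGE_SIZE): an SG entry with
 * DMA address 0x10000800 and length 0x2000 starts 0x800 bytes into its first
 * page, so ((0x800 + 0x2000) + 0xfff) >> 12 = 3 pages are needed, and
 * *dma_addr is set to 0x10000800.
 */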
630 | ||
/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 *
 * @ctx : current context
 * @userptr : userptr to initialize from
 * @pphys_pg_pack : result pointer
 *
 * This function does the following:
 * - Create a physical page pack from the pinned physical pages related to the
 *   given virtual block
 */
644 | static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, | |
645 | struct hl_userptr *userptr, | |
646 | struct hl_vm_phys_pg_pack **pphys_pg_pack) | |
647 | { | |
648 | struct hl_vm_phys_pg_pack *phys_pg_pack; | |
649 | struct scatterlist *sg; | |
650 | dma_addr_t dma_addr; | |
651 | u64 page_mask; | |
652 | u32 npages, total_npages, page_size = PAGE_SIZE; | |
653 | bool first = true, is_huge_page_opt = true; | |
654 | int rc, i, j; | |
655 | ||
656 | phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); | |
657 | if (!phys_pg_pack) | |
658 | return -ENOMEM; | |
659 | ||
660 | phys_pg_pack->vm_type = userptr->vm_type; | |
661 | phys_pg_pack->created_from_userptr = true; | |
662 | phys_pg_pack->asid = ctx->asid; | |
663 | atomic_set(&phys_pg_pack->mapping_cnt, 1); | |
664 | ||
	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB can we use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
671 | total_npages = 0; | |
672 | for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { | |
673 | npages = get_sg_info(sg, &dma_addr); | |
674 | ||
675 | total_npages += npages; | |
676 | ||
677 | if (first) { | |
678 | first = false; | |
679 | dma_addr &= PAGE_MASK_2MB; | |
680 | } | |
681 | ||
682 | if ((npages % PGS_IN_2MB_PAGE) || | |
683 | (dma_addr & (PAGE_SIZE_2MB - 1))) | |
684 | is_huge_page_opt = false; | |
685 | } | |
686 | ||
687 | if (is_huge_page_opt) { | |
688 | page_size = PAGE_SIZE_2MB; | |
689 | total_npages /= PGS_IN_2MB_PAGE; | |
690 | } | |
691 | ||
692 | page_mask = ~(((u64) page_size) - 1); | |
693 | ||
694 | phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); | |
695 | if (!phys_pg_pack->pages) { | |
696 | rc = -ENOMEM; | |
697 | goto page_pack_arr_mem_err; | |
698 | } | |
699 | ||
700 | phys_pg_pack->npages = total_npages; | |
701 | phys_pg_pack->page_size = page_size; | |
702 | phys_pg_pack->total_size = total_npages * page_size; | |
703 | ||
704 | j = 0; | |
705 | first = true; | |
706 | for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { | |
707 | npages = get_sg_info(sg, &dma_addr); | |
708 | ||
709 | /* align down to physical page size and save the offset */ | |
710 | if (first) { | |
711 | first = false; | |
712 | phys_pg_pack->offset = dma_addr & (page_size - 1); | |
713 | dma_addr &= page_mask; | |
714 | } | |
715 | ||
716 | while (npages) { | |
717 | phys_pg_pack->pages[j++] = dma_addr; | |
718 | dma_addr += page_size; | |
719 | ||
720 | if (is_huge_page_opt) | |
721 | npages -= PGS_IN_2MB_PAGE; | |
722 | else | |
723 | npages--; | |
724 | } | |
725 | } | |
726 | ||
727 | *pphys_pg_pack = phys_pg_pack; | |
728 | ||
729 | return 0; | |
730 | ||
731 | page_pack_arr_mem_err: | |
732 | kfree(phys_pg_pack); | |
733 | ||
734 | return rc; | |
735 | } | |
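
/*
 * Example of the 2MB optimization above (editor's note, assuming a 4KB base
 * PAGE_SIZE): if every SG entry of the pinned region is 2MB aligned and a
 * multiple of 2MB in size, the pack is built with page_size = PAGE_SIZE_2MB,
 * so a 6MB region needs only 3 entries in the pages array instead of 1536.
 */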
736 | ||
/*
 * map_phys_page_pack - maps the physical page pack
 *
 * @ctx : current context
 * @vaddr : start address of the virtual area to map from
 * @phys_pg_pack : the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to its matching physical chunk
 * - Rolls back the mappings already done if any mapping fails
 * - Returns 0 on success, error code otherwise.
 */
749 | static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, | |
750 | struct hl_vm_phys_pg_pack *phys_pg_pack) | |
751 | { | |
752 | struct hl_device *hdev = ctx->hdev; | |
753 | u64 next_vaddr = vaddr, paddr; | |
754 | u32 page_size = phys_pg_pack->page_size; | |
755 | int i, rc = 0, mapped_pg_cnt = 0; | |
756 | ||
757 | for (i = 0 ; i < phys_pg_pack->npages ; i++) { | |
758 | paddr = phys_pg_pack->pages[i]; | |
759 | ||
760 | /* For accessing the host we need to turn on bit 39 */ | |
761 | if (phys_pg_pack->created_from_userptr) | |
762 | paddr += hdev->asic_prop.host_phys_base_address; | |
763 | ||
764 | rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); | |
765 | if (rc) { | |
766 | dev_err(hdev->dev, | |
767 | "map failed for handle %u, npages: %d, mapped: %d", | |
768 | phys_pg_pack->handle, phys_pg_pack->npages, | |
769 | mapped_pg_cnt); | |
770 | goto err; | |
771 | } | |
772 | ||
773 | mapped_pg_cnt++; | |
774 | next_vaddr += page_size; | |
775 | } | |
776 | ||
777 | return 0; | |
778 | ||
779 | err: | |
780 | next_vaddr = vaddr; | |
781 | for (i = 0 ; i < mapped_pg_cnt ; i++) { | |
782 | if (hl_mmu_unmap(ctx, next_vaddr, page_size)) | |
783 | dev_warn_ratelimited(hdev->dev, | |
784 | "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", | |
785 | phys_pg_pack->handle, next_vaddr, | |
786 | phys_pg_pack->pages[i], page_size); | |
787 | ||
788 | next_vaddr += page_size; | |
789 | } | |
790 | ||
791 | return rc; | |
792 | } | |
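
/*
 * Illustrative example (editor's note): a pack of three 2MB pages mapped at
 * virtual address V results in V -> pages[0], V + 2MB -> pages[1] and
 * V + 4MB -> pages[2]. If the third mapping fails, the first two are
 * unmapped before the error is returned.
 */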
793 | ||
794 | static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, | |
795 | u64 *paddr) | |
796 | { | |
797 | struct hl_device *hdev = ctx->hdev; | |
798 | struct hl_vm *vm = &hdev->vm; | |
799 | struct hl_vm_phys_pg_pack *phys_pg_pack; | |
800 | u32 handle; | |
801 | ||
802 | handle = lower_32_bits(args->map_device.handle); | |
803 | spin_lock(&vm->idr_lock); | |
804 | phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); | |
805 | if (!phys_pg_pack) { | |
806 | spin_unlock(&vm->idr_lock); | |
807 | dev_err(hdev->dev, "no match for handle %u\n", handle); | |
808 | return -EINVAL; | |
809 | } | |
810 | ||
811 | *paddr = phys_pg_pack->pages[0]; | |
812 | ||
813 | spin_unlock(&vm->idr_lock); | |
814 | ||
815 | return 0; | |
816 | } | |
817 | ||
/*
 * map_device_va - map the given memory
 *
 * @ctx : current context
 * @args : host parameters with handle/host virtual address
 * @device_addr : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
832 | static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, | |
833 | u64 *device_addr) | |
834 | { | |
835 | struct hl_device *hdev = ctx->hdev; | |
836 | struct hl_vm *vm = &hdev->vm; | |
837 | struct hl_vm_phys_pg_pack *phys_pg_pack; | |
838 | struct hl_userptr *userptr = NULL; | |
839 | struct hl_vm_hash_node *hnode; | |
840 | enum vm_type_t *vm_type; | |
841 | u64 ret_vaddr, hint_addr; | |
842 | u32 handle = 0; | |
843 | int rc; | |
844 | bool is_userptr = args->flags & HL_MEM_USERPTR; | |
845 | ||
846 | /* Assume failure */ | |
847 | *device_addr = 0; | |
848 | ||
849 | if (is_userptr) { | |
850 | rc = get_userptr_from_host_va(hdev, args, &userptr); | |
851 | if (rc) { | |
852 | dev_err(hdev->dev, "failed to get userptr from va\n"); | |
853 | return rc; | |
854 | } | |
855 | ||
856 | rc = init_phys_pg_pack_from_userptr(ctx, userptr, | |
857 | &phys_pg_pack); | |
858 | if (rc) { | |
859 | dev_err(hdev->dev, | |
860 | "unable to init page pack for vaddr 0x%llx\n", | |
861 | args->map_host.host_virt_addr); | |
862 | goto init_page_pack_err; | |
863 | } | |
864 | ||
865 | vm_type = (enum vm_type_t *) userptr; | |
866 | hint_addr = args->map_host.hint_addr; | |
867 | } else { | |
868 | handle = lower_32_bits(args->map_device.handle); | |
869 | ||
870 | spin_lock(&vm->idr_lock); | |
871 | phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); | |
872 | if (!phys_pg_pack) { | |
873 | spin_unlock(&vm->idr_lock); | |
874 | dev_err(hdev->dev, | |
875 | "no match for handle %u\n", handle); | |
876 | return -EINVAL; | |
877 | } | |
878 | ||
879 | /* increment now to avoid freeing device memory while mapping */ | |
880 | atomic_inc(&phys_pg_pack->mapping_cnt); | |
881 | ||
882 | spin_unlock(&vm->idr_lock); | |
883 | ||
884 | vm_type = (enum vm_type_t *) phys_pg_pack; | |
885 | ||
886 | hint_addr = args->map_device.hint_addr; | |
887 | } | |
888 | ||
889 | /* | |
890 | * relevant for mapping device physical memory only, as host memory is | |
891 | * implicitly shared | |
892 | */ | |
893 | if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && | |
894 | phys_pg_pack->asid != ctx->asid) { | |
895 | dev_err(hdev->dev, | |
896 | "Failed to map memory, handle %u is not shared\n", | |
897 | handle); | |
898 | rc = -EPERM; | |
899 | goto shared_err; | |
900 | } | |
901 | ||
902 | hnode = kzalloc(sizeof(*hnode), GFP_KERNEL); | |
903 | if (!hnode) { | |
904 | rc = -ENOMEM; | |
905 | goto hnode_err; | |
906 | } | |
907 | ||
908 | ret_vaddr = get_va_block(hdev, | |
909 | is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, | |
910 | phys_pg_pack->total_size, hint_addr, is_userptr); | |
911 | if (!ret_vaddr) { | |
912 | dev_err(hdev->dev, "no available va block for handle %u\n", | |
913 | handle); | |
914 | rc = -ENOMEM; | |
915 | goto va_block_err; | |
916 | } | |
917 | ||
918 | mutex_lock(&ctx->mmu_lock); | |
919 | ||
920 | rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack); | |
921 | if (rc) { | |
922 | mutex_unlock(&ctx->mmu_lock); | |
923 | dev_err(hdev->dev, "mapping page pack failed for handle %u\n", | |
924 | handle); | |
925 | goto map_err; | |
926 | } | |
927 | ||
928 | hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false, ctx->asid, | |
929 | ret_vaddr, phys_pg_pack->total_size); | |
930 | ||
931 | mutex_unlock(&ctx->mmu_lock); | |
932 | ||
933 | ret_vaddr += phys_pg_pack->offset; | |
934 | ||
935 | hnode->ptr = vm_type; | |
936 | hnode->vaddr = ret_vaddr; | |
937 | ||
938 | mutex_lock(&ctx->mem_hash_lock); | |
939 | hash_add(ctx->mem_hash, &hnode->node, ret_vaddr); | |
940 | mutex_unlock(&ctx->mem_hash_lock); | |
941 | ||
942 | *device_addr = ret_vaddr; | |
943 | ||
944 | if (is_userptr) | |
945 | free_phys_pg_pack(hdev, phys_pg_pack); | |
946 | ||
947 | return 0; | |
948 | ||
949 | map_err: | |
950 | if (add_va_block(hdev, | |
951 | is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, | |
952 | ret_vaddr, | |
953 | ret_vaddr + phys_pg_pack->total_size - 1)) | |
954 | dev_warn(hdev->dev, | |
955 | "release va block failed for handle 0x%x, vaddr: 0x%llx\n", | |
956 | handle, ret_vaddr); | |
957 | ||
958 | va_block_err: | |
959 | kfree(hnode); | |
960 | hnode_err: | |
961 | shared_err: | |
962 | atomic_dec(&phys_pg_pack->mapping_cnt); | |
963 | if (is_userptr) | |
964 | free_phys_pg_pack(hdev, phys_pg_pack); | |
965 | init_page_pack_err: | |
966 | if (is_userptr) | |
967 | free_userptr(hdev, userptr); | |
968 | ||
969 | return rc; | |
970 | } | |
971 | ||
/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx : current context
 * @vaddr : device virtual address to unmap
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
982 | static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) | |
983 | { | |
984 | struct hl_device *hdev = ctx->hdev; | |
985 | struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; | |
986 | struct hl_vm_hash_node *hnode = NULL; | |
987 | struct hl_userptr *userptr = NULL; | |
988 | enum vm_type_t *vm_type; | |
989 | u64 next_vaddr; | |
990 | u32 page_size; | |
991 | bool is_userptr; | |
992 | int i, rc; | |
993 | ||
	/* protect against a concurrent unmap of the same address */
995 | mutex_lock(&ctx->mem_hash_lock); | |
996 | hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) | |
997 | if (vaddr == hnode->vaddr) | |
998 | break; | |
999 | ||
1000 | if (!hnode) { | |
1001 | mutex_unlock(&ctx->mem_hash_lock); | |
1002 | dev_err(hdev->dev, | |
1003 | "unmap failed, no mem hnode for vaddr 0x%llx\n", | |
1004 | vaddr); | |
1005 | return -EINVAL; | |
1006 | } | |
1007 | ||
1008 | hash_del(&hnode->node); | |
1009 | mutex_unlock(&ctx->mem_hash_lock); | |
1010 | ||
1011 | vm_type = hnode->ptr; | |
1012 | ||
1013 | if (*vm_type == VM_TYPE_USERPTR) { | |
1014 | is_userptr = true; | |
1015 | userptr = hnode->ptr; | |
1016 | rc = init_phys_pg_pack_from_userptr(ctx, userptr, | |
1017 | &phys_pg_pack); | |
1018 | if (rc) { | |
1019 | dev_err(hdev->dev, | |
1020 | "unable to init page pack for vaddr 0x%llx\n", | |
1021 | vaddr); | |
1022 | goto vm_type_err; | |
1023 | } | |
1024 | } else if (*vm_type == VM_TYPE_PHYS_PACK) { | |
1025 | is_userptr = false; | |
1026 | phys_pg_pack = hnode->ptr; | |
1027 | } else { | |
1028 | dev_warn(hdev->dev, | |
1029 | "unmap failed, unknown vm desc for vaddr 0x%llx\n", | |
1030 | vaddr); | |
1031 | rc = -EFAULT; | |
1032 | goto vm_type_err; | |
1033 | } | |
1034 | ||
1035 | if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { | |
1036 | dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); | |
1037 | rc = -EINVAL; | |
1038 | goto mapping_cnt_err; | |
1039 | } | |
1040 | ||
1041 | page_size = phys_pg_pack->page_size; | |
1042 | vaddr &= ~(((u64) page_size) - 1); | |
1043 | ||
1044 | next_vaddr = vaddr; | |
1045 | ||
1046 | mutex_lock(&ctx->mmu_lock); | |
1047 | ||
1048 | for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) | |
1049 | if (hl_mmu_unmap(ctx, next_vaddr, page_size)) | |
1050 | dev_warn_ratelimited(hdev->dev, | |
1051 | "unmap failed for vaddr: 0x%llx\n", next_vaddr); | |
1052 | ||
1053 | hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true, ctx->asid, | |
1054 | vaddr, phys_pg_pack->total_size); | |
1055 | ||
1056 | mutex_unlock(&ctx->mmu_lock); | |
1057 | ||
1058 | if (add_va_block(hdev, | |
1059 | is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, | |
1060 | vaddr, | |
1061 | vaddr + phys_pg_pack->total_size - 1)) | |
1062 | dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n", | |
1063 | vaddr); | |
1064 | ||
1065 | atomic_dec(&phys_pg_pack->mapping_cnt); | |
1066 | kfree(hnode); | |
1067 | ||
1068 | if (is_userptr) { | |
1069 | free_phys_pg_pack(hdev, phys_pg_pack); | |
1070 | free_userptr(hdev, userptr); | |
1071 | } | |
1072 | ||
1073 | return 0; | |
1074 | ||
1075 | mapping_cnt_err: | |
1076 | if (is_userptr) | |
1077 | free_phys_pg_pack(hdev, phys_pg_pack); | |
1078 | vm_type_err: | |
1079 | mutex_lock(&ctx->mem_hash_lock); | |
1080 | hash_add(ctx->mem_hash, &hnode->node, vaddr); | |
1081 | mutex_unlock(&ctx->mem_hash_lock); | |
1082 | ||
1083 | return rc; | |
1084 | } | |
1085 | ||
1086 | int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) | |
1087 | { | |
1088 | union hl_mem_args *args = data; | |
1089 | struct hl_device *hdev = hpriv->hdev; | |
1090 | struct hl_ctx *ctx = hpriv->ctx; | |
1091 | u64 device_addr = 0; | |
1092 | u32 handle = 0; | |
1093 | int rc; | |
1094 | ||
1095 | if (hl_device_disabled_or_in_reset(hdev)) { | |
1096 | dev_warn_ratelimited(hdev->dev, | |
1097 | "Device is disabled or in reset. Can't execute memory IOCTL\n"); | |
1098 | return -EBUSY; | |
1099 | } | |
1100 | ||
1101 | if (hdev->mmu_enable) { | |
1102 | switch (args->in.op) { | |
1103 | case HL_MEM_OP_ALLOC: | |
1104 | if (!hdev->dram_supports_virtual_memory) { | |
1105 | dev_err(hdev->dev, | |
1106 | "DRAM alloc is not supported\n"); | |
1107 | rc = -EINVAL; | |
1108 | goto out; | |
1109 | } | |
1110 | if (args->in.alloc.mem_size == 0) { | |
1111 | dev_err(hdev->dev, | |
1112 | "alloc size must be larger than 0\n"); | |
1113 | rc = -EINVAL; | |
1114 | goto out; | |
1115 | } | |
1116 | rc = alloc_device_memory(ctx, &args->in, &handle); | |
1117 | ||
1118 | memset(args, 0, sizeof(*args)); | |
1119 | args->out.handle = (__u64) handle; | |
1120 | break; | |
1121 | ||
1122 | case HL_MEM_OP_FREE: | |
1123 | if (!hdev->dram_supports_virtual_memory) { | |
1124 | dev_err(hdev->dev, | |
1125 | "DRAM free is not supported\n"); | |
1126 | rc = -EINVAL; | |
1127 | goto out; | |
1128 | } | |
1129 | rc = free_device_memory(ctx, args->in.free.handle); | |
1130 | break; | |
1131 | ||
1132 | case HL_MEM_OP_MAP: | |
1133 | rc = map_device_va(ctx, &args->in, &device_addr); | |
1134 | ||
1135 | memset(args, 0, sizeof(*args)); | |
1136 | args->out.device_virt_addr = device_addr; | |
1137 | break; | |
1138 | ||
1139 | case HL_MEM_OP_UNMAP: | |
1140 | rc = unmap_device_va(ctx, | |
1141 | args->in.unmap.device_virt_addr); | |
1142 | break; | |
1143 | ||
1144 | default: | |
1145 | dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); | |
1146 | rc = -ENOTTY; | |
1147 | break; | |
1148 | } | |
1149 | } else { | |
1150 | switch (args->in.op) { | |
1151 | case HL_MEM_OP_ALLOC: | |
1152 | if (args->in.alloc.mem_size == 0) { | |
1153 | dev_err(hdev->dev, | |
1154 | "alloc size must be larger than 0\n"); | |
1155 | rc = -EINVAL; | |
1156 | goto out; | |
1157 | } | |
1158 | ||
1159 | /* Force contiguous as there are no real MMU | |
1160 | * translations to overcome physical memory gaps | |
1161 | */ | |
1162 | args->in.flags |= HL_MEM_CONTIGUOUS; | |
1163 | rc = alloc_device_memory(ctx, &args->in, &handle); | |
1164 | ||
1165 | memset(args, 0, sizeof(*args)); | |
1166 | args->out.handle = (__u64) handle; | |
1167 | break; | |
1168 | ||
1169 | case HL_MEM_OP_FREE: | |
1170 | rc = free_device_memory(ctx, args->in.free.handle); | |
1171 | break; | |
1172 | ||
1173 | case HL_MEM_OP_MAP: | |
1174 | if (args->in.flags & HL_MEM_USERPTR) { | |
1175 | device_addr = args->in.map_host.host_virt_addr; | |
1176 | rc = 0; | |
1177 | } else { | |
1178 | rc = get_paddr_from_handle(ctx, &args->in, | |
1179 | &device_addr); | |
1180 | } | |
1181 | ||
1182 | memset(args, 0, sizeof(*args)); | |
1183 | args->out.device_virt_addr = device_addr; | |
1184 | break; | |
1185 | ||
1186 | case HL_MEM_OP_UNMAP: | |
1187 | rc = 0; | |
1188 | break; | |
1189 | ||
1190 | default: | |
1191 | dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); | |
1192 | rc = -ENOTTY; | |
1193 | break; | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | out: | |
1198 | return rc; | |
1199 | } | |
1200 | |
/*
 * hl_pin_host_memory - pins a chunk of host memory
 *
 * @hdev : pointer to the habanalabs device structure
 * @addr : the user-space virtual address of the memory area
 * @size : the size of the memory area
 * @userptr : pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Creates an SG list from those pages
 */
1213 | int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, | |
1214 | struct hl_userptr *userptr) | |
1215 | { | |
1216 | u64 start, end; | |
1217 | u32 npages, offset; | |
1218 | int rc; | |
1219 | ||
1220 | if (!size) { | |
1221 | dev_err(hdev->dev, "size to pin is invalid - %d\n", | |
1222 | size); | |
1223 | return -EINVAL; | |
1224 | } | |
1225 | ||
1226 | if (!access_ok((void __user *) (uintptr_t) addr, size)) { | |
1227 | dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", | |
1228 | addr); | |
1229 | return -EFAULT; | |
1230 | } | |
1231 | ||
1232 | /* | |
1233 | * If the combination of the address and size requested for this memory | |
1234 | * region causes an integer overflow, return error. | |
1235 | */ | |
1236 | if (((addr + size) < addr) || | |
1237 | PAGE_ALIGN(addr + size) < (addr + size)) { | |
1238 | dev_err(hdev->dev, | |
1239 | "user pointer 0x%llx + %u causes integer overflow\n", | |
1240 | addr, size); | |
1241 | return -EINVAL; | |
1242 | } | |
1243 | ||
1244 | start = addr & PAGE_MASK; | |
1245 | offset = addr & ~PAGE_MASK; | |
1246 | end = PAGE_ALIGN(addr + size); | |
1247 | npages = (end - start) >> PAGE_SHIFT; | |
1248 | ||
1249 | userptr->size = size; | |
1250 | userptr->addr = addr; | |
1251 | userptr->dma_mapped = false; | |
1252 | INIT_LIST_HEAD(&userptr->job_node); | |
1253 | ||
1254 | userptr->vec = frame_vector_create(npages); | |
1255 | if (!userptr->vec) { | |
1256 | dev_err(hdev->dev, "Failed to create frame vector\n"); | |
1257 | return -ENOMEM; | |
1258 | } | |
1259 | ||
1260 | rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, | |
1261 | userptr->vec); | |
1262 | ||
1263 | if (rc != npages) { | |
1264 | dev_err(hdev->dev, | |
1265 | "Failed to map host memory, user ptr probably wrong\n"); | |
1266 | if (rc < 0) | |
1267 | goto destroy_framevec; | |
1268 | rc = -EFAULT; | |
1269 | goto put_framevec; | |
1270 | } | |
1271 | ||
1272 | if (frame_vector_to_pages(userptr->vec) < 0) { | |
1273 | dev_err(hdev->dev, | |
1274 | "Failed to translate frame vector to pages\n"); | |
1275 | rc = -EFAULT; | |
1276 | goto put_framevec; | |
1277 | } | |
1278 | ||
1279 | userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC); | |
1280 | if (!userptr->sgt) { | |
1281 | rc = -ENOMEM; | |
1282 | goto put_framevec; | |
1283 | } | |
1284 | ||
1285 | rc = sg_alloc_table_from_pages(userptr->sgt, | |
1286 | frame_vector_pages(userptr->vec), | |
1287 | npages, offset, size, GFP_ATOMIC); | |
1288 | if (rc < 0) { | |
1289 | dev_err(hdev->dev, "failed to create SG table from pages\n"); | |
1290 | goto free_sgt; | |
1291 | } | |
1292 | ||
1293 | return 0; | |
1294 | ||
1295 | free_sgt: | |
1296 | kfree(userptr->sgt); | |
1297 | put_framevec: | |
1298 | put_vaddr_frames(userptr->vec); | |
1299 | destroy_framevec: | |
1300 | frame_vector_destroy(userptr->vec); | |
1301 | return rc; | |
1302 | } | |
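
/*
 * Worked pinning example (editor's note, addresses are made up, 4KB pages):
 * for addr = 0x7f0000001200 and size = 0x3000, start = 0x7f0000001000,
 * offset = 0x200, end = 0x7f0000005000 and npages = 4, so four pages are
 * pinned and the SG table is built with a 0x200 byte offset into the first
 * page.
 */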
1303 | ||
1304 | /* | |
1305 | * hl_unpin_host_memory - unpins a chunk of host memory | |
1306 | * | |
1307 | * @hdev : pointer to the habanalabs device structure | |
1308 | * @userptr : pointer to hl_userptr structure | |
1309 | * | |
1310 | * This function does the following: | |
1311 | * - Unpins the physical pages related to the host memory | |
1312 | * - Free the SG list | |
1313 | */ | |
1314 | int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) | |
1315 | { | |
1316 | struct page **pages; | |
1317 | ||
1318 | if (userptr->dma_mapped) | |
1319 | hdev->asic_funcs->hl_dma_unmap_sg(hdev, | |
1320 | userptr->sgt->sgl, | |
1321 | userptr->sgt->nents, | |
1322 | userptr->dir); | |
1323 | ||
1324 | pages = frame_vector_pages(userptr->vec); | |
1325 | if (!IS_ERR(pages)) { | |
1326 | int i; | |
1327 | ||
1328 | for (i = 0; i < frame_vector_count(userptr->vec); i++) | |
1329 | set_page_dirty_lock(pages[i]); | |
1330 | } | |
1331 | put_vaddr_frames(userptr->vec); | |
1332 | frame_vector_destroy(userptr->vec); | |
1333 | ||
1334 | list_del(&userptr->job_node); | |
1335 | ||
1336 | sg_free_table(userptr->sgt); | |
1337 | kfree(userptr->sgt); | |
1338 | ||
1339 | return 0; | |
1340 | } | |
1341 | ||
1342 | /* | |
1343 | * hl_userptr_delete_list - clear userptr list | |
1344 | * | |
1345 | * @hdev : pointer to the habanalabs device structure | |
1346 | * @userptr_list : pointer to the list to clear | |
1347 | * | |
1348 | * This function does the following: | |
1349 | * - Iterates over the list and unpins the host memory and frees the userptr | |
1350 | * structure. | |
1351 | */ | |
1352 | void hl_userptr_delete_list(struct hl_device *hdev, | |
1353 | struct list_head *userptr_list) | |
1354 | { | |
1355 | struct hl_userptr *userptr, *tmp; | |
1356 | ||
1357 | list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) { | |
1358 | hl_unpin_host_memory(hdev, userptr); | |
1359 | kfree(userptr); | |
1360 | } | |
1361 | ||
1362 | INIT_LIST_HEAD(userptr_list); | |
1363 | } | |
1364 | ||
/*
 * hl_userptr_is_pinned - returns whether the given host memory is pinned
 *
 * @hdev : pointer to the habanalabs device structure
 * @addr : user-space virtual address of the memory area
 * @size : size of the memory area
 * @userptr_list : pointer to the list to search in
 * @userptr : pointer to return the matching userptr, if found
 *
 * This function does the following:
 * - Iterates over the list and checks if an entry with the given address and
 *   size is in it, which means it is pinned. If so, returns true, otherwise
 *   returns false.
 */
1376 | bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, | |
1377 | u32 size, struct list_head *userptr_list, | |
1378 | struct hl_userptr **userptr) | |
1379 | { | |
1380 | list_for_each_entry((*userptr), userptr_list, job_node) { | |
1381 | if ((addr == (*userptr)->addr) && (size == (*userptr)->size)) | |
1382 | return true; | |
1383 | } | |
1384 | ||
1385 | return false; | |
1386 | } | |
1387 | |
1388 | /* | |
1389 | * hl_va_range_init - initialize virtual addresses range | |
1390 | * | |
1391 | * @hdev : pointer to the habanalabs device structure | |
1392 | * @va_range : pointer to the range to initialize | |
1393 | * @start : range start address | |
1394 | * @end : range end address | |
1395 | * | |
1396 | * This function does the following: | |
1397 | * - Initializes the virtual addresses list of the given range with the given | |
1398 | * addresses. | |
1399 | */ | |
1400 | static int hl_va_range_init(struct hl_device *hdev, | |
1401 | struct hl_va_range *va_range, u64 start, u64 end) | |
1402 | { | |
1403 | int rc; | |
1404 | ||
1405 | INIT_LIST_HEAD(&va_range->list); | |
1406 | ||
1407 | /* PAGE_SIZE alignment */ | |
1408 | ||
1409 | if (start & (PAGE_SIZE - 1)) { | |
1410 | start &= PAGE_MASK; | |
1411 | start += PAGE_SIZE; | |
1412 | } | |
1413 | ||
1414 | if (end & (PAGE_SIZE - 1)) | |
1415 | end &= PAGE_MASK; | |
1416 | ||
1417 | if (start >= end) { | |
1418 | dev_err(hdev->dev, "too small vm range for va list\n"); | |
1419 | return -EFAULT; | |
1420 | } | |
1421 | ||
1422 | rc = add_va_block(hdev, va_range, start, end); | |
1423 | ||
1424 | if (rc) { | |
1425 | dev_err(hdev->dev, "Failed to init host va list\n"); | |
1426 | return rc; | |
1427 | } | |
1428 | ||
1429 | va_range->start_addr = start; | |
1430 | va_range->end_addr = end; | |
1431 | ||
1432 | return 0; | |
1433 | } | |
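
/*
 * Rounding example (editor's note, assuming 4KB pages): a requested range of
 * 0x1000800 - 0x2000800 is trimmed to 0x1001000 - 0x2000000 before the single
 * initial block is added to the list.
 */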
1434 | ||
1435 | /* | |
1436 | * hl_vm_ctx_init_with_ranges - initialize virtual memory for context | |
1437 | * | |
1438 | * @ctx : pointer to the habanalabs context structure | |
1439 | * @host_range_start : host virtual addresses range start | |
1440 | * @host_range_end : host virtual addresses range end | |
1441 | * @dram_range_start : dram virtual addresses range start | |
1442 | * @dram_range_end : dram virtual addresses range end | |
1443 | * | |
1444 | * This function initializes the following: | |
1445 | * - MMU for context | |
1446 | * - Virtual address to area descriptor hashtable | |
1447 | * - Virtual block list of available virtual memory | |
1448 | */ | |
1449 | int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, | |
1450 | u64 host_range_end, u64 dram_range_start, | |
1451 | u64 dram_range_end) | |
1452 | { | |
1453 | struct hl_device *hdev = ctx->hdev; | |
1454 | int rc; | |
1455 | ||
1456 | hl_mmu_ctx_init(ctx); | |
1457 | ||
1458 | mutex_init(&ctx->mem_hash_lock); | |
1459 | hash_init(ctx->mem_hash); | |
1460 | ||
1461 | mutex_init(&ctx->host_va_range.lock); | |
1462 | ||
1463 | rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start, | |
1464 | host_range_end); | |
1465 | if (rc) { | |
1466 | dev_err(hdev->dev, "failed to init host vm range\n"); | |
1467 | goto host_vm_err; | |
1468 | } | |
1469 | ||
1470 | mutex_init(&ctx->dram_va_range.lock); | |
1471 | ||
1472 | rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start, | |
1473 | dram_range_end); | |
1474 | if (rc) { | |
1475 | dev_err(hdev->dev, "failed to init dram vm range\n"); | |
1476 | goto dram_vm_err; | |
1477 | } | |
1478 | ||
1479 | return 0; | |
1480 | ||
1481 | dram_vm_err: | |
1482 | mutex_destroy(&ctx->dram_va_range.lock); | |
1483 | ||
1484 | mutex_lock(&ctx->host_va_range.lock); | |
1485 | clear_va_list_locked(hdev, &ctx->host_va_range.list); | |
1486 | mutex_unlock(&ctx->host_va_range.lock); | |
1487 | host_vm_err: | |
1488 | mutex_destroy(&ctx->host_va_range.lock); | |
1489 | mutex_destroy(&ctx->mem_hash_lock); | |
1490 | hl_mmu_ctx_fini(ctx); | |
1491 | ||
1492 | return rc; | |
1493 | } | |
1494 | ||
1495 | int hl_vm_ctx_init(struct hl_ctx *ctx) | |
1496 | { | |
1497 | struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; | |
1498 | u64 host_range_start, host_range_end, dram_range_start, | |
1499 | dram_range_end; | |
1500 | ||
1501 | atomic64_set(&ctx->dram_phys_mem, 0); | |
1502 | ||
1503 | /* | |
1504 | * - If MMU is enabled, init the ranges as usual. | |
1505 | * - If MMU is disabled, in case of host mapping, the returned address | |
1506 | * is the given one. | |
1507 | * In case of DRAM mapping, the returned address is the physical | |
1508 | * address of the memory related to the given handle. | |
1509 | */ | |
1510 | if (ctx->hdev->mmu_enable) { | |
1511 | dram_range_start = prop->va_space_dram_start_address; | |
1512 | dram_range_end = prop->va_space_dram_end_address; | |
1513 | host_range_start = prop->va_space_host_start_address; | |
1514 | host_range_end = prop->va_space_host_end_address; | |
1515 | } else { | |
1516 | dram_range_start = prop->dram_user_base_address; | |
1517 | dram_range_end = prop->dram_end_address; | |
1518 | host_range_start = prop->dram_user_base_address; | |
1519 | host_range_end = prop->dram_end_address; | |
1520 | } | |
1521 | ||
1522 | return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, | |
1523 | dram_range_start, dram_range_end); | |
1524 | } | |
1525 | ||
/*
 * hl_va_range_fini - clear a virtual addresses range
 *
 * @hdev : pointer to the habanalabs structure
 * @va_range : pointer to virtual addresses range
 *
 * This function does the following:
 * - Checks that the given range contains the whole initial range
 * - Frees the virtual addresses block list and its lock
 */
1536 | static void hl_va_range_fini(struct hl_device *hdev, | |
1537 | struct hl_va_range *va_range) | |
1538 | { | |
1539 | struct hl_vm_va_block *va_block; | |
1540 | ||
1541 | if (list_empty(&va_range->list)) { | |
1542 | dev_warn(hdev->dev, | |
1543 | "va list should not be empty on cleanup!\n"); | |
1544 | goto out; | |
1545 | } | |
1546 | ||
1547 | if (!list_is_singular(&va_range->list)) { | |
1548 | dev_warn(hdev->dev, | |
1549 | "va list should not contain multiple blocks on cleanup!\n"); | |
1550 | goto free_va_list; | |
1551 | } | |
1552 | ||
1553 | va_block = list_first_entry(&va_range->list, typeof(*va_block), node); | |
1554 | ||
1555 | if (va_block->start != va_range->start_addr || | |
1556 | va_block->end != va_range->end_addr) { | |
1557 | dev_warn(hdev->dev, | |
1558 | "wrong va block on cleanup, from 0x%llx to 0x%llx\n", | |
1559 | va_block->start, va_block->end); | |
1560 | goto free_va_list; | |
1561 | } | |
1562 | ||
1563 | free_va_list: | |
1564 | mutex_lock(&va_range->lock); | |
1565 | clear_va_list_locked(hdev, &va_range->list); | |
1566 | mutex_unlock(&va_range->lock); | |
1567 | ||
1568 | out: | |
1569 | mutex_destroy(&va_range->lock); | |
1570 | } | |
1571 | ||
/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx : pointer to the habanalabs context structure
 *
 * This function performs teardown of the following:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition, this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - Checks the virtual block list for correctness. At this point the list
 *   should contain one element which describes the whole virtual memory range
 *   of the context. Otherwise, a warning is printed.
 */
1592 | void hl_vm_ctx_fini(struct hl_ctx *ctx) | |
1593 | { | |
1594 | struct hl_device *hdev = ctx->hdev; | |
1595 | struct hl_vm *vm = &hdev->vm; | |
1596 | struct hl_vm_phys_pg_pack *phys_pg_list; | |
1597 | struct hl_vm_hash_node *hnode; | |
1598 | struct hlist_node *tmp_node; | |
1599 | int i; | |
1600 | ||
1601 | if (!hash_empty(ctx->mem_hash)) | |
1602 | dev_notice(hdev->dev, "ctx is freed while it has va in use\n"); | |
1603 | ||
1604 | hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) { | |
1605 | dev_dbg(hdev->dev, | |
1606 | "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", | |
1607 | hnode->vaddr, ctx->asid); | |
1608 | unmap_device_va(ctx, hnode->vaddr); | |
1609 | } | |
1610 | ||
1611 | spin_lock(&vm->idr_lock); | |
1612 | idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) | |
1613 | if (phys_pg_list->asid == ctx->asid) { | |
1614 | dev_dbg(hdev->dev, | |
1615 | "page list 0x%p of asid %d is still alive\n", | |
1616 | phys_pg_list, ctx->asid); | |
1617 | free_phys_pg_pack(hdev, phys_pg_list); | |
1618 | idr_remove(&vm->phys_pg_pack_handles, i); | |
1619 | } | |
1620 | spin_unlock(&vm->idr_lock); | |
1621 | ||
1622 | hl_va_range_fini(hdev, &ctx->dram_va_range); | |
1623 | hl_va_range_fini(hdev, &ctx->host_va_range); | |
1624 | ||
1625 | mutex_destroy(&ctx->mem_hash_lock); | |
1626 | hl_mmu_ctx_fini(ctx); | |
1627 | } | |
1628 | ||
/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - MMU module
 * - DRAM physical pages pool of 2MB pages
 * - Idr for device memory allocation handles
 */
1639 | int hl_vm_init(struct hl_device *hdev) | |
1640 | { | |
1641 | struct asic_fixed_properties *prop = &hdev->asic_prop; | |
1642 | struct hl_vm *vm = &hdev->vm; | |
1643 | int rc; | |
1644 | ||
1645 | rc = hl_mmu_init(hdev); | |
1646 | if (rc) { | |
1647 | dev_err(hdev->dev, "Failed to init MMU\n"); | |
1648 | return rc; | |
1649 | } | |
1650 | ||
1651 | vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1); | |
1652 | if (!vm->dram_pg_pool) { | |
1653 | dev_err(hdev->dev, "Failed to create dram page pool\n"); | |
1654 | rc = -ENOMEM; | |
1655 | goto pool_create_err; | |
1656 | } | |
1657 | ||
1658 | kref_init(&vm->dram_pg_pool_refcount); | |
1659 | ||
1660 | rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address, | |
1661 | prop->dram_end_address - prop->dram_user_base_address, | |
1662 | -1); | |
1663 | ||
1664 | if (rc) { | |
1665 | dev_err(hdev->dev, | |
1666 | "Failed to add memory to dram page pool %d\n", rc); | |
1667 | goto pool_add_err; | |
1668 | } | |
1669 | ||
1670 | spin_lock_init(&vm->idr_lock); | |
1671 | idr_init(&vm->phys_pg_pack_handles); | |
1672 | ||
1673 | atomic64_set(&hdev->dram_used_mem, 0); | |
1674 | ||
1675 | vm->init_done = true; | |
1676 | ||
1677 | return 0; | |
1678 | ||
1679 | pool_add_err: | |
1680 | gen_pool_destroy(vm->dram_pg_pool); | |
1681 | pool_create_err: | |
1682 | hl_mmu_fini(hdev); | |
1683 | ||
1684 | return rc; | |
1685 | } | |
1686 | ||
/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function performs teardown of the following:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB pages
 * - MMU module
 */
1697 | void hl_vm_fini(struct hl_device *hdev) | |
1698 | { | |
1699 | struct hl_vm *vm = &hdev->vm; | |
1700 | ||
1701 | if (!vm->init_done) | |
1702 | return; | |
1703 | ||
1704 | /* | |
1705 | * At this point all the contexts should be freed and hence no DRAM | |
1706 | * memory should be in use. Hence the DRAM pool should be freed here. | |
1707 | */ | |
1708 | if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1) | |
1709 | dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n", | |
1710 | __func__); | |
1711 | ||
1712 | hl_mmu_fini(hdev); | |
1713 | ||
1714 | vm->init_done = false; | |
1715 | } |