mm: replace vma->vm_flags direct modifications with modifier calls
drivers/misc/habanalabs/common/memory.c (linux-block.git)
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pci-p2pdma.h>
16
17 MODULE_IMPORT_NS(DMA_BUF);
18
19 #define HL_MMU_DEBUG    0
20
21 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
22 #define DRAM_POOL_PAGE_SIZE SZ_8M
23
24 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
25                         struct hl_mem_in *args, u64 *handle);
26
27 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
28 {
29         struct asic_fixed_properties *prop = &hdev->asic_prop;
30         u64 psize;
31
32         /*
33          * For ASICs that support a user-set allocation page size, honor the user's
34          * choice only if it is not 0 (0 means use the default). See the example below.
35          */
36         if (prop->supports_user_set_page_size && args->alloc.page_size) {
37                 psize = args->alloc.page_size;
38
39                 if (!is_power_of_2(psize)) {
40                         dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
41                         return -EINVAL;
42                 }
43         } else {
44                 psize = prop->device_mem_alloc_default_page_size;
45         }
46
47         *page_size = psize;
48
49         return 0;
50 }
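
/*
 * Illustrative behavior of set_alloc_page_size() (hypothetical values, not
 * part of the driver):
 *
 *   args->alloc.page_size = 0         -> *page_size = prop->device_mem_alloc_default_page_size
 *   args->alloc.page_size = SZ_2M     -> *page_size = SZ_2M (power of 2, accepted)
 *   args->alloc.page_size = 3 * SZ_1M -> -EINVAL (not a power of 2)
 *
 * The last two cases apply only when prop->supports_user_set_page_size is set;
 * otherwise the default page size is always used.
 */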
51
52 /*
53  * The va ranges in the context object contain a list with the available
54  * chunks of device virtual memory.
55  * There is one range for host allocations and one for DRAM allocations.
56  *
57  * On initialization each range contains one chunk of all of its available
58  * virtual range, which is half of the total device virtual range.
59  *
60  * On each mapping of physical pages, a suitable virtual range chunk (the
61  * smallest one that is large enough) is selected from the list. If its size
62  * equals the requested size, the chunk is returned. Otherwise, the chunk is
63  * split in two - one part is returned and the remainder stays in the list.
64  *
65  * On each unmapping of a virtual address, the relevant virtual chunk is
66  * returned to the list. If its edges match those of the adjacent chunks
67  * (meaning a contiguous chunk can be created), the chunks are merged.
68  *
69  * On teardown, the list is checked to contain only one chunk covering the
70  * entire relevant virtual range (half of the device's total virtual range).
71  * If not (meaning not all mappings were unmapped), a warning is printed.
72  * A short worked example follows below.
73  */
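
/*
 * Illustrative walk-through of the chunk list (hypothetical values, not part
 * of the driver):
 *
 *   initial list : [0x10000000 .. 0x1fffffff]        (one 256MB chunk)
 *   map 16MB     : [0x11000000 .. 0x1fffffff]        (0x10000000 returned, chunk split)
 *   map 16MB     : [0x12000000 .. 0x1fffffff]        (0x11000000 returned)
 *   unmap first  : [0x10000000 .. 0x10ffffff],
 *                  [0x12000000 .. 0x1fffffff]        (not adjacent, no merge)
 *   unmap second : [0x10000000 .. 0x1fffffff]        (edges match, merged back into
 *                                                     a single chunk)
 */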
74
75 /*
76  * alloc_device_memory() - allocate device memory.
77  * @ctx: pointer to the context structure.
78  * @args: host parameters containing the requested size.
79  * @ret_handle: result handle.
80  *
81  * This function does the following:
82  * - Allocate the requested size rounded up to 'dram_page_size' pages.
83  * - Return unique handle for later map/unmap/free.
84  */
85 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
86                                 u32 *ret_handle)
87 {
88         struct hl_device *hdev = ctx->hdev;
89         struct hl_vm *vm = &hdev->vm;
90         struct hl_vm_phys_pg_pack *phys_pg_pack;
91         u64 paddr = 0, total_size, num_pgs, i;
92         u32 num_curr_pgs, page_size;
93         bool contiguous;
94         int handle, rc;
95
96         num_curr_pgs = 0;
97
98         rc = set_alloc_page_size(hdev, args, &page_size);
99         if (rc)
100                 return rc;
101
102         num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
103         total_size = num_pgs * page_size;
104
105         if (!total_size) {
106                 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
107                 return -EINVAL;
108         }
109
110         contiguous = args->flags & HL_MEM_CONTIGUOUS;
111
112         if (contiguous) {
113                 if (is_power_of_2(page_size))
114                         paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
115                                                                      total_size, NULL, page_size);
116                 else
117                         paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
118                 if (!paddr) {
119                         dev_err(hdev->dev,
120                                 "Cannot allocate %llu contiguous pages with total size of %llu\n",
121                                 num_pgs, total_size);
122                         return -ENOMEM;
123                 }
124         }
125
126         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
127         if (!phys_pg_pack) {
128                 rc = -ENOMEM;
129                 goto pages_pack_err;
130         }
131
132         phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
133         phys_pg_pack->asid = ctx->asid;
134         phys_pg_pack->npages = num_pgs;
135         phys_pg_pack->page_size = page_size;
136         phys_pg_pack->total_size = total_size;
137         phys_pg_pack->flags = args->flags;
138         phys_pg_pack->contiguous = contiguous;
139
140         phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
141         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
142                 rc = -ENOMEM;
143                 goto pages_arr_err;
144         }
145
146         if (phys_pg_pack->contiguous) {
147                 for (i = 0 ; i < num_pgs ; i++)
148                         phys_pg_pack->pages[i] = paddr + i * page_size;
149         } else {
150                 for (i = 0 ; i < num_pgs ; i++) {
151                         if (is_power_of_2(page_size))
152                                 phys_pg_pack->pages[i] =
153                                         (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
154                                                                             page_size, NULL,
155                                                                             page_size);
156                         else
157                                 phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
158                                                                         page_size);
159
160                         if (!phys_pg_pack->pages[i]) {
161                                 dev_err(hdev->dev,
162                                         "Cannot allocate device memory (out of memory)\n");
163                                 rc = -ENOMEM;
164                                 goto page_err;
165                         }
166
167                         num_curr_pgs++;
168                 }
169         }
170
171         spin_lock(&vm->idr_lock);
172         handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
173                                 GFP_ATOMIC);
174         spin_unlock(&vm->idr_lock);
175
176         if (handle < 0) {
177                 dev_err(hdev->dev, "Failed to get handle for page\n");
178                 rc = -EFAULT;
179                 goto idr_err;
180         }
181
182         for (i = 0 ; i < num_pgs ; i++)
183                 kref_get(&vm->dram_pg_pool_refcount);
184
185         phys_pg_pack->handle = handle;
186
187         atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
188         atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
189
190         *ret_handle = handle;
191
192         return 0;
193
194 idr_err:
195 page_err:
196         if (!phys_pg_pack->contiguous)
197                 for (i = 0 ; i < num_curr_pgs ; i++)
198                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
199                                         page_size);
200
201         kvfree(phys_pg_pack->pages);
202 pages_arr_err:
203         kfree(phys_pg_pack);
204 pages_pack_err:
205         if (contiguous)
206                 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
207
208         return rc;
209 }
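
/*
 * Illustrative arithmetic for the size rounding above (hypothetical values,
 * not part of the driver):
 *
 *   args->alloc.mem_size = 33 * SZ_1M;                   // user asks for 33MB
 *   page_size            = SZ_32M;                       // DRAM allocation page size
 *   num_pgs    = DIV_ROUND_UP_ULL(mem_size, page_size);  // = 2
 *   total_size = num_pgs * page_size;                    // = 64MB actually allocated
 */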
210
211 /**
212  * dma_map_host_va() - DMA mapping of the given host virtual address.
213  * @hdev: habanalabs device structure.
214  * @addr: the host virtual address of the memory area.
215  * @size: the size of the memory area.
216  * @p_userptr: pointer to result userptr structure.
217  *
218  * This function does the following:
219  * - Allocate userptr structure.
220  * - Pin the given host memory using the userptr structure.
221  * - Perform DMA mapping to have the DMA addresses of the pages.
222  */
223 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
224                                 struct hl_userptr **p_userptr)
225 {
226         struct hl_userptr *userptr;
227         int rc;
228
229         userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
230         if (!userptr) {
231                 rc = -ENOMEM;
232                 goto userptr_err;
233         }
234
235         rc = hl_pin_host_memory(hdev, addr, size, userptr);
236         if (rc) {
237                 dev_err(hdev->dev, "Failed to pin host memory\n");
238                 goto pin_err;
239         }
240
241         userptr->dma_mapped = true;
242         userptr->dir = DMA_BIDIRECTIONAL;
243         userptr->vm_type = VM_TYPE_USERPTR;
244
245         *p_userptr = userptr;
246
247         rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
248         if (rc) {
249                 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
250                 goto dma_map_err;
251         }
252
253         return 0;
254
255 dma_map_err:
256         hl_unpin_host_memory(hdev, userptr);
257 pin_err:
258         kfree(userptr);
259 userptr_err:
260
261         return rc;
262 }
263
264 /**
265  * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
266  * @hdev: habanalabs device structure.
267  * @userptr: userptr to free.
268  *
269  * This function does the following:
270  * - Unpins the physical pages.
271  * - Frees the userptr structure.
272  */
273 static void dma_unmap_host_va(struct hl_device *hdev,
274                                 struct hl_userptr *userptr)
275 {
276         hl_unpin_host_memory(hdev, userptr);
277         kfree(userptr);
278 }
279
280 /**
281  * dram_pg_pool_do_release() - free DRAM pages pool
282  * @ref: pointer to reference object.
283  *
284  * This function does the following:
285  * - Frees the idr structure of physical pages handles.
286  * - Frees the generic pool of DRAM physical pages.
287  */
288 static void dram_pg_pool_do_release(struct kref *ref)
289 {
290         struct hl_vm *vm = container_of(ref, struct hl_vm,
291                         dram_pg_pool_refcount);
292
293         /*
294          * free the idr here as only here we know for sure that there are no
295          * allocated physical pages and hence there are no handles in use
296          */
297         idr_destroy(&vm->phys_pg_pack_handles);
298         gen_pool_destroy(vm->dram_pg_pool);
299 }
300
301 /**
302  * free_phys_pg_pack() - free physical page pack.
303  * @hdev: habanalabs device structure.
304  * @phys_pg_pack: physical page pack to free.
305  *
306  * This function does the following:
307  * - For DRAM memory only
308  *   - iterate over the pack, free each physical block structure by
309  *     returning it to the general pool.
310  * - Free the hl_vm_phys_pg_pack structure.
311  */
312 static void free_phys_pg_pack(struct hl_device *hdev,
313                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
314 {
315         struct hl_vm *vm = &hdev->vm;
316         u64 i;
317
318         if (phys_pg_pack->created_from_userptr)
319                 goto end;
320
321         if (phys_pg_pack->contiguous) {
322                 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
323                         phys_pg_pack->total_size);
324
325                 for (i = 0; i < phys_pg_pack->npages ; i++)
326                         kref_put(&vm->dram_pg_pool_refcount,
327                                 dram_pg_pool_do_release);
328         } else {
329                 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
330                         gen_pool_free(vm->dram_pg_pool,
331                                 phys_pg_pack->pages[i],
332                                 phys_pg_pack->page_size);
333                         kref_put(&vm->dram_pg_pool_refcount,
334                                 dram_pg_pool_do_release);
335                 }
336         }
337
338 end:
339         kvfree(phys_pg_pack->pages);
340         kfree(phys_pg_pack);
341
342         return;
343 }
344
345 /**
346  * free_device_memory() - free device memory.
347  * @ctx: pointer to the context structure.
348  * @args: host parameters containing the requested size.
349  *
350  * This function does the following:
351  * - Free the device memory related to the given handle.
352  */
353 static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
354 {
355         struct hl_device *hdev = ctx->hdev;
356         struct hl_vm *vm = &hdev->vm;
357         struct hl_vm_phys_pg_pack *phys_pg_pack;
358         u32 handle = args->free.handle;
359
360         spin_lock(&vm->idr_lock);
361         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
362         if (!phys_pg_pack) {
363                 spin_unlock(&vm->idr_lock);
364                 dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
365                 return -EINVAL;
366         }
367
368         if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
369                 spin_unlock(&vm->idr_lock);
370                 dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
371                 return -EINVAL;
372         }
373
374         if (phys_pg_pack->exporting_cnt) {
375                 spin_unlock(&vm->idr_lock);
376                 dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
377                 return -EINVAL;
378         }
379
380         /* Must remove from the idr before freeing the physical pages, as the
381          * refcount of the pool is also what triggers the idr destroy.
382          */
383         idr_remove(&vm->phys_pg_pack_handles, handle);
384         spin_unlock(&vm->idr_lock);
385
386         atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
387         atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
388
389         free_phys_pg_pack(hdev, phys_pg_pack);
390
391         return 0;
392 }
393
394 /**
395  * clear_va_list_locked() - free virtual addresses list.
396  * @hdev: habanalabs device structure.
397  * @va_list: list of virtual addresses to free.
398  *
399  * This function does the following:
400  * - Iterate over the list and free each virtual addresses block.
401  *
402  * This function should be called only when va_list lock is taken.
403  */
404 static void clear_va_list_locked(struct hl_device *hdev,
405                 struct list_head *va_list)
406 {
407         struct hl_vm_va_block *va_block, *tmp;
408
409         list_for_each_entry_safe(va_block, tmp, va_list, node) {
410                 list_del(&va_block->node);
411                 kfree(va_block);
412         }
413 }
414
415 /**
416  * print_va_list_locked() - print virtual addresses list.
417  * @hdev: habanalabs device structure.
418  * @va_list: list of virtual addresses to print.
419  *
420  * This function does the following:
421  * - Iterate over the list and print each virtual addresses block.
422  *
423  * This function should be called only when va_list lock is taken.
424  */
425 static void print_va_list_locked(struct hl_device *hdev,
426                 struct list_head *va_list)
427 {
428 #if HL_MMU_DEBUG
429         struct hl_vm_va_block *va_block;
430
431         dev_dbg(hdev->dev, "print va list:\n");
432
433         list_for_each_entry(va_block, va_list, node)
434                 dev_dbg(hdev->dev,
435                         "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
436                         va_block->start, va_block->end, va_block->size);
437 #endif
438 }
439
440 /**
441  * merge_va_blocks_locked() - merge a virtual block if possible.
442  * @hdev: pointer to the habanalabs device structure.
443  * @va_list: pointer to the virtual addresses block list.
444  * @va_block: virtual block to merge with adjacent blocks.
445  *
446  * This function does the following:
447  * - Merge the given blocks with the adjacent blocks if their virtual ranges
448  *   create a contiguous virtual range.
449  *
450  * This function should be called only when va_list lock is taken.
451  */
452 static void merge_va_blocks_locked(struct hl_device *hdev,
453                 struct list_head *va_list, struct hl_vm_va_block *va_block)
454 {
455         struct hl_vm_va_block *prev, *next;
456
457         prev = list_prev_entry(va_block, node);
458         if (&prev->node != va_list && prev->end + 1 == va_block->start) {
459                 prev->end = va_block->end;
460                 prev->size = prev->end - prev->start + 1;
461                 list_del(&va_block->node);
462                 kfree(va_block);
463                 va_block = prev;
464         }
465
466         next = list_next_entry(va_block, node);
467         if (&next->node != va_list && va_block->end + 1 == next->start) {
468                 next->start = va_block->start;
469                 next->size = next->end - next->start + 1;
470                 list_del(&va_block->node);
471                 kfree(va_block);
472         }
473 }
474
475 /**
476  * add_va_block_locked() - add a virtual block to the virtual addresses list.
477  * @hdev: pointer to the habanalabs device structure.
478  * @va_list: pointer to the virtual addresses block list.
479  * @start: start virtual address.
480  * @end: end virtual address.
481  *
482  * This function does the following:
483  * - Add the given block to the virtual blocks list and merge with other blocks
484  *   if a contiguous virtual block can be created.
485  *
486  * This function should be called only when va_list lock is taken.
487  */
488 static int add_va_block_locked(struct hl_device *hdev,
489                 struct list_head *va_list, u64 start, u64 end)
490 {
491         struct hl_vm_va_block *va_block, *res = NULL;
492         u64 size = end - start + 1;
493
494         print_va_list_locked(hdev, va_list);
495
496         list_for_each_entry(va_block, va_list, node) {
497                 /* TODO: remove once the code is mature */
498                 if (hl_mem_area_crosses_range(start, size, va_block->start,
499                                 va_block->end)) {
500                         dev_err(hdev->dev,
501                                 "block crossing ranges at start 0x%llx, end 0x%llx\n",
502                                 va_block->start, va_block->end);
503                         return -EINVAL;
504                 }
505
506                 if (va_block->end < start)
507                         res = va_block;
508         }
509
510         va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
511         if (!va_block)
512                 return -ENOMEM;
513
514         va_block->start = start;
515         va_block->end = end;
516         va_block->size = size;
517
518         if (!res)
519                 list_add(&va_block->node, va_list);
520         else
521                 list_add(&va_block->node, &res->node);
522
523         merge_va_blocks_locked(hdev, va_list, va_block);
524
525         print_va_list_locked(hdev, va_list);
526
527         return 0;
528 }
529
530 /**
531  * add_va_block() - wrapper for add_va_block_locked.
532  * @hdev: pointer to the habanalabs device structure.
533  * @va_range: pointer to the virtual addresses range object.
534  * @start: start virtual address.
535  * @end: end virtual address.
536  *
537  * This function does the following:
538  * - Takes the list lock and calls add_va_block_locked.
539  */
540 static inline int add_va_block(struct hl_device *hdev,
541                 struct hl_va_range *va_range, u64 start, u64 end)
542 {
543         int rc;
544
545         mutex_lock(&va_range->lock);
546         rc = add_va_block_locked(hdev, &va_range->list, start, end);
547         mutex_unlock(&va_range->lock);
548
549         return rc;
550 }
551
552 /**
553  * is_hint_crossing_range() - check if the hint address crosses a reserved range.
554  * @range_type: virtual space range type.
555  * @start_addr: start virtual address.
556  * @size: block size.
557  * @prop: asic properties structure to retrieve reserved ranges from.
558  */
559 static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
560                 u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
561         bool range_cross;
562
563         if (range_type == HL_VA_RANGE_TYPE_DRAM)
564                 range_cross =
565                         hl_mem_area_crosses_range(start_addr, size,
566                         prop->hints_dram_reserved_va_range.start_addr,
567                         prop->hints_dram_reserved_va_range.end_addr);
568         else if (range_type == HL_VA_RANGE_TYPE_HOST)
569                 range_cross =
570                         hl_mem_area_crosses_range(start_addr, size,
571                         prop->hints_host_reserved_va_range.start_addr,
572                         prop->hints_host_reserved_va_range.end_addr);
573         else
574                 range_cross =
575                         hl_mem_area_crosses_range(start_addr, size,
576                         prop->hints_host_hpage_reserved_va_range.start_addr,
577                         prop->hints_host_hpage_reserved_va_range.end_addr);
578
579         return range_cross;
580 }
581
582 /**
583  * get_va_block() - get a virtual block for the given size and alignment.
584  *
585  * @hdev: pointer to the habanalabs device structure.
586  * @va_range: pointer to the virtual addresses range.
587  * @size: requested block size.
588  * @hint_addr: hint for requested address by the user.
589  * @va_block_align: required alignment of the virtual block start address.
590  * @range_type: va range type (host, dram)
591  * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
592  *
593  * This function does the following:
594  * - Iterate on the virtual block list to find a suitable virtual block for the
595  *   given size, hint address and alignment.
596  * - Reserve the requested block and update the list.
597  * - Return the start address of the virtual block.
598  */
599 static u64 get_va_block(struct hl_device *hdev,
600                                 struct hl_va_range *va_range,
601                                 u64 size, u64 hint_addr, u32 va_block_align,
602                                 enum hl_va_range_type range_type,
603                                 u32 flags)
604 {
605         struct hl_vm_va_block *va_block, *new_va_block = NULL;
606         struct asic_fixed_properties *prop = &hdev->asic_prop;
607         u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
608                 align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
609                 dram_hint_mask = prop->dram_hints_align_mask;
610         bool add_prev = false;
611         bool is_align_pow_2 = is_power_of_2(va_range->page_size);
612         bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
613         bool force_hint = flags & HL_MEM_FORCE_HINT;
614
615         if (is_align_pow_2)
616                 align_mask = ~((u64)va_block_align - 1);
617         else
618                 /*
619                  * with non-power-of-2 range we work only with page granularity
620                  * and the start address is page aligned,
621                  * so no need for alignment checking.
622                  */
623                 size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
624                                                         va_range->page_size;
625
626         tmp_hint_addr = hint_addr & ~dram_hint_mask;
627
628         /* Check if we need to ignore hint address */
629         if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
630                         (!is_align_pow_2 && is_hint_dram_addr &&
631                         do_div(tmp_hint_addr, va_range->page_size))) {
632
633                 if (force_hint) {
634                         /* Hint must be respected, so here we just fail */
635                         dev_err(hdev->dev,
636                                 "Hint address 0x%llx is not page aligned - cannot be respected\n",
637                                 hint_addr);
638                         return 0;
639                 }
640
641                 dev_dbg(hdev->dev,
642                         "Hint address 0x%llx will be ignored because it is not aligned\n",
643                         hint_addr);
644                 hint_addr = 0;
645         }
646
647         mutex_lock(&va_range->lock);
648
649         print_va_list_locked(hdev, &va_range->list);
650
651         list_for_each_entry(va_block, &va_range->list, node) {
652                 /* Calc the first possible aligned addr */
653                 valid_start = va_block->start;
654
655                 if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
656                         valid_start &= align_mask;
657                         valid_start += va_block_align;
658                         if (valid_start > va_block->end)
659                                 continue;
660                 }
661
662                 valid_size = va_block->end - valid_start + 1;
663                 if (valid_size < size)
664                         continue;
665
666                 /*
667                  * In case hint address is 0, and hints_range_reservation
668                  * property enabled, then avoid allocating va blocks from the
669                  * range reserved for hint addresses
670                  */
671                 if (prop->hints_range_reservation && !hint_addr)
672                         if (is_hint_crossing_range(range_type, valid_start,
673                                         size, prop))
674                                 continue;
675
676                 /* Pick the minimal length block which has the required size */
677                 if (!new_va_block || (valid_size < reserved_valid_size)) {
678                         new_va_block = va_block;
679                         reserved_valid_start = valid_start;
680                         reserved_valid_size = valid_size;
681                 }
682
683                 if (hint_addr && hint_addr >= valid_start &&
684                                         (hint_addr + size) <= va_block->end) {
685                         new_va_block = va_block;
686                         reserved_valid_start = hint_addr;
687                         reserved_valid_size = valid_size;
688                         break;
689                 }
690         }
691
692         if (!new_va_block) {
693                 dev_err(hdev->dev, "no available va block for size %llu\n",
694                                                                 size);
695                 goto out;
696         }
697
698         if (force_hint && reserved_valid_start != hint_addr) {
699                 /* Hint address must be respected. If we are here - this means
700                  * we could not respect it.
701                  */
702                 dev_err(hdev->dev,
703                         "Hint address 0x%llx could not be respected\n",
704                         hint_addr);
705                 reserved_valid_start = 0;
706                 goto out;
707         }
708
709         /*
710          * Check if there is some leftover range due to reserving the new
711          * va block, then return it to the main virtual addresses list.
712          */
713         if (reserved_valid_start > new_va_block->start) {
714                 prev_start = new_va_block->start;
715                 prev_end = reserved_valid_start - 1;
716
717                 new_va_block->start = reserved_valid_start;
718                 new_va_block->size = reserved_valid_size;
719
720                 add_prev = true;
721         }
722
723         if (new_va_block->size > size) {
724                 new_va_block->start += size;
725                 new_va_block->size = new_va_block->end - new_va_block->start + 1;
726         } else {
727                 list_del(&new_va_block->node);
728                 kfree(new_va_block);
729         }
730
731         if (add_prev)
732                 add_va_block_locked(hdev, &va_range->list, prev_start,
733                                 prev_end);
734
735         print_va_list_locked(hdev, &va_range->list);
736 out:
737         mutex_unlock(&va_range->lock);
738
739         return reserved_valid_start;
740 }
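
/*
 * Illustrative alignment step from get_va_block() above (hypothetical values,
 * not part of the driver), for the power-of-2 case:
 *
 *   va_block_align  = SZ_2M;                       // 0x200000
 *   align_mask      = ~((u64)va_block_align - 1);  // ~0x1fffff
 *   va_block->start = 0x10123000;                  // not 2MB aligned
 *   valid_start     = (0x10123000 & align_mask) + va_block_align;
 *                   = 0x10000000 + 0x200000 = 0x10200000
 *
 * i.e. the candidate start address is rounded up to the next alignment
 * boundary inside the block before its remaining size is checked.
 */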
741
742 /**
743  * hl_reserve_va_block() - reserve a virtual block of a given size.
744  * @hdev: pointer to the habanalabs device structure.
745  * @ctx: current context
746  * @type: virtual addresses range type.
747  * @size: requested block size.
748  * @alignment: required alignment in bytes of the virtual block start address,
749  *             0 means no alignment.
750  *
751  * This function does the following:
752  * - Iterate on the virtual block list to find a suitable virtual block for the
753  *   given size and alignment.
754  * - Reserve the requested block and update the list.
755  * - Return the start address of the virtual block.
756  */
757 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
758                 enum hl_va_range_type type, u64 size, u32 alignment)
759 {
760         return get_va_block(hdev, ctx->va_range[type], size, 0,
761                         max(alignment, ctx->va_range[type]->page_size),
762                         type, 0);
763 }
764
765 /**
766  * hl_get_va_range_type() - get va_range type for the given address and size.
767  * @ctx: context to fetch va_range from.
768  * @address: the start address of the area we want to validate.
769  * @size: the size in bytes of the area we want to validate.
770  * @type: returned va_range type.
771  *
772  * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
773  */
774 static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
775                         enum hl_va_range_type *type)
776 {
777         int i;
778
779         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
780                 if (hl_mem_area_inside_range(address, size,
781                                 ctx->va_range[i]->start_addr,
782                                 ctx->va_range[i]->end_addr)) {
783                         *type = i;
784                         return 0;
785                 }
786         }
787
788         return -EINVAL;
789 }
790
791 /**
792  * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
793  * @hdev: pointer to the habanalabs device structure
794  * @ctx: pointer to the context structure.
795  * @start_addr: start virtual address.
796  * @size: number of bytes to unreserve.
797  *
798  * This function does the following:
799  * - Takes the list lock and calls add_va_block_locked.
800  */
801 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
802                 u64 start_addr, u64 size)
803 {
804         enum hl_va_range_type type;
805         int rc;
806
807         rc = hl_get_va_range_type(ctx, start_addr, size, &type);
808         if (rc) {
809                 dev_err(hdev->dev,
810                         "cannot find va_range for va %#llx size %llu",
811                         start_addr, size);
812                 return rc;
813         }
814
815         rc = add_va_block(hdev, ctx->va_range[type], start_addr,
816                                                 start_addr + size - 1);
817         if (rc)
818                 dev_warn(hdev->dev,
819                         "add va block failed for vaddr: 0x%llx\n", start_addr);
820
821         return rc;
822 }
823
824 /**
825  * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
826  *                                    memory
827  * @ctx: pointer to the context structure.
828  * @userptr: userptr to initialize from.
829  * @pphys_pg_pack: result pointer.
830  * @force_regular_page: tell the function to ignore huge page optimization,
831  *                      even if possible. Needed for cases where the device VA
832  *                      is allocated before we know the composition of the
833  *                      physical pages
834  *
835  * This function does the following:
836  * - Pin the physical pages related to the given virtual block.
837  * - Create a physical page pack from the physical pages related to the given
838  *   virtual block.
839  */
840 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
841                                 struct hl_userptr *userptr,
842                                 struct hl_vm_phys_pg_pack **pphys_pg_pack,
843                                 bool force_regular_page)
844 {
845         u32 npages, page_size = PAGE_SIZE,
846                 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
847         u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
848         struct hl_vm_phys_pg_pack *phys_pg_pack;
849         bool first = true, is_huge_page_opt;
850         u64 page_mask, total_npages;
851         struct scatterlist *sg;
852         dma_addr_t dma_addr;
853         int rc, i, j;
854
855         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
856         if (!phys_pg_pack)
857                 return -ENOMEM;
858
859         phys_pg_pack->vm_type = userptr->vm_type;
860         phys_pg_pack->created_from_userptr = true;
861         phys_pg_pack->asid = ctx->asid;
862         atomic_set(&phys_pg_pack->mapping_cnt, 1);
863
864         is_huge_page_opt = (force_regular_page ? false : true);
865
866         /* Huge page mapping can be used only if all dma_addrs are aligned
867          * to 2MB and their sizes are at least 2MB. We limit the 2MB
868          * optimization to this condition, since later on we acquire the
869          * related VA range as one consecutive block (see the worked
870          * example after this function).
871          */
872         total_npages = 0;
873         for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
874                 npages = hl_get_sg_info(sg, &dma_addr);
875
876                 total_npages += npages;
877
878                 if ((npages % pgs_in_huge_page) ||
879                                         (dma_addr & (huge_page_size - 1)))
880                         is_huge_page_opt = false;
881         }
882
883         if (is_huge_page_opt) {
884                 page_size = huge_page_size;
885                 do_div(total_npages, pgs_in_huge_page);
886         }
887
888         page_mask = ~(((u64) page_size) - 1);
889
890         phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
891                                                 GFP_KERNEL);
892         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
893                 rc = -ENOMEM;
894                 goto page_pack_arr_mem_err;
895         }
896
897         phys_pg_pack->npages = total_npages;
898         phys_pg_pack->page_size = page_size;
899         phys_pg_pack->total_size = total_npages * page_size;
900
901         j = 0;
902         for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
903                 npages = hl_get_sg_info(sg, &dma_addr);
904
905                 /* align down to physical page size and save the offset */
906                 if (first) {
907                         first = false;
908                         phys_pg_pack->offset = dma_addr & (page_size - 1);
909                         dma_addr &= page_mask;
910                 }
911
912                 while (npages) {
913                         phys_pg_pack->pages[j++] = dma_addr;
914                         dma_addr += page_size;
915
916                         if (is_huge_page_opt)
917                                 npages -= pgs_in_huge_page;
918                         else
919                                 npages--;
920                 }
921         }
922
923         *pphys_pg_pack = phys_pg_pack;
924
925         return 0;
926
927 page_pack_arr_mem_err:
928         kfree(phys_pg_pack);
929
930         return rc;
931 }
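
/*
 * Illustrative check of the 2MB optimization above (hypothetical values, not
 * part of the driver), assuming page_size = 4KB and huge_page_size = 2MB,
 * i.e. pgs_in_huge_page = 512:
 *
 *   sg entry A: npages = 1024, dma_addr = 0x20000000
 *               -> 1024 % 512 == 0 and the address is 2MB aligned: OK
 *   sg entry B: npages = 768,  dma_addr = 0x20100000
 *               -> 768 % 512 != 0 (and 0x20100000 is only 1MB aligned):
 *                  is_huge_page_opt is cleared and the pack falls back to
 *                  regular 4KB pages.
 */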
932
933 /**
934  * map_phys_pg_pack() - maps the physical page pack.
935  * @ctx: pointer to the context structure.
936  * @vaddr: start address of the virtual area to map from.
937  * @phys_pg_pack: the pack of physical pages to map to.
938  *
939  * This function does the following:
940  * - Maps each chunk of virtual memory to the matching physical chunk.
941  * - Unmaps the already-mapped pages if any mapping fails.
942  * - Returns 0 on success, error code otherwise.
943  */
944 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
945                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
946 {
947         struct hl_device *hdev = ctx->hdev;
948         u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
949         u32 page_size = phys_pg_pack->page_size;
950         int rc = 0;
951         bool is_host_addr;
952
953         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
954                 paddr = phys_pg_pack->pages[i];
955
956                 rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
957                                 (i + 1) == phys_pg_pack->npages);
958                 if (rc) {
959                         dev_err(hdev->dev,
960                                 "map failed for handle %u, npages: %llu, mapped: %llu",
961                                 phys_pg_pack->handle, phys_pg_pack->npages,
962                                 mapped_pg_cnt);
963                         goto err;
964                 }
965
966                 mapped_pg_cnt++;
967                 next_vaddr += page_size;
968         }
969
970         return 0;
971
972 err:
973         is_host_addr = !hl_is_dram_va(hdev, vaddr);
974
975         next_vaddr = vaddr;
976         for (i = 0 ; i < mapped_pg_cnt ; i++) {
977                 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
978                                         (i + 1) == mapped_pg_cnt))
979                         dev_warn_ratelimited(hdev->dev,
980                                 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
981                                         phys_pg_pack->handle, next_vaddr,
982                                         phys_pg_pack->pages[i], page_size);
983
984                 next_vaddr += page_size;
985
986                 /*
987                  * Unmapping on Palladium can be really long, so avoid a CPU
988                  * soft lockup bug by sleeping a little between unmapping pages.
989                  *
990                  * In addition, the number of host pages can be huge, since the
991                  * page size can be as small as 4KB, so when unmapping host
992                  * pages sleep every 32K pages to avoid a soft lockup.
993                  */
994                 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
995                         usleep_range(50, 200);
996         }
997
998         return rc;
999 }
1000
1001 /**
1002  * unmap_phys_pg_pack() - unmaps the physical page pack.
1003  * @ctx: pointer to the context structure.
1004  * @vaddr: start address of the virtual area to unmap.
1005  * @phys_pg_pack: the pack of physical pages to unmap.
1006  */
1007 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
1008                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
1009 {
1010         struct hl_device *hdev = ctx->hdev;
1011         u64 next_vaddr, i;
1012         bool is_host_addr;
1013         u32 page_size;
1014
1015         is_host_addr = !hl_is_dram_va(hdev, vaddr);
1016         page_size = phys_pg_pack->page_size;
1017         next_vaddr = vaddr;
1018
1019         for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1020                 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
1021                                        (i + 1) == phys_pg_pack->npages))
1022                         dev_warn_ratelimited(hdev->dev,
1023                         "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1024
1025                 /*
1026                  * Unmapping on Palladium can be really long, so avoid a CPU
1027                  * soft lockup bug by sleeping a little between unmapping pages.
1028                  *
1029                  * In addition, the number of host pages can be huge, since the
1030                  * page size can be as small as 4KB, so when unmapping host
1031                  * pages sleep every 32K pages to avoid a soft lockup.
1032                  */
1033                 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1034                         usleep_range(50, 200);
1035         }
1036 }
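
/*
 * Illustrative arithmetic for the throttling above (not part of the driver):
 * (i & 0x7FFF) == 0 is true once every 32768 pages, so with 4KB host pages
 * the loop sleeps roughly once per 128MB of unmapped memory.
 */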
1037
1038 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
1039                                         u64 *paddr)
1040 {
1041         struct hl_device *hdev = ctx->hdev;
1042         struct hl_vm *vm = &hdev->vm;
1043         struct hl_vm_phys_pg_pack *phys_pg_pack;
1044         u32 handle;
1045
1046         handle = lower_32_bits(args->map_device.handle);
1047         spin_lock(&vm->idr_lock);
1048         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1049         if (!phys_pg_pack) {
1050                 spin_unlock(&vm->idr_lock);
1051                 dev_err(hdev->dev, "no match for handle %u\n", handle);
1052                 return -EINVAL;
1053         }
1054
1055         *paddr = phys_pg_pack->pages[0];
1056
1057         spin_unlock(&vm->idr_lock);
1058
1059         return 0;
1060 }
1061
1062 /**
1063  * map_device_va() - map the given memory.
1064  * @ctx: pointer to the context structure.
1065  * @args: host parameters with handle/host virtual address.
1066  * @device_addr: pointer to result device virtual address.
1067  *
1068  * This function does the following:
1069  * - If given a physical device memory handle, map to a device virtual block
1070  *   and return the start address of this block.
1071  * - If given a host virtual address and size, find the related physical pages,
1072  *   map a device virtual block to this pages and return the start address of
1073  *   this block.
1074  */
1075 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
1076 {
1077         struct hl_vm_phys_pg_pack *phys_pg_pack;
1078         enum hl_va_range_type va_range_type = 0;
1079         struct hl_device *hdev = ctx->hdev;
1080         struct hl_userptr *userptr = NULL;
1081         u32 handle = 0, va_block_align;
1082         struct hl_vm_hash_node *hnode;
1083         struct hl_vm *vm = &hdev->vm;
1084         struct hl_va_range *va_range;
1085         bool is_userptr, do_prefetch;
1086         u64 ret_vaddr, hint_addr;
1087         enum vm_type *vm_type;
1088         int rc;
1089
1090         /* set map flags */
1091         is_userptr = args->flags & HL_MEM_USERPTR;
1092         do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
1093
1094         /* Assume failure */
1095         *device_addr = 0;
1096
1097         if (is_userptr) {
1098                 u64 addr = args->map_host.host_virt_addr,
1099                         size = args->map_host.mem_size;
1100                 u32 page_size = hdev->asic_prop.pmmu.page_size,
1101                         huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1102
1103                 rc = dma_map_host_va(hdev, addr, size, &userptr);
1104                 if (rc) {
1105                         dev_err(hdev->dev, "failed to get userptr from va\n");
1106                         return rc;
1107                 }
1108
1109                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1110                                 &phys_pg_pack, false);
1111                 if (rc) {
1112                         dev_err(hdev->dev,
1113                                 "unable to init page pack for vaddr 0x%llx\n",
1114                                 addr);
1115                         goto init_page_pack_err;
1116                 }
1117
1118                 vm_type = (enum vm_type *) userptr;
1119                 hint_addr = args->map_host.hint_addr;
1120                 handle = phys_pg_pack->handle;
1121
1122                 /* get required alignment */
1123                 if (phys_pg_pack->page_size == page_size) {
1124                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1125                         va_range_type = HL_VA_RANGE_TYPE_HOST;
1126                         /*
1127                          * huge page alignment may be needed in case of regular
1128                          * page mapping, depending on the host VA alignment
1129                          */
1130                         if (addr & (huge_page_size - 1))
1131                                 va_block_align = page_size;
1132                         else
1133                                 va_block_align = huge_page_size;
1134                 } else {
1135                         /*
1136                          * huge page alignment is needed in case of huge page
1137                          * mapping
1138                          */
1139                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1140                         va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
1141                         va_block_align = huge_page_size;
1142                 }
1143         } else {
1144                 handle = lower_32_bits(args->map_device.handle);
1145
1146                 spin_lock(&vm->idr_lock);
1147                 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1148                 if (!phys_pg_pack) {
1149                         spin_unlock(&vm->idr_lock);
1150                         dev_err(hdev->dev,
1151                                 "no match for handle %u\n", handle);
1152                         return -EINVAL;
1153                 }
1154
1155                 /* increment now to avoid freeing device memory while mapping */
1156                 atomic_inc(&phys_pg_pack->mapping_cnt);
1157
1158                 spin_unlock(&vm->idr_lock);
1159
1160                 vm_type = (enum vm_type *) phys_pg_pack;
1161
1162                 hint_addr = args->map_device.hint_addr;
1163
1164                 /* DRAM VA alignment is the same as the MMU page size */
1165                 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1166                 va_range_type = HL_VA_RANGE_TYPE_DRAM;
1167                 va_block_align = hdev->asic_prop.dmmu.page_size;
1168         }
1169
1170         /*
1171          * relevant for mapping device physical memory only, as host memory is
1172          * implicitly shared
1173          */
1174         if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1175                         phys_pg_pack->asid != ctx->asid) {
1176                 dev_err(hdev->dev,
1177                         "Failed to map memory, handle %u is not shared\n",
1178                         handle);
1179                 rc = -EPERM;
1180                 goto shared_err;
1181         }
1182
1183         hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1184         if (!hnode) {
1185                 rc = -ENOMEM;
1186                 goto hnode_err;
1187         }
1188
1189         if (hint_addr && phys_pg_pack->offset) {
1190                 if (args->flags & HL_MEM_FORCE_HINT) {
1191                         /* Fail if hint must be respected but it can't be */
1192                         dev_err(hdev->dev,
1193                                 "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
1194                                 hint_addr, phys_pg_pack->offset);
1195                         rc = -EINVAL;
1196                         goto va_block_err;
1197                 }
1198                 dev_dbg(hdev->dev,
1199                         "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
1200                         hint_addr, phys_pg_pack->offset);
1201         }
1202
1203         ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1204                                         hint_addr, va_block_align,
1205                                         va_range_type, args->flags);
1206         if (!ret_vaddr) {
1207                 dev_err(hdev->dev, "no available va block for handle %u\n",
1208                                 handle);
1209                 rc = -ENOMEM;
1210                 goto va_block_err;
1211         }
1212
1213         mutex_lock(&hdev->mmu_lock);
1214
1215         rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1216         if (rc) {
1217                 dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
1218                 mutex_unlock(&hdev->mmu_lock);
1219                 goto map_err;
1220         }
1221
1222         rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
1223                                 ctx->asid, ret_vaddr, phys_pg_pack->total_size);
1224         mutex_unlock(&hdev->mmu_lock);
1225         if (rc)
1226                 goto map_err;
1227
1228         /*
1229          * Prefetch is done upon the user's request. It is performed in a WQ and so can
1230          * be outside the MMU lock. The operation itself is already protected by the MMU lock.
1231          */
1232         if (do_prefetch) {
1233                 rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
1234                                                         phys_pg_pack->total_size);
1235                 if (rc)
1236                         goto map_err;
1237         }
1238
1239         ret_vaddr += phys_pg_pack->offset;
1240
1241         hnode->ptr = vm_type;
1242         hnode->vaddr = ret_vaddr;
1243
1244         mutex_lock(&ctx->mem_hash_lock);
1245         hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1246         mutex_unlock(&ctx->mem_hash_lock);
1247
1248         *device_addr = ret_vaddr;
1249
1250         if (is_userptr)
1251                 free_phys_pg_pack(hdev, phys_pg_pack);
1252
1253         return rc;
1254
1255 map_err:
1256         if (add_va_block(hdev, va_range, ret_vaddr,
1257                                 ret_vaddr + phys_pg_pack->total_size - 1))
1258                 dev_warn(hdev->dev,
1259                         "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1260                                 handle, ret_vaddr);
1261
1262 va_block_err:
1263         kfree(hnode);
1264 hnode_err:
1265 shared_err:
1266         atomic_dec(&phys_pg_pack->mapping_cnt);
1267         if (is_userptr)
1268                 free_phys_pg_pack(hdev, phys_pg_pack);
1269 init_page_pack_err:
1270         if (is_userptr)
1271                 dma_unmap_host_va(hdev, userptr);
1272
1273         return rc;
1274 }
1275
1276 /**
1277  * unmap_device_va() - unmap the given device virtual address.
1278  * @ctx: pointer to the context structure.
1279  * @args: host parameters with device virtual address to unmap.
1280  * @ctx_free: true if in context free flow, false otherwise.
1281  *
1282  * This function does the following:
1283  * - unmap the physical pages related to the given virtual address.
1284  * - return the device virtual block to the virtual block list.
1285  */
1286 static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1287                                 bool ctx_free)
1288 {
1289         struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1290         u64 vaddr = args->unmap.device_virt_addr;
1291         struct hl_vm_hash_node *hnode = NULL;
1292         struct asic_fixed_properties *prop;
1293         struct hl_device *hdev = ctx->hdev;
1294         struct hl_userptr *userptr = NULL;
1295         struct hl_va_range *va_range;
1296         enum vm_type *vm_type;
1297         bool is_userptr;
1298         int rc = 0;
1299
1300         prop = &hdev->asic_prop;
1301
1302         /* protect from a concurrent unmap of the same address */
1303         mutex_lock(&ctx->mem_hash_lock);
1304         hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1305                 if (vaddr == hnode->vaddr)
1306                         break;
1307
1308         if (!hnode) {
1309                 mutex_unlock(&ctx->mem_hash_lock);
1310                 dev_err(hdev->dev,
1311                         "unmap failed, no mem hnode for vaddr 0x%llx\n",
1312                         vaddr);
1313                 return -EINVAL;
1314         }
1315
1316         hash_del(&hnode->node);
1317         mutex_unlock(&ctx->mem_hash_lock);
1318
1319         vm_type = hnode->ptr;
1320
1321         if (*vm_type == VM_TYPE_USERPTR) {
1322                 is_userptr = true;
1323                 userptr = hnode->ptr;
1324
1325                 rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
1326                                                         false);
1327                 if (rc) {
1328                         dev_err(hdev->dev,
1329                                 "unable to init page pack for vaddr 0x%llx\n",
1330                                 vaddr);
1331                         goto vm_type_err;
1332                 }
1333
1334                 if (phys_pg_pack->page_size ==
1335                                         hdev->asic_prop.pmmu.page_size)
1336                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1337                 else
1338                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1339         } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1340                 is_userptr = false;
1341                 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1342                 phys_pg_pack = hnode->ptr;
1343         } else {
1344                 dev_warn(hdev->dev,
1345                         "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1346                                 vaddr);
1347                 rc = -EFAULT;
1348                 goto vm_type_err;
1349         }
1350
1351         if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1352                 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1353                 rc = -EINVAL;
1354                 goto mapping_cnt_err;
1355         }
1356
1357         if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1358                 vaddr = prop->dram_base_address +
1359                         DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1360                                                 phys_pg_pack->page_size) *
1361                                                         phys_pg_pack->page_size;
1362         else
1363                 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1364
1365         mutex_lock(&hdev->mmu_lock);
1366
1367         unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1368
1369         /*
1370          * During context free this function is called in a loop to clean all
1371          * the context mappings. Hence the cache invalidation can be called once
1372          * at the loop end rather than for each iteration
1373          */
1374         if (!ctx_free)
1375                 rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
1376                                                         phys_pg_pack->total_size);
1377
1378         mutex_unlock(&hdev->mmu_lock);
1379
1380         /*
1381          * If the context is closing we don't need to check for the MMU cache
1382          * invalidation return code and update the VA free list as in this flow
1383          * we invalidate the MMU cache outside of this unmap function and the VA
1384          * free list will be freed anyway.
1385          */
1386         if (!ctx_free) {
1387                 int tmp_rc;
1388
1389                 tmp_rc = add_va_block(hdev, va_range, vaddr,
1390                                         vaddr + phys_pg_pack->total_size - 1);
1391                 if (tmp_rc) {
1392                         dev_warn(hdev->dev,
1393                                         "add va block failed for vaddr: 0x%llx\n",
1394                                         vaddr);
1395                         if (!rc)
1396                                 rc = tmp_rc;
1397                 }
1398         }
1399
1400         atomic_dec(&phys_pg_pack->mapping_cnt);
1401         kfree(hnode);
1402
1403         if (is_userptr) {
1404                 free_phys_pg_pack(hdev, phys_pg_pack);
1405                 dma_unmap_host_va(hdev, userptr);
1406         }
1407
1408         return rc;
1409
1410 mapping_cnt_err:
1411         if (is_userptr)
1412                 free_phys_pg_pack(hdev, phys_pg_pack);
1413 vm_type_err:
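        /* on failure, add the hash node back so the mapping is not lost */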
1414         mutex_lock(&ctx->mem_hash_lock);
1415         hash_add(ctx->mem_hash, &hnode->node, vaddr);
1416         mutex_unlock(&ctx->mem_hash_lock);
1417
1418         return rc;
1419 }
1420
1421 static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
1422 {
1423         u32 block_id;
1424         int rc;
1425
1426         *handle = 0;
1427         if (size)
1428                 *size = 0;
1429
1430         rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1431         if (rc)
1432                 return rc;
1433
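        /*
         * Encode the block id together with the HW block mmap type and shift
         * by PAGE_SHIFT, so the returned handle can later be used as the mmap
         * offset of this block (hl_hw_block_mmap() recovers the block id from
         * vma->vm_pgoff).
         */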
1434         *handle = block_id | HL_MMAP_TYPE_BLOCK;
1435         *handle <<= PAGE_SHIFT;
1436
1437         return 0;
1438 }
1439
1440 static void hw_block_vm_close(struct vm_area_struct *vma)
1441 {
1442         struct hl_vm_hw_block_list_node *lnode =
1443                 (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1444         struct hl_ctx *ctx = lnode->ctx;
1445         long new_mmap_size;
1446
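        /*
         * vm_close() is called for each piece if the user unmaps the block in
         * parts (e.g. after a VMA split). Tear the node down only once the
         * whole mapped size has been unmapped.
         */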
1447         new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
1448         if (new_mmap_size > 0) {
1449                 lnode->mapped_size = new_mmap_size;
1450                 return;
1451         }
1452
1453         mutex_lock(&ctx->hw_block_list_lock);
1454         list_del(&lnode->node);
1455         mutex_unlock(&ctx->hw_block_list_lock);
1456         hl_ctx_put(ctx);
1457         kfree(lnode);
1458         vma->vm_private_data = NULL;
1459 }
1460
1461 static const struct vm_operations_struct hw_block_vm_ops = {
1462         .close = hw_block_vm_close
1463 };
1464
1465 /**
1466  * hl_hw_block_mmap() - mmap a hw block to user.
1467  * @hpriv: pointer to the private data of the fd
1468  * @vma: pointer to vm_area_struct of the process
1469  *
1470  * The driver increments the context reference for every HW block mapped in
1471  * order to prevent the user from closing the FD without unmapping it first.
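 *
 * Note: the block id and size are obtained beforehand via the
 * HL_MEM_OP_MAP_BLOCK memory IOCTL (see map_block()); the handle returned
 * there encodes the block id and is used as the mmap offset of this mapping.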
1472  */
1473 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1474 {
1475         struct hl_vm_hw_block_list_node *lnode;
1476         struct hl_device *hdev = hpriv->hdev;
1477         struct hl_ctx *ctx = hpriv->ctx;
1478         u32 block_id, block_size;
1479         int rc;
1480
1481         /* We use the page offset to hold the block id and thus we need to clear
1482          * it before doing the mmap itself
1483          */
1484         block_id = vma->vm_pgoff;
1485         vma->vm_pgoff = 0;
1486
1487         /* Driver only allows mapping of a complete HW block */
1488         block_size = vma->vm_end - vma->vm_start;
1489
1490         if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1491                 dev_err(hdev->dev,
1492                         "user pointer is invalid - 0x%lx\n",
1493                         vma->vm_start);
1494
1495                 return -EINVAL;
1496         }
1497
1498         lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1499         if (!lnode)
1500                 return -ENOMEM;
1501
1502         rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1503         if (rc) {
1504                 kfree(lnode);
1505                 return rc;
1506         }
1507
1508         hl_ctx_get(ctx);
1509
1510         lnode->ctx = ctx;
1511         lnode->vaddr = vma->vm_start;
1512         lnode->block_size = block_size;
1513         lnode->mapped_size = lnode->block_size;
1514         lnode->id = block_id;
1515
1516         vma->vm_private_data = lnode;
1517         vma->vm_ops = &hw_block_vm_ops;
1518
1519         mutex_lock(&ctx->hw_block_list_lock);
1520         list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1521         mutex_unlock(&ctx->hw_block_list_lock);
1522
1523         vma->vm_pgoff = block_id;
1524
1525         return 0;
1526 }
1527
1528 static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
1529                         struct device *dev, enum dma_data_direction dir)
1530 {
1531         dma_addr_t addr;
1532         int rc;
1533
1534         addr = dma_map_resource(dev, bar_address, chunk_size, dir,
1535                                 DMA_ATTR_SKIP_CPU_SYNC);
1536         rc = dma_mapping_error(dev, addr);
1537         if (rc)
1538                 return rc;
1539
1540         sg_set_page(sg, NULL, chunk_size, 0);
1541         sg_dma_address(sg) = addr;
1542         sg_dma_len(sg) = chunk_size;
1543
1544         return 0;
1545 }
1546
1547 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
1548                                                 u64 page_size, struct device *dev,
1549                                                 enum dma_data_direction dir)
1550 {
1551         u64 chunk_size, bar_address, dma_max_seg_size;
1552         struct asic_fixed_properties *prop;
1553         int rc, i, j, nents, cur_page;
1554         struct scatterlist *sg;
1555         struct sg_table *sgt;
1556
1557         prop = &hdev->asic_prop;
1558
1559         dma_max_seg_size = dma_get_max_seg_size(dev);
1560
1561         /* We would like to align the max segment size to PAGE_SIZE, so the
1562          * SGL will contain aligned addresses that can be easily mapped to
1563          * an MMU
1564          */
1565         dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
1566         if (dma_max_seg_size < PAGE_SIZE) {
1567                 dev_err_ratelimited(hdev->dev,
1568                                 "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
1569                                 dma_max_seg_size);
1570                 return ERR_PTR(-EINVAL);
1571         }
1572
1573         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1574         if (!sgt)
1575                 return ERR_PTR(-ENOMEM);
1576
1577         /* If the size of each page is larger than the dma max segment size,
1578          * then we can't combine pages and the number of entries in the SGL
1579          * will just be the
1580          * <number of pages> * <chunks of max segment size in each page>
1581          */
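        /* As an illustrative example (hypothetical numbers): with npages = 2,
         * page_size = 4 MB and dma_max_seg_size = 1 MB, each page needs
         * DIV_ROUND_UP_ULL(4 MB, 1 MB) = 4 segments, so nents = 2 * 4 = 8.
         * Otherwise, physically contiguous pages are merged below into chunks
         * of up to dma_max_seg_size each.
         */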
1582         if (page_size > dma_max_seg_size)
1583                 nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
1584         else
1585                 /* Get number of non-contiguous chunks */
1586                 for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
1587                         if (pages[i - 1] + page_size != pages[i] ||
1588                                         chunk_size + page_size > dma_max_seg_size) {
1589                                 nents++;
1590                                 chunk_size = page_size;
1591                                 continue;
1592                         }
1593
1594                         chunk_size += page_size;
1595                 }
1596
1597         rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
1598         if (rc)
1599                 goto error_free;
1600
1601         cur_page = 0;
1602
1603         if (page_size > dma_max_seg_size) {
1604                 u64 size_left, cur_device_address = 0;
1605
1606                 size_left = page_size;
1607
1608                 /* Need to split each page into the number of chunks of
1609                  * dma_max_seg_size
1610                  */
1611                 for_each_sgtable_dma_sg(sgt, sg, i) {
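                        /* size_left == page_size means we are starting a new device page */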
1612                         if (size_left == page_size)
1613                                 cur_device_address =
1614                                         pages[cur_page] - prop->dram_base_address;
1615                         else
1616                                 cur_device_address += dma_max_seg_size;
1617
1618                         chunk_size = min(size_left, dma_max_seg_size);
1619
1620                         bar_address = hdev->dram_pci_bar_start + cur_device_address;
1621
1622                         rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1623                         if (rc)
1624                                 goto error_unmap;
1625
1626                         if (size_left > dma_max_seg_size) {
1627                                 size_left -= dma_max_seg_size;
1628                         } else {
1629                                 cur_page++;
1630                                 size_left = page_size;
1631                         }
1632                 }
1633         } else {
1634                 /* Merge pages and put them into the scatterlist */
1635                 for_each_sgtable_dma_sg(sgt, sg, i) {
1636                         chunk_size = page_size;
1637                         for (j = cur_page + 1 ; j < npages ; j++) {
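                                /* stop merging on a physical discontinuity or when the
                                 * chunk would exceed dma_max_seg_size
                                 */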
1638                                 if (pages[j - 1] + page_size != pages[j] ||
1639                                                 chunk_size + page_size > dma_max_seg_size)
1640                                         break;
1641
1642                                 chunk_size += page_size;
1643                         }
1644
1645                         bar_address = hdev->dram_pci_bar_start +
1646                                         (pages[cur_page] - prop->dram_base_address);
1647
1648                         rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1649                         if (rc)
1650                                 goto error_unmap;
1651
1652                         cur_page = j;
1653                 }
1654         }
1655
1656         /* Because we are not going to include a CPU page list, set orig_nents
1657          * to 0. This gives other users some chance to detect it, and to use
1658          * only nents (the length of the DMA list) when going over the
1659          * sgl
1660          */
1661         sgt->orig_nents = 0;
1662
1663         return sgt;
1664
1665 error_unmap:
1666         for_each_sgtable_dma_sg(sgt, sg, i) {
1667                 if (!sg_dma_len(sg))
1668                         continue;
1669
1670                 dma_unmap_resource(dev, sg_dma_address(sg),
1671                                         sg_dma_len(sg), dir,
1672                                         DMA_ATTR_SKIP_CPU_SYNC);
1673         }
1674
1675         sg_free_table(sgt);
1676
1677 error_free:
1678         kfree(sgt);
1679         return ERR_PTR(rc);
1680 }
1681
1682 static int hl_dmabuf_attach(struct dma_buf *dmabuf,
1683                                 struct dma_buf_attachment *attachment)
1684 {
1685         struct hl_dmabuf_priv *hl_dmabuf;
1686         struct hl_device *hdev;
1687         int rc;
1688
1689         hl_dmabuf = dmabuf->priv;
1690         hdev = hl_dmabuf->ctx->hdev;
1691
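        /*
         * Check whether PCI peer-to-peer transfers between our device and the
         * importer are possible. If not, clear peer2peer so a later map attempt
         * will fail in hl_map_dmabuf() instead of trying a p2p mapping.
         */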
1692         rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
1693
1694         if (rc < 0)
1695                 attachment->peer2peer = false;
1696         return 0;
1697 }
1698
1699 static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
1700                                         enum dma_data_direction dir)
1701 {
1702         struct dma_buf *dma_buf = attachment->dmabuf;
1703         struct hl_vm_phys_pg_pack *phys_pg_pack;
1704         struct hl_dmabuf_priv *hl_dmabuf;
1705         struct hl_device *hdev;
1706         struct sg_table *sgt;
1707
1708         hl_dmabuf = dma_buf->priv;
1709         hdev = hl_dmabuf->ctx->hdev;
1710         phys_pg_pack = hl_dmabuf->phys_pg_pack;
1711
1712         if (!attachment->peer2peer) {
1713                 dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1714                 return ERR_PTR(-EPERM);
1715         }
1716
1717         if (phys_pg_pack)
1718                 sgt = alloc_sgt_from_device_pages(hdev,
1719                                                 phys_pg_pack->pages,
1720                                                 phys_pg_pack->npages,
1721                                                 phys_pg_pack->page_size,
1722                                                 attachment->dev,
1723                                                 dir);
1724         else
1725                 sgt = alloc_sgt_from_device_pages(hdev,
1726                                                 &hl_dmabuf->device_address,
1727                                                 1,
1728                                                 hl_dmabuf->dmabuf->size,
1729                                                 attachment->dev,
1730                                                 dir);
1731
1732         if (IS_ERR(sgt))
1733                 dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
1734
1735         return sgt;
1736 }
1737
1738 static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
1739                                   struct sg_table *sgt,
1740                                   enum dma_data_direction dir)
1741 {
1742         struct scatterlist *sg;
1743         int i;
1744
1745         /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
1746          * only in the 'device' domain (after all, it maps a PCI bar address which points to the
1747          * device memory).
1748          *
1749          * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
1750          * a sync of the memory to the CPU's cache, as it never resided inside that cache.
1751          */
1752         for_each_sgtable_dma_sg(sgt, sg, i)
1753                 dma_unmap_resource(attachment->dev, sg_dma_address(sg),
1754                                         sg_dma_len(sg), dir,
1755                                         DMA_ATTR_SKIP_CPU_SYNC);
1756
1757         /* Need to restore orig_nents because sg_free_table() uses that field */
1758         sgt->orig_nents = sgt->nents;
1759         sg_free_table(sgt);
1760         kfree(sgt);
1761 }
1762
1763 static void hl_release_dmabuf(struct dma_buf *dmabuf)
1764 {
1765         struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
1766         struct hl_ctx *ctx = hl_dmabuf->ctx;
1767         struct hl_device *hdev = ctx->hdev;
1768         struct hl_vm *vm = &hdev->vm;
1769
1770         if (hl_dmabuf->phys_pg_pack) {
1771                 spin_lock(&vm->idr_lock);
1772                 hl_dmabuf->phys_pg_pack->exporting_cnt--;
1773                 spin_unlock(&vm->idr_lock);
1774         }
1775
1776         hl_ctx_put(hl_dmabuf->ctx);
1777
1778         kfree(hl_dmabuf);
1779 }
1780
1781 static const struct dma_buf_ops habanalabs_dmabuf_ops = {
1782         .attach = hl_dmabuf_attach,
1783         .map_dma_buf = hl_map_dmabuf,
1784         .unmap_dma_buf = hl_unmap_dmabuf,
1785         .release = hl_release_dmabuf,
1786 };
1787
1788 static int export_dmabuf_common(struct hl_ctx *ctx,
1789                                 struct hl_dmabuf_priv *hl_dmabuf,
1790                                 u64 total_size, int flags, int *dmabuf_fd)
1791 {
1792         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1793         struct hl_device *hdev = ctx->hdev;
1794         int rc, fd;
1795
1796         exp_info.ops = &habanalabs_dmabuf_ops;
1797         exp_info.size = total_size;
1798         exp_info.flags = flags;
1799         exp_info.priv = hl_dmabuf;
1800
1801         hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
1802         if (IS_ERR(hl_dmabuf->dmabuf)) {
1803                 dev_err(hdev->dev, "failed to export dma-buf\n");
1804                 return PTR_ERR(hl_dmabuf->dmabuf);
1805         }
1806
1807         fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
1808         if (fd < 0) {
1809                 dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
1810                 rc = fd;
1811                 goto err_dma_buf_put;
1812         }
1813
1814         hl_dmabuf->ctx = ctx;
1815         hl_ctx_get(hl_dmabuf->ctx);
1816
1817         *dmabuf_fd = fd;
1818
1819         return 0;
1820
1821 err_dma_buf_put:
1822         dma_buf_put(hl_dmabuf->dmabuf);
1823         return rc;
1824 }
1825
1826 /**
1827  * export_dmabuf_from_addr() - export a dma-buf object for the given memory
1828  *                             address and size.
1829  * @ctx: pointer to the context structure.
1830  * @device_addr:  device memory physical address.
1831  * @size: size of device memory.
1832  * @flags: DMA-BUF file/FD flags.
1833  * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
1834  *
1835  * Create and export a dma-buf object for an existing memory allocation inside
1836  * the device memory, and return a FD which is associated with the dma-buf
1837  * object.
1838  *
1839  * Return: 0 on success, non-zero for failure.
1840  */
1841 static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
1842                                         u64 size, int flags, int *dmabuf_fd)
1843 {
1844         struct hl_dmabuf_priv *hl_dmabuf;
1845         struct hl_device *hdev = ctx->hdev;
1846         struct asic_fixed_properties *prop;
1847         u64 bar_address;
1848         int rc;
1849
1850         prop = &hdev->asic_prop;
1851
1852         if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
1853                 dev_dbg(hdev->dev,
1854                         "exported device memory address 0x%llx should be aligned to 0x%lx\n",
1855                         device_addr, PAGE_SIZE);
1856                 return -EINVAL;
1857         }
1858
1859         if (size < PAGE_SIZE) {
1860                 dev_dbg(hdev->dev,
1861                         "exported device memory size %llu should be equal to or greater than %lu\n",
1862                         size, PAGE_SIZE);
1863                 return -EINVAL;
1864         }
1865
1866         if (device_addr < prop->dram_user_base_address ||
1867                                 device_addr + size > prop->dram_end_address ||
1868                                 device_addr + size < device_addr) {
1869                 dev_dbg(hdev->dev,
1870                         "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
1871                         device_addr, size);
1872                 return -EINVAL;
1873         }
1874
1875         bar_address = hdev->dram_pci_bar_start +
1876                         (device_addr - prop->dram_base_address);
1877
1878         if (bar_address + size >
1879                         hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1880                         bar_address + size < bar_address) {
1881                 dev_dbg(hdev->dev,
1882                         "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
1883                         device_addr, size);
1884                 return -EINVAL;
1885         }
1886
1887         hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1888         if (!hl_dmabuf)
1889                 return -ENOMEM;
1890
1891         hl_dmabuf->device_address = device_addr;
1892
1893         rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
1894         if (rc)
1895                 goto err_free_dmabuf_wrapper;
1896
1897         return 0;
1898
1899 err_free_dmabuf_wrapper:
1900         kfree(hl_dmabuf);
1901         return rc;
1902 }
1903
1904 /**
1905  * export_dmabuf_from_handle() - export a dma-buf object for the given memory
1906  *                               handle.
1907  * @ctx: pointer to the context structure.
1908  * @handle: device memory allocation handle.
1909  * @flags: DMA-BUF file/FD flags.
1910  * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
1911  *
1912  * Create and export a dma-buf object for an existing memory allocation inside
1913  * the device memory, and return a FD which is associated with the dma-buf
1914  * object.
1915  *
1916  * Return: 0 on success, non-zero for failure.
1917  */
1918 static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
1919                                         int *dmabuf_fd)
1920 {
1921         struct hl_vm_phys_pg_pack *phys_pg_pack;
1922         struct hl_dmabuf_priv *hl_dmabuf;
1923         struct hl_device *hdev = ctx->hdev;
1924         struct asic_fixed_properties *prop;
1925         struct hl_vm *vm = &hdev->vm;
1926         u64 bar_address;
1927         int rc, i;
1928
1929         prop = &hdev->asic_prop;
1930
1931         if (upper_32_bits(handle)) {
1932                 dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
1933                 return -EINVAL;
1934         }
1935
1936         spin_lock(&vm->idr_lock);
1937
1938         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
1939         if (!phys_pg_pack) {
1940                 spin_unlock(&vm->idr_lock);
1941                 dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
1942                 return -EINVAL;
1943         }
1944
1945         /* increment now to avoid freeing device memory while exporting */
1946         phys_pg_pack->exporting_cnt++;
1947
1948         spin_unlock(&vm->idr_lock);
1949
1950         if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
1951                 dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
1952                 rc = -EINVAL;
1953                 goto err_dec_exporting_cnt;
1954         }
1955
1956         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
1957
1958                 bar_address = hdev->dram_pci_bar_start +
1959                                                 (phys_pg_pack->pages[i] -
1960                                                 prop->dram_base_address);
1961
1962                 if (bar_address + phys_pg_pack->page_size >
1963                         hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1964                         bar_address + phys_pg_pack->page_size < bar_address) {
1965
1966                         dev_dbg(hdev->dev,
1967                                 "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
1968                                 phys_pg_pack->pages[i],
1969                                 phys_pg_pack->page_size);
1970
1971                         rc = -EINVAL;
1972                         goto err_dec_exporting_cnt;
1973                 }
1974         }
1975
1976         hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1977         if (!hl_dmabuf) {
1978                 rc = -ENOMEM;
1979                 goto err_dec_exporting_cnt;
1980         }
1981
1982         hl_dmabuf->phys_pg_pack = phys_pg_pack;
1983
1984         rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
1985                                 flags, dmabuf_fd);
1986         if (rc)
1987                 goto err_free_dmabuf_wrapper;
1988
1989         return 0;
1990
1991 err_free_dmabuf_wrapper:
1992         kfree(hl_dmabuf);
1993
1994 err_dec_exporting_cnt:
1995         spin_lock(&vm->idr_lock);
1996         phys_pg_pack->exporting_cnt--;
1997         spin_unlock(&vm->idr_lock);
1998
1999         return rc;
2000 }
2001
2002 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
2003 {
2004         struct hl_device *hdev = hpriv->hdev;
2005         u64 block_handle, device_addr = 0;
2006         struct hl_ctx *ctx = hpriv->ctx;
2007         u32 handle = 0, block_size;
2008         int rc;
2009
2010         switch (args->in.op) {
2011         case HL_MEM_OP_ALLOC:
2012                 if (args->in.alloc.mem_size == 0) {
2013                         dev_err(hdev->dev, "alloc size must be larger than 0\n");
2014                         rc = -EINVAL;
2015                         goto out;
2016                 }
2017
2018                 /* Force contiguous as there are no real MMU
2019                  * translations to overcome physical memory gaps
2020                  */
2021                 args->in.flags |= HL_MEM_CONTIGUOUS;
2022                 rc = alloc_device_memory(ctx, &args->in, &handle);
2023
2024                 memset(args, 0, sizeof(*args));
2025                 args->out.handle = (__u64) handle;
2026                 break;
2027
2028         case HL_MEM_OP_FREE:
2029                 rc = free_device_memory(ctx, &args->in);
2030                 break;
2031
2032         case HL_MEM_OP_MAP:
2033                 if (args->in.flags & HL_MEM_USERPTR) {
2034                         dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
2035                         rc = -EPERM;
2036                 } else {
2037                         rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
2038                         memset(args, 0, sizeof(*args));
2039                         args->out.device_virt_addr = device_addr;
2040                 }
2041
2042                 break;
2043
2044         case HL_MEM_OP_UNMAP:
2045                 rc = 0;
2046                 break;
2047
2048         case HL_MEM_OP_MAP_BLOCK:
2049                 rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
2050                 args->out.block_handle = block_handle;
2051                 args->out.block_size = block_size;
2052                 break;
2053
2054         case HL_MEM_OP_EXPORT_DMABUF_FD:
2055                 dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
2056                 rc = -EPERM;
2057                 break;
2058
2059         case HL_MEM_OP_TS_ALLOC:
2060                 rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2061                 break;
2062         default:
2063                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2064                 rc = -EINVAL;
2065                 break;
2066         }
2067
2068 out:
2069         return rc;
2070 }
2071
2072 static void ts_buff_release(struct hl_mmap_mem_buf *buf)
2073 {
2074         struct hl_ts_buff *ts_buff = buf->private;
2075
2076         vfree(ts_buff->kernel_buff_address);
2077         vfree(ts_buff->user_buff_address);
2078         kfree(ts_buff);
2079 }
2080
2081 static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
2082 {
2083         struct hl_ts_buff *ts_buff = buf->private;
2084
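        /*
         * Keep this mapping fixed: it must not be expanded, copied on fork or
         * included in core dumps, and no swap space is reserved for it.
         */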
2085         vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
2086         return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
2087 }
2088
2089 static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
2090 {
2091         struct hl_ts_buff *ts_buff = NULL;
2092         u32 size, num_elements;
2093         void *p;
2094
2095         num_elements = *(u32 *)args;
2096
2097         ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
2098         if (!ts_buff)
2099                 return -ENOMEM;
2100
2101         /* Allocate the user buffer */
2102         size = num_elements * sizeof(u64);
2103         p = vmalloc_user(size);
2104         if (!p)
2105                 goto free_mem;
2106
2107         ts_buff->user_buff_address = p;
2108         buf->mappable_size = size;
2109
2110         /* Allocate the internal kernel buffer */
2111         size = num_elements * sizeof(struct hl_user_pending_interrupt);
2112         p = vzalloc(size);
2113         if (!p)
2114                 goto free_user_buff;
2115
2116         ts_buff->kernel_buff_address = p;
2117         ts_buff->kernel_buff_size = size;
2118
2119         buf->private = ts_buff;
2120
2121         return 0;
2122
2123 free_user_buff:
2124         vfree(ts_buff->user_buff_address);
2125 free_mem:
2126         kfree(ts_buff);
2127         return -ENOMEM;
2128 }
2129
2130 static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
2131         .topic = "TS",
2132         .mem_id = HL_MMAP_TYPE_TS_BUFF,
2133         .mmap = hl_ts_mmap,
2134         .alloc = hl_ts_alloc_buf,
2135         .release = ts_buff_release,
2136 };
2137
2138 /**
2139  * allocate_timestamps_buffers() - allocate timestamps buffers
2140  * @hpriv: pointer to the private data of the fd
2141  * @args: ioctl input
2142  * @handle: user timestamp buffer handle as an output
2143  *
2144  * This function allocates a timestamps (ts) buffer that will later be mapped
2145  * to the user so it can read the timestamps. It also allocates an extra
2146  * buffer for registration management: since registration must not fail due
2147  * to an out-of-memory situation, a pool is prepared to serve as user
2148  * interrupt nodes, and nodes are picked from this pool instead of being
2149  * allocated dynamically during registration. Each node is also added to the
2150  * mapping hash, which maps the user ts buffer to the internal kernel ts buffer.
2151  */
2152 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
2153 {
2154         struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
2155         struct hl_mmap_mem_buf *buf;
2156
2157         if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
2158                 dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
2159                                 args->num_of_elements, TS_MAX_ELEMENTS_NUM);
2160                 return -EINVAL;
2161         }
2162
2163         buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
2164         if (!buf)
2165                 return -ENOMEM;
2166
2167         *handle = buf->handle;
2168
2169         return 0;
2170 }
2171
2172 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
2173 {
2174         enum hl_device_status status;
2175         union hl_mem_args *args = data;
2176         struct hl_device *hdev = hpriv->hdev;
2177         struct hl_ctx *ctx = hpriv->ctx;
2178         u64 block_handle, device_addr = 0;
2179         u32 handle = 0, block_size;
2180         int rc, dmabuf_fd = -EBADF;
2181
2182         if (!hl_device_operational(hdev, &status)) {
2183                 dev_warn_ratelimited(hdev->dev,
2184                         "Device is %s. Can't execute MEMORY IOCTL\n",
2185                         hdev->status[status]);
2186                 return -EBUSY;
2187         }
2188
2189         if (!hdev->mmu_enable)
2190                 return mem_ioctl_no_mmu(hpriv, args);
2191
2192         switch (args->in.op) {
2193         case HL_MEM_OP_ALLOC:
2194                 if (args->in.alloc.mem_size == 0) {
2195                         dev_err(hdev->dev,
2196                                 "alloc size must be larger than 0\n");
2197                         rc = -EINVAL;
2198                         goto out;
2199                 }
2200
2201                 /* If DRAM does not support virtual memory the driver won't
2202                  * handle the allocation/freeing of that memory. However, for
2203                  * system administration/monitoring purposes, the driver will
2204                  * keep track of the amount of DRAM memory that is allocated
2205                  * and freed by the user. Because this code totally relies on
2206                  * the user's input, the driver can't ensure the validity
2207                  * of this accounting.
2208                  */
2209                 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2210                         atomic64_add(args->in.alloc.mem_size,
2211                                         &ctx->dram_phys_mem);
2212                         atomic64_add(args->in.alloc.mem_size,
2213                                         &hdev->dram_used_mem);
2214
2215                         dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2216                         rc = 0;
2217
2218                         memset(args, 0, sizeof(*args));
2219                         args->out.handle = 0;
2220                         goto out;
2221                 }
2222
2223                 rc = alloc_device_memory(ctx, &args->in, &handle);
2224
2225                 memset(args, 0, sizeof(*args));
2226                 args->out.handle = (__u64) handle;
2227                 break;
2228
2229         case HL_MEM_OP_FREE:
2230                 /* If DRAM does not support virtual memory the driver won't
2231                  * handle the allocation/freeing of that memory. However, for
2232                  * system administration/monitoring purposes, the driver will
2233                  * keep track of the amount of DRAM memory that is allocated
2234                  * and freed by the user. Because this code totally relies on
2235                  * the user's input, the driver can't ensure the validity
2236                  * of this accounting.
2237                  */
2238                 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2239                         atomic64_sub(args->in.alloc.mem_size,
2240                                         &ctx->dram_phys_mem);
2241                         atomic64_sub(args->in.alloc.mem_size,
2242                                         &hdev->dram_used_mem);
2243
2244                         dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2245                         rc = 0;
2246
2247                         goto out;
2248                 }
2249
2250                 rc = free_device_memory(ctx, &args->in);
2251                 break;
2252
2253         case HL_MEM_OP_MAP:
2254                 rc = map_device_va(ctx, &args->in, &device_addr);
2255
2256                 memset(args, 0, sizeof(*args));
2257                 args->out.device_virt_addr = device_addr;
2258                 break;
2259
2260         case HL_MEM_OP_UNMAP:
2261                 rc = unmap_device_va(ctx, &args->in, false);
2262                 break;
2263
2264         case HL_MEM_OP_MAP_BLOCK:
2265                 rc = map_block(hdev, args->in.map_block.block_addr,
2266                                 &block_handle, &block_size);
2267                 args->out.block_handle = block_handle;
2268                 args->out.block_size = block_size;
2269                 break;
2270
2271         case HL_MEM_OP_EXPORT_DMABUF_FD:
2272                 if (hdev->asic_prop.dram_supports_virtual_memory)
2273                         rc = export_dmabuf_from_handle(ctx,
2274                                         args->in.export_dmabuf_fd.handle,
2275                                         args->in.flags,
2276                                         &dmabuf_fd);
2277                 else
2278                         rc = export_dmabuf_from_addr(ctx,
2279                                         args->in.export_dmabuf_fd.handle,
2280                                         args->in.export_dmabuf_fd.mem_size,
2281                                         args->in.flags,
2282                                         &dmabuf_fd);
2283                 memset(args, 0, sizeof(*args));
2284                 args->out.fd = dmabuf_fd;
2285                 break;
2286
2287         case HL_MEM_OP_TS_ALLOC:
2288                 rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2289                 break;
2290         default:
2291                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2292                 rc = -EINVAL;
2293                 break;
2294         }
2295
2296 out:
2297         return rc;
2298 }
2299
2300 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2301                                 u32 npages, u64 start, u32 offset,
2302                                 struct hl_userptr *userptr)
2303 {
2304         int rc;
2305
2306         if (!access_ok((void __user *) (uintptr_t) addr, size)) {
2307                 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2308                 return -EFAULT;
2309         }
2310
2311         userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
2312         if (!userptr->pages)
2313                 return -ENOMEM;
2314
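        /*
         * Pin the pages for writing and for a potentially long term
         * (FOLL_LONGTERM), as the device may DMA to/from them until the user
         * unpins the memory.
         */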
2315         rc = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM,
2316                                  userptr->pages);
2317
2318         if (rc != npages) {
2319                 dev_err(hdev->dev,
2320                         "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
2321                         rc, addr, size, npages);
2322                 if (rc < 0)
2323                         goto destroy_pages;
2324                 npages = rc;
2325                 rc = -EFAULT;
2326                 goto put_pages;
2327         }
2328         userptr->npages = npages;
2329
2330         rc = sg_alloc_table_from_pages(userptr->sgt,
2331                                        userptr->pages,
2332                                        npages, offset, size, GFP_KERNEL);
2333         if (rc < 0) {
2334                 dev_err(hdev->dev, "failed to create SG table from pages\n");
2335                 goto put_pages;
2336         }
2337
2338         return 0;
2339
2340 put_pages:
2341         unpin_user_pages(userptr->pages, npages);
2342 destroy_pages:
2343         kvfree(userptr->pages);
2344         return rc;
2345 }
2346
2347 /**
2348  * hl_pin_host_memory() - pins a chunk of host memory.
2349  * @hdev: pointer to the habanalabs device structure.
2350  * @addr: the host virtual address of the memory area.
2351  * @size: the size of the memory area.
2352  * @userptr: pointer to hl_userptr structure.
2353  *
2354  * This function does the following:
2355  * - Pins the physical pages.
2356  * - Creates an SG list from those pages.
2357  */
2358 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2359                                         struct hl_userptr *userptr)
2360 {
2361         u64 start, end;
2362         u32 npages, offset;
2363         int rc;
2364
2365         if (!size) {
2366                 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2367                 return -EINVAL;
2368         }
2369
2370         /*
2371          * If the combination of the address and size requested for this memory
2372          * region causes an integer overflow, return error.
2373          */
2374         if (((addr + size) < addr) ||
2375                         PAGE_ALIGN(addr + size) < (addr + size)) {
2376                 dev_err(hdev->dev,
2377                         "user pointer 0x%llx + %llu causes integer overflow\n",
2378                         addr, size);
2379                 return -EINVAL;
2380         }
2381
2382         userptr->pid = current->pid;
2383         userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
2384         if (!userptr->sgt)
2385                 return -ENOMEM;
2386
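        /*
         * Work on whole pages that cover the user buffer. As an illustrative
         * example, assuming 4 KB pages: addr = 0x1234 and size = 0x3000 give
         * start = 0x1000, offset = 0x234, end = PAGE_ALIGN(0x4234) = 0x5000
         * and npages = 4.
         */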
2387         start = addr & PAGE_MASK;
2388         offset = addr & ~PAGE_MASK;
2389         end = PAGE_ALIGN(addr + size);
2390         npages = (end - start) >> PAGE_SHIFT;
2391
2392         userptr->size = size;
2393         userptr->addr = addr;
2394         userptr->dma_mapped = false;
2395         INIT_LIST_HEAD(&userptr->job_node);
2396
2397         rc = get_user_memory(hdev, addr, size, npages, start, offset,
2398                                 userptr);
2399         if (rc) {
2400                 dev_err(hdev->dev,
2401                         "failed to get user memory for address 0x%llx\n",
2402                         addr);
2403                 goto free_sgt;
2404         }
2405
2406         hl_debugfs_add_userptr(hdev, userptr);
2407
2408         return 0;
2409
2410 free_sgt:
2411         kfree(userptr->sgt);
2412         return rc;
2413 }
2414
2415 /*
2416  * hl_unpin_host_memory - unpins a chunk of host memory.
2417  * @hdev: pointer to the habanalabs device structure
2418  * @userptr: pointer to hl_userptr structure
2419  *
2420  * This function does the following:
2421  * - Unpins the physical pages related to the host memory
2422  * - Frees the SG list
2423  */
2424 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2425 {
2426         hl_debugfs_remove_userptr(hdev, userptr);
2427
2428         if (userptr->dma_mapped)
2429                 hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
2430
2431         unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
2432         kvfree(userptr->pages);
2433
2434         list_del(&userptr->job_node);
2435
2436         sg_free_table(userptr->sgt);
2437         kfree(userptr->sgt);
2438 }
2439
2440 /**
2441  * hl_userptr_delete_list() - clear userptr list.
2442  * @hdev: pointer to the habanalabs device structure.
2443  * @userptr_list: pointer to the list to clear.
2444  *
2445  * This function does the following:
2446  * - Iterates over the list and unpins the host memory and frees the userptr
2447  *   structure.
2448  */
2449 void hl_userptr_delete_list(struct hl_device *hdev,
2450                                 struct list_head *userptr_list)
2451 {
2452         struct hl_userptr *userptr, *tmp;
2453
2454         list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
2455                 hl_unpin_host_memory(hdev, userptr);
2456                 kfree(userptr);
2457         }
2458
2459         INIT_LIST_HEAD(userptr_list);
2460 }
2461
2462 /**
2463  * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
2464  * @hdev: pointer to the habanalabs device structure.
2465  * @addr: user address to check.
2466  * @size: user block size to check.
2467  * @userptr_list: pointer to the list to search in.
2468  * @userptr: pointer to userptr to check.
2469  *
2470  * This function does the following:
2471  * - Iterates over the list and checks if the given userptr is in it, which
2472  *   means it is pinned. If so, returns true, otherwise returns false.
2473  */
2474 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2475                                 u32 size, struct list_head *userptr_list,
2476                                 struct hl_userptr **userptr)
2477 {
2478         list_for_each_entry((*userptr), userptr_list, job_node) {
2479                 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
2480                         return true;
2481         }
2482
2483         return false;
2484 }
2485
2486 /**
2487  * va_range_init() - initialize virtual addresses range.
2488  * @hdev: pointer to the habanalabs device structure.
2489  * @va_ranges: pointer to va_ranges array.
2490  * @range_type: virtual address range type.
2491  * @start: range start address, inclusive.
2492  * @end: range end address, inclusive.
2493  * @page_size: page size for this va_range.
2494  *
2495  * This function does the following:
2496  * - Initializes the virtual addresses list of the given range with the given
2497  *   addresses.
2498  */
2499 static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
2500                                 enum hl_va_range_type range_type, u64 start,
2501                                 u64 end, u32 page_size)
2502 {
2503         struct hl_va_range *va_range = va_ranges[range_type];
2504         int rc;
2505
2506         INIT_LIST_HEAD(&va_range->list);
2507
2508         /*
2509          * Align the range to the page size.
2510          * It is the caller's responsibility to align the addresses if the
2511          * page size is not a power of 2.
2512          */
2513
2514         if (is_power_of_2(page_size)) {
2515                 start = round_up(start, page_size);
2516
2517                 /*
2518                  * The end of the range is inclusive, hence we need to align it
2519                  * to the end of the last full page in the range. For example if
2520                  * end = 0x3ff5 with page size 0x1000, we need to align it to
2521                  * 0x2fff. The remaining 0xff5 bytes do not form a full page.
2522                  */
2523                 end = round_down(end + 1, page_size) - 1;
2524         }
2525
2526         if (start >= end) {
2527                 dev_err(hdev->dev, "too small vm range for va list\n");
2528                 return -EFAULT;
2529         }
2530
2531         rc = add_va_block(hdev, va_range, start, end);
2532
2533         if (rc) {
2534                 dev_err(hdev->dev, "Failed to init va list\n");
2535                 return rc;
2536         }
2537
2538         va_range->start_addr = start;
2539         va_range->end_addr = end;
2540         va_range->page_size = page_size;
2541
2542         return 0;
2543 }
2544
2545 /**
2546  * va_range_fini() - clear a virtual addresses range.
2547  * @hdev: pointer to the habanalabs structure.
2548  * @va_range: pointer to virtual addresses range.
2549  *
2550  * This function does the following:
2551  * - Frees the virtual addresses block list and its lock.
2552  */
2553 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2554 {
2555         mutex_lock(&va_range->lock);
2556         clear_va_list_locked(hdev, &va_range->list);
2557         mutex_unlock(&va_range->lock);
2558
2559         mutex_destroy(&va_range->lock);
2560         kfree(va_range);
2561 }
2562
2563 /**
2564  * vm_ctx_init_with_ranges() - initialize virtual memory for context.
2565  * @ctx: pointer to the habanalabs context structure.
2566  * @host_range_start: host virtual addresses range start.
2567  * @host_range_end: host virtual addresses range end.
2568  * @host_page_size: host page size.
2569  * @host_huge_range_start: host virtual addresses range start for memory
2570  *                         allocated with huge pages.
2571  * @host_huge_range_end: host virtual addresses range end for memory allocated
2572  *                        with huge pages.
2573  * @host_huge_page_size: host huge page size.
2574  * @dram_range_start: dram virtual addresses range start.
2575  * @dram_range_end: dram virtual addresses range end.
2576  * @dram_page_size: dram page size.
2577  *
2578  * This function initializes the following:
2579  * - MMU for context.
2580  * - Virtual address to area descriptor hashtable.
2581  * - Virtual block list of available virtual memory.
2582  */
2583 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
2584                                         u64 host_range_start,
2585                                         u64 host_range_end,
2586                                         u32 host_page_size,
2587                                         u64 host_huge_range_start,
2588                                         u64 host_huge_range_end,
2589                                         u32 host_huge_page_size,
2590                                         u64 dram_range_start,
2591                                         u64 dram_range_end,
2592                                         u32 dram_page_size)
2593 {
2594         struct hl_device *hdev = ctx->hdev;
2595         int i, rc;
2596
2597         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
2598                 ctx->va_range[i] =
2599                         kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
2600                 if (!ctx->va_range[i]) {
2601                         rc = -ENOMEM;
2602                         goto free_va_range;
2603                 }
2604         }
2605
2606         rc = hl_mmu_ctx_init(ctx);
2607         if (rc) {
2608                 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2609                 goto free_va_range;
2610         }
2611
2612         mutex_init(&ctx->mem_hash_lock);
2613         hash_init(ctx->mem_hash);
2614
2615         mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2616
2617         rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
2618                         host_range_start, host_range_end, host_page_size);
2619         if (rc) {
2620                 dev_err(hdev->dev, "failed to init host vm range\n");
2621                 goto mmu_ctx_fini;
2622         }
2623
2624         if (hdev->pmmu_huge_range) {
2625                 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2626
2627                 rc = va_range_init(hdev,
2628                         ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
2629                         host_huge_range_start, host_huge_range_end,
2630                         host_huge_page_size);
2631                 if (rc) {
2632                         dev_err(hdev->dev,
2633                                 "failed to init host huge vm range\n");
2634                         goto clear_host_va_range;
2635                 }
2636         } else {
2637                 kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2638                 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
2639                                 ctx->va_range[HL_VA_RANGE_TYPE_HOST];
2640         }
2641
2642         mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2643
2644         rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
2645                         dram_range_start, dram_range_end, dram_page_size);
2646         if (rc) {
2647                 dev_err(hdev->dev, "failed to init dram vm range\n");
2648                 goto clear_host_huge_va_range;
2649         }
2650
2651         hl_debugfs_add_ctx_mem_hash(hdev, ctx);
2652
2653         return 0;
2654
2655 clear_host_huge_va_range:
2656         mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2657
2658         if (hdev->pmmu_huge_range) {
2659                 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2660                 clear_va_list_locked(hdev,
2661                         &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
2662                 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2663         }
2664 clear_host_va_range:
2665         if (hdev->pmmu_huge_range)
2666                 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2667         mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2668         clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2669         mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2670 mmu_ctx_fini:
2671         mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2672         mutex_destroy(&ctx->mem_hash_lock);
2673         hl_mmu_ctx_fini(ctx);
2674 free_va_range:
2675         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
2676                 kfree(ctx->va_range[i]);
2677
2678         return rc;
2679 }
2680
2681 int hl_vm_ctx_init(struct hl_ctx *ctx)
2682 {
2683         struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2684         u64 host_range_start, host_range_end, host_huge_range_start,
2685                 host_huge_range_end, dram_range_start, dram_range_end;
2686         u32 host_page_size, host_huge_page_size, dram_page_size;
2687
2688         atomic64_set(&ctx->dram_phys_mem, 0);
2689
2690         /*
2691          * - If MMU is enabled, init the ranges as usual.
2692          * - If MMU is disabled, in case of host mapping, the returned address
2693          *   is the given one.
2694          *   In case of DRAM mapping, the returned address is the physical
2695          *   address of the memory related to the given handle.
2696          */
2697         if (!ctx->hdev->mmu_enable)
2698                 return 0;
2699
2700         dram_range_start = prop->dmmu.start_addr;
2701         dram_range_end = prop->dmmu.end_addr - 1;
2702         dram_page_size = prop->dram_page_size ?
2703                                 prop->dram_page_size : prop->dmmu.page_size;
2704         host_range_start = prop->pmmu.start_addr;
2705         host_range_end = prop->pmmu.end_addr - 1;
2706         host_page_size = prop->pmmu.page_size;
2707         host_huge_range_start = prop->pmmu_huge.start_addr;
2708         host_huge_range_end = prop->pmmu_huge.end_addr - 1;
2709         host_huge_page_size = prop->pmmu_huge.page_size;
2710
2711         return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
2712                         host_page_size, host_huge_range_start,
2713                         host_huge_range_end, host_huge_page_size,
2714                         dram_range_start, dram_range_end, dram_page_size);
2715 }
2716
2717 /**
2718  * hl_vm_ctx_fini() - virtual memory teardown of context.
2719  * @ctx: pointer to the habanalabs context structure.
2720  *
2721  * This function performs teardown of the following:
2722  * - Virtual block list of available virtual memory.
2723  * - Virtual address to area descriptor hashtable.
2724  * - MMU for context.
2725  *
2726  * In addition this function does the following:
2727  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2728  *   hashtable should be empty as no valid mappings should exist at this
2729  *   point.
2730  * - Frees any existing physical page list from the idr which relates to the
2731  *   current context asid.
2732  * - This function checks the virtual block list for correctness. At this point
2733  *   the list should contain one element which describes the whole virtual
2734  *   memory range of the context. Otherwise, a warning is printed.
2735  */
2736 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2737 {
2738         struct hl_vm_phys_pg_pack *phys_pg_list, *tmp_phys_node;
2739         struct hl_device *hdev = ctx->hdev;
2740         struct hl_vm_hash_node *hnode;
2741         struct hl_vm *vm = &hdev->vm;
2742         struct hlist_node *tmp_node;
2743         struct list_head free_list;
2744         struct hl_mem_in args;
2745         int i;
2746
2747         if (!hdev->mmu_enable)
2748                 return;
2749
2750         hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2751
2752         /*
2753          * Clearly something went wrong on hard reset so no point in printing
2754          * another side effect error
2755          */
2756         if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
2757                 dev_dbg(hdev->dev,
2758                         "user released device without removing its memory mappings\n");
2759
2760         hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2761                 dev_dbg(hdev->dev,
2762                         "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2763                         hnode->vaddr, ctx->asid);
2764                 args.unmap.device_virt_addr = hnode->vaddr;
2765                 unmap_device_va(ctx, &args, true);
2766         }
2767
2768         mutex_lock(&hdev->mmu_lock);
2769
2770         /* invalidate the cache once after the unmapping loop */
2771         hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
2772         hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
2773
2774         mutex_unlock(&hdev->mmu_lock);
2775
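        /*
         * Collect any leftover physical page packs of this context while
         * holding the idr lock, then free them outside the lock.
         */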
2776         INIT_LIST_HEAD(&free_list);
2777
2778         spin_lock(&vm->idr_lock);
2779         idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2780                 if (phys_pg_list->asid == ctx->asid) {
2781                         dev_dbg(hdev->dev,
2782                                 "page list 0x%px of asid %d is still alive\n",
2783                                 phys_pg_list, ctx->asid);
2784
2785                         atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
2786                         idr_remove(&vm->phys_pg_pack_handles, i);
2787                         list_add(&phys_pg_list->node, &free_list);
2788                 }
2789         spin_unlock(&vm->idr_lock);
2790
2791         list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
2792                 free_phys_pg_pack(hdev, phys_pg_list);
2793
2794         va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2795         va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2796
2797         if (hdev->pmmu_huge_range)
2798                 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2799
2800         mutex_destroy(&ctx->mem_hash_lock);
2801         hl_mmu_ctx_fini(ctx);
2802
2803         /* In this case we need to clear the global accounting of DRAM usage
2804          * because the user notifies us of allocations. If the user process is
2805          * gone, all DRAM is available again
2806          */
2807         if (ctx->asid != HL_KERNEL_ASID_ID &&
2808                         !hdev->asic_prop.dram_supports_virtual_memory)
2809                 atomic64_set(&hdev->dram_used_mem, 0);
2810 }
2811
2812 /**
2813  * hl_vm_init() - initialize virtual memory module.
2814  * @hdev: pointer to the habanalabs device structure.
2815  *
2816  * This function initializes the following:
2817  * - MMU module.
2818  * - DRAM physical pages pool of 2MB.
2819  * - Idr for device memory allocation handles.
2820  */
2821 int hl_vm_init(struct hl_device *hdev)
2822 {
2823         struct asic_fixed_properties *prop = &hdev->asic_prop;
2824         struct hl_vm *vm = &hdev->vm;
2825         int rc;
2826
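        /*
         * The pool's minimal allocation order is derived from the DRAM page
         * size when it is a power of 2; otherwise the smaller, power-of-2
         * DRAM_POOL_PAGE_SIZE granularity is used instead.
         */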
2827         if (is_power_of_2(prop->dram_page_size))
2828                 vm->dram_pg_pool =
2829                         gen_pool_create(__ffs(prop->dram_page_size), -1);
2830         else
2831                 vm->dram_pg_pool =
2832                         gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
2833
2834         if (!vm->dram_pg_pool) {
2835                 dev_err(hdev->dev, "Failed to create dram page pool\n");
2836                 return -ENOMEM;
2837         }
2838
2839         kref_init(&vm->dram_pg_pool_refcount);
2840
2841         rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2842                         prop->dram_end_address - prop->dram_user_base_address,
2843                         -1);
2844
2845         if (rc) {
2846                 dev_err(hdev->dev,
2847                         "Failed to add memory to dram page pool %d\n", rc);
2848                 goto pool_add_err;
2849         }
2850
2851         spin_lock_init(&vm->idr_lock);
2852         idr_init(&vm->phys_pg_pack_handles);
2853
2854         atomic64_set(&hdev->dram_used_mem, 0);
2855
2856         vm->init_done = true;
2857
2858         return 0;
2859
2860 pool_add_err:
2861         gen_pool_destroy(vm->dram_pg_pool);
2862
2863         return rc;
2864 }
2865
2866 /**
2867  * hl_vm_fini() - virtual memory module teardown.
2868  * @hdev: pointer to the habanalabs device structure.
2869  *
2870  * This function performs teardown of the following:
2871  * - Idr for device memory allocation handles.
2872  * - DRAM physical pages pool of 2MB.
2873  * - MMU module.
2874  */
2875 void hl_vm_fini(struct hl_device *hdev)
2876 {
2877         struct hl_vm *vm = &hdev->vm;
2878
2879         if (!vm->init_done)
2880                 return;
2881
2882         /*
2883          * At this point all the contexts should be freed and hence no DRAM
2884          * memory should be in use. Hence the DRAM pool should be freed here.
2885          */
2886         if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2887                 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2888                                 __func__);
2889
2890         vm->init_done = false;
2891 }
2892
2893 /**
2894  * hl_hw_block_mem_init() - HW block memory initialization.
2895  * @ctx: pointer to the habanalabs context structure.
2896  *
2897  * This function initializes the HW block virtual mapped addresses list and
2898  * its lock.
2899  */
2900 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2901 {
2902         mutex_init(&ctx->hw_block_list_lock);
2903         INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2904 }
2905
2906 /**
2907  * hl_hw_block_mem_fini() - HW block memory teardown.
2908  * @ctx: pointer to the habanalabs context structure.
2909  *
2910  * This function clears the HW block virtual mapped addresses list and destroys
2911  * its lock.
2912  */
2913 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
2914 {
2915         struct hl_vm_hw_block_list_node *lnode, *tmp;
2916
2917         if (!list_empty(&ctx->hw_block_mem_list))
2918                 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
2919
2920         list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
2921                 list_del(&lnode->node);
2922                 kfree(lnode);
2923         }
2924
2925         mutex_destroy(&ctx->hw_block_list_lock);
2926 }