/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/* An array of the pfns included in the on-demand paging umem. */
	unsigned long *pfn_list;

	/*
	 * An array with DMA addresses mapped for pfns in pfn_list.
	 * The lower two bits designate access permissions.
	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the pfn_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the address of the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
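
/*
 * A minimal sketch, not part of the original header: using the helpers
 * above, a driver can turn a user virtual address inside the umem into an
 * index for pfn_list/dma_list. The helper name ib_umem_odp_page_index() is
 * hypothetical and only illustrates the arithmetic.
 */
static inline unsigned long ib_umem_odp_page_index(struct ib_umem_odp *umem_odp,
						   unsigned long user_va)
{
	/* Pages start at ib_umem_start() and are 1 << page_shift bytes each. */
	return (user_va - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
}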

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to ib_umem_odp_map_dma_and_lock().
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
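
/*
 * Sketch only, not from the original header: decoding one dma_list entry.
 * The low two bits carry the permissions described above, and
 * ODP_DMA_ADDR_MASK recovers the bus address; the helper name
 * odp_entry_to_dma_addr() is hypothetical.
 */
static inline dma_addr_t odp_entry_to_dma_addr(dma_addr_t entry, bool *writable)
{
	*writable = !!(entry & ODP_WRITE_ALLOWED_BIT);	/* permission bits */
	return entry & ODP_DMA_ADDR_MASK;		/* strip them for DMA */
}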

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */
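
/*
 * Illustrative only, not part of the header: a rough driver-side flow built
 * from the declarations above, as it might appear in a .c file that includes
 * <rdma/ib_umem_odp.h>. It assumes ib_umem_odp_map_dma_and_lock() returns the
 * number of faulted pages (or a negative errno) with umem_mutex held on
 * success, as its name suggests, and that the caller supplies its own
 * struct mmu_interval_notifier_ops; the function example_odp_mr_fault() is
 * hypothetical.
 */
static int example_odp_mr_fault(struct ib_device *device, unsigned long addr,
				size_t size, int access,
				const struct mmu_interval_notifier_ops *ops,
				u64 fault_offset, u64 fault_bytes)
{
	struct ib_umem_odp *umem_odp;
	int ret;

	/* Register the user range for on-demand paging. */
	umem_odp = ib_umem_odp_get(device, addr, size, access, ops);
	if (IS_ERR(umem_odp))
		return PTR_ERR(umem_odp);

	/* Fault and DMA-map part of the range, asking for read and write. */
	ret = ib_umem_odp_map_dma_and_lock(umem_odp, fault_offset, fault_bytes,
					   ODP_READ_ALLOWED_BIT |
					   ODP_WRITE_ALLOWED_BIT, true);
	if (ret >= 0) {
		/* pfn_list/dma_list would be consumed here, under umem_mutex. */
		mutex_unlock(&umem_odp->umem_mutex);
	}

	/* Tear down: drop any DMA mappings, then release the umem. */
	mutex_lock(&umem_odp->umem_mutex);
	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
				    ib_umem_end(umem_odp));
	mutex_unlock(&umem_odp->umem_mutex);
	ib_umem_odp_release(umem_odp);

	return ret < 0 ? ret : 0;
}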