/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_MEM_H
#define _SIW_MEM_H

struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
		  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
		  struct siw_mem *mem[], enum ib_access_flags perms,
		  u32 off, int len);
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
		   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
void siw_free_mem(struct kref *ref);

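/* Drop one reference; siw_free_mem() releases the object on the final put. */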
static inline void siw_mem_put(struct siw_mem *mem)
{
	kref_put(&mem->ref, siw_free_mem);
}

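/* A siw_mem object is embedded in its owning siw_mr; map back via container_of(). */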
static inline struct siw_mr *siw_mem2mr(struct siw_mem *m)
{
	return container_of(m, struct siw_mr, mem);
}

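/*
 * Drop the memory references held by an SGL, clearing each slot as it
 * goes. Stops at the first slot that holds no reference (NULL).
 */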
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
	while (num_sge) {
		if (*mem == NULL)
			break;

		siw_mem_put(*mem);
		*mem = NULL;
		mem++;
		num_sge--;
	}
}

#define CHUNK_SHIFT	9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK	(_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK	(~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE	(PAGES_PER_CHUNK * sizeof(struct page *))
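
/*
 * With CHUNK_SHIFT == 9, each chunk holds 512 page pointers, i.e. one
 * 4 KiB allocation on a 64-bit build. For example, page_idx 1000 maps
 * to chunk_idx 1 (1000 >> 9) and page_in_chunk 488 (1000 & 511).
 */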

/*
 * siw_get_upage()
 *
 * Get page pointer for address on given umem.
 *
 * @umem: two dimensional list of page pointers
 * @addr: user virtual address
 */
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{
	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
		     chunk_idx = page_idx >> CHUNK_SHIFT,
		     page_in_chunk = page_idx & ~CHUNK_MASK;

	if (likely(page_idx < umem->num_pages))
		return umem->page_chunk[chunk_idx].plist[page_in_chunk];

	return NULL;
}
#endif