Commit | Line | Data |
---|---|---|
29b24f6c GX |
1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* | |
5eb20ec3 | 3 | * Copyright (C) 2018 HUAWEI, Inc. |
592e7cd0 | 4 | * https://www.huawei.com/ |
5eb20ec3 | 5 | * Created by Gao Xiang <gaoxiang25@huawei.com> |
5eb20ec3 | 6 | */ |
57b78c9f GX |
7 | #ifndef __EROFS_FS_ZPVEC_H |
8 | #define __EROFS_FS_ZPVEC_H | |
5eb20ec3 | 9 | |
57b78c9f | 10 | #include "tagptr.h" |
5eb20ec3 | 11 | |
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	/*
	 * Must be 0: z_erofs_pagevec_enqueue() compares it against
	 * (uintptr_t)NULL and has a compile-time check for this.
	 * Exclusive pages may be reused to store the pagevec itself.
	 */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
22 | ||
/*
 * Compile-time assertion helper: any reachable call to this function is a
 * build error, used by z_erofs_pagevec_enqueue() to guarantee that
 * Z_EROFS_PAGE_TYPE_EXCLUSIVE is 0.
 */
extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
	__bad_page_type_exclusive(void);
25 | ||
/* pagevec tagged pointer: a page pointer folded with a page-type tag (see tagptr.h) */
typedef tagptr2_t erofs_vtptr_t;
28 | ||
/* pagevec collector */
struct z_erofs_pagevec_ctor {
	/*
	 * curr: pagevec page currently mapped (kunmapped on exit/pagedown);
	 * next: page reserved from an exclusive slot to back the next chunk
	 */
	struct page *curr, *next;
	/* mapped array of tagged pointers (the pagevec slots) */
	erofs_vtptr_t *pages;

	/* nr: number of slots in the current chunk; index: next slot cursor */
	unsigned int nr, index;
};
36 | ||
37 | static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, | |
38 | bool atomic) | |
39 | { | |
561fb35a | 40 | if (!ctor->curr) |
5eb20ec3 GX |
41 | return; |
42 | ||
43 | if (atomic) | |
44 | kunmap_atomic(ctor->pages); | |
45 | else | |
46 | kunmap(ctor->curr); | |
47 | } | |
48 | ||
/*
 * Find the page that backs the next pagevec chunk: either the page already
 * reserved in ->next, or the first page within the first @nr slots whose
 * slot is tagged Z_EROFS_PAGE_TYPE_EXCLUSIVE.
 */
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
			       unsigned int nr)
{
	unsigned int index;

	/* keep away from occupied pages */
	if (ctor->next)
		return ctor->next;

	for (index = 0; index < nr; ++index) {
		const erofs_vtptr_t t = ctor->pages[index];
		const unsigned int tags = tagptr_unfold_tags(t);

		/* exclusive pages can be reused to store the pagevec itself */
		if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
			return tagptr_unfold_ptr(t);
	}
	/* a scan of the full chunk must have found an exclusive page */
	DBG_BUGON(nr >= ctor->nr);
	return NULL;
}
69 | ||
70 | static inline void | |
71 | z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, | |
72 | bool atomic) | |
73 | { | |
74 | struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); | |
75 | ||
76 | z_erofs_pagevec_ctor_exit(ctor, atomic); | |
77 | ||
78 | ctor->curr = next; | |
79 | ctor->next = NULL; | |
80 | ctor->pages = atomic ? | |
81 | kmap_atomic(ctor->curr) : kmap(ctor->curr); | |
82 | ||
83 | ctor->nr = PAGE_SIZE / sizeof(struct page *); | |
84 | ctor->index = 0; | |
85 | } | |
86 | ||
/*
 * Initialize the collector over the inline pagevec @pages (an array of
 * @nr slots) and advance it to slot position @i, hopping through chained
 * pagevec pages if @i lies beyond the inline array.  Each hop maps
 * non-atomically (pagedown with atomic == false).
 */
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
					     unsigned int nr,
					     erofs_vtptr_t *pages,
					     unsigned int i)
{
	ctor->nr = nr;
	ctor->curr = ctor->next = NULL;
	ctor->pages = pages;

	if (i >= nr) {
		/* position is past the inline pagevec: walk chained pages */
		i -= nr;
		z_erofs_pagevec_ctor_pagedown(ctor, false);
		/* note: strictly '>' — i == ctor->nr stays in this chunk */
		while (i > ctor->nr) {
			i -= ctor->nr;
			z_erofs_pagevec_ctor_pagedown(ctor, false);
		}
	}
	ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
	ctor->index = i;
}
107 | ||
046d64e1 GX |
/*
 * Append @page with @type to the pagevec.  Returns false (and stores
 * nothing) if a non-exclusive page would take the last free slot, which
 * must remain available for an exclusive page to back the next chunk.
 * *@occupied is set when @page itself is reserved as the next chunk's
 * backing page.
 */
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
					   struct page *page,
					   enum z_erofs_page_type type,
					   bool *occupied)
{
	*occupied = false;
	/* no backing page reserved yet and @page is not exclusive (type != 0) */
	if (!ctor->next && type)
		if (ctor->index + 1 == ctor->nr)
			return false;

	if (ctor->index >= ctor->nr)
		z_erofs_pagevec_ctor_pagedown(ctor, false);

	/* exclusive page type must be 0 */
	if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
		__bad_page_type_exclusive();

	/* should remind that collector->next never equal to 1, 2 */
	/* i.e. type == 0 (exclusive) and ->next == NULL: reserve this page */
	if (type == (uintptr_t)ctor->next) {
		ctor->next = page;
		*occupied = true;
	}
	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
	return true;
}
133 | ||
/*
 * Pop the tagged page at the current position, storing its page type in
 * *@type and clearing the slot.  Crosses into the next chained pagevec
 * page when the current chunk is exhausted (atomic mapping).
 */
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
			enum z_erofs_page_type *type)
{
	erofs_vtptr_t t;

	if (ctor->index >= ctor->nr) {
		/* a backing page must have been reserved on the enqueue side */
		DBG_BUGON(!ctor->next);
		z_erofs_pagevec_ctor_pagedown(ctor, true);
	}

	t = ctor->pages[ctor->index];

	*type = tagptr_unfold_tags(t);

	/* should remind that collector->next never equal to 1, 2 */
	/* i.e. *type == 0 (exclusive) and ->next == NULL: this page backs the next chunk */
	if (*type == (uintptr_t)ctor->next)
		ctor->next = tagptr_unfold_ptr(t);

	/* clear the consumed slot */
	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
	return tagptr_unfold_ptr(t);
}
5eb20ec3 GX |
156 | #endif |
157 |