/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 2)	/* only when !PTE_VALID */
#define PTE_FILE		(_AT(pteval_t, 1) << 3)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
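
/*
 * PTE_DIRTY and PTE_SPECIAL live at bits 55 and 56, within the range
 * that the AArch64 descriptor format reserves for software use, so the
 * MMU ignores them and they never affect hardware translation.
 */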

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
#define VMALLOC_START		UL(0xffffff8000000000)
#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
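
/*
 * Rough layout sketch, assuming the 39-bit VA / 4KB page configuration
 * where PAGE_OFFSET is 0xffffffc000000000:
 *
 *	VMALLOC_START..VMALLOC_END	vmalloc/ioremap space
 *	VMALLOC_END + 64KB		guard gap, then the vmemmap array
 *	PAGE_OFFSET..			linear mapping of RAM
 */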

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#endif
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime to
 * include the cacheable and bufferable bits based on memory policy, as well
 * as any architecture dependent bits like global/ASID and SMP shared mapping
 * bits.
 */
#define _PAGE_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)

extern pgprot_t pgprot_default;

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)

#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
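
/*
 * The PAGE_* variants above are derived from pgprot_default, which is
 * set up at boot; the __PAGE_* variants below are built directly from
 * _PAGE_DEFAULT so they can serve as the static __P/__S protection_map
 * initialisers.
 */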

#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)

#endif /* __ASSEMBLY__ */

#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
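
/*
 * The __Pxwr/__Sxwr index decodes as [exec][write][read]: writable
 * private (__P) mappings become copy-on-write PAGE_COPY, while writable
 * shared (__S) mappings map the page writable for all via PAGE_SHARED.
 */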

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
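
/*
 * A PROT_NONE pte carries PTE_PROT_NONE with PTE_VALID clear: the MMU
 * faults on any access, yet the kernel still sees the entry as present
 * rather than mistaking it for a swap or file pte.
 */
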
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	|= PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,	&= ~PTE_RDONLY);
PTE_BIT_FUNC(mkclean,	&= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,	|= PTE_DIRTY);
PTE_BIT_FUNC(mkold,	&= ~PTE_AF);
PTE_BIT_FUNC(mkyoung,	|= PTE_AF);
PTE_BIT_FUNC(mkspecial,	|= PTE_SPECIAL);

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (!pte_dirty(pte))
			pte = pte_wrprotect(pte);
	}

	set_pte(ptep, pte);
}
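
/*
 * Because a clean user pte is installed read-only above, the first
 * store to the page raises a permission fault and the generic mm code
 * then marks the pte dirty and writable: dirty state is tracked purely
 * in software here, as there is no hardware dirty bit.
 */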

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
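
/*
 * Only PTE_TABLE_BIT (bit 1) distinguishes the two forms: with it
 * cleared, the same descriptor installed one level up acts as a block
 * mapping instead of a pointer to a next-level table.
 */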

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		2
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
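
/* With 4KB pages, PMD_SHIFT is 21, so huge pages here are 2MB. */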

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Software PMD bits for THP
 */

#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 57)

/*
 * THP definitions.
 */
#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite,	&= ~PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,	|= PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
			      PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
			      PMD_SECT_VALID;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)

static inline int has_transparent_hugepage(void)
{
	return 1;
}

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
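
/*
 * PTE_ATTRINDX is only a 3-bit index into the memory attributes
 * programmed into MAIR_EL1 at boot, so the helpers above merely swap
 * that index rather than encode the attributes themselves.
 */
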
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
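
/* A pmd is "bad" when bit 1 is clear, i.e. it is not a table pointer. */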

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#ifndef CONFIG_ARM64_64K_PAGES

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#endif	/* CONFIG_ARM64_64K_PAGES */

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
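
/*
 * Only the access bits named in the mask are taken from newprot; the
 * pfn and the software PTE_DIRTY bit of the old pte survive unchanged.
 */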

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

/*
 * Encode and decode a swap entry:
 *	bits 0, 2:	present (must both be zero)
 *	bit  3:		PTE_FILE
 *	bits 4-9:	swap type
 *	bits 10-63:	swap offset
 */
#define __SWP_TYPE_SHIFT	4
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
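
/*
 * For example, __swp_entry(3, 0x1234) gives (3 << 4) | (0x1234 << 10):
 * bits 0 and 2 stay zero, so the value is invalid to the MMU and is
 * not mistaken for a present pte by the kernel.
 */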

/*
 * Ensure that there are not more swap files than can be encoded in the
 * kernel PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry:
 *	bits 0, 2:	present (must both be zero)
 *	bit  3:		PTE_FILE
 *	bits 4-63:	file offset / PAGE_SIZE
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 4)
#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)

#define PTE_FILE_MAX_BITS	60
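
/* Bits 4-63 carry the page offset, hence the 60-bit limit above. */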

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init()	do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */