/* Source: include/linux/kmsan.h, from commit "mm: kmsan: call KMSAN hooks from SLUB code" */
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * KMSAN API for subsystems.
4 *
5 * Copyright (C) 2017-2022 Google LLC
6 * Author: Alexander Potapenko <glider@google.com>
7 *
8 */
9#ifndef _LINUX_KMSAN_H
10#define _LINUX_KMSAN_H
11
12#include <linux/gfp.h>
13#include <linux/kmsan-checks.h>
14#include <linux/types.h>
15
16struct page;
68ef169a 17struct kmem_cache;
b073d7f8
AP
18
19#ifdef CONFIG_KMSAN
20
21/**
22 * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
23 * @page: struct page pointer returned by alloc_pages().
24 * @order: order of allocated struct page.
25 * @flags: GFP flags used by alloc_pages()
26 *
27 * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
28 * @flags contain __GFP_ZERO.
29 */
30void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
31
32/**
33 * kmsan_free_page() - Notify KMSAN about a free_pages() call.
34 * @page: struct page pointer passed to free_pages().
35 * @order: order of deallocated struct page.
36 *
37 * KMSAN marks freed memory as uninitialized.
38 */
39void kmsan_free_page(struct page *page, unsigned int order);
40
41/**
42 * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
43 * @dst: destination page.
44 * @src: source page.
45 *
46 * KMSAN copies the contents of metadata pages for @src into the metadata pages
47 * for @dst. If @dst has no associated metadata pages, nothing happens.
48 * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
49 */
50void kmsan_copy_page_meta(struct page *dst, struct page *src);
51
68ef169a
AP
52/**
53 * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
54 * @s: slab cache the object belongs to.
55 * @object: object pointer.
56 * @flags: GFP flags passed to the allocator.
57 *
58 * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
59 * newly created object, marking it as initialized or uninitialized.
60 */
61void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
62
63/**
64 * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
65 * @s: slab cache the object belongs to.
66 * @object: object pointer.
67 *
68 * KMSAN marks the freed object as uninitialized.
69 */
70void kmsan_slab_free(struct kmem_cache *s, void *object);
71
72/**
73 * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
74 * @ptr: object pointer.
75 * @size: object size.
76 * @flags: GFP flags passed to the allocator.
77 *
78 * Similar to kmsan_slab_alloc(), but for large allocations.
79 */
80void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
81
82/**
83 * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
84 * @ptr: object pointer.
85 *
86 * Similar to kmsan_slab_free(), but for large allocations.
87 */
88void kmsan_kfree_large(const void *ptr);
89
b073d7f8
AP
90/**
91 * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
92 * @start: start of vmapped range.
93 * @end: end of vmapped range.
94 * @prot: page protection flags used for vmap.
95 * @pages: array of pages.
96 * @page_shift: page_shift passed to vmap_range_noflush().
97 *
98 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
99 * vmalloc metadata address range.
100 */
101void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
102 pgprot_t prot, struct page **pages,
103 unsigned int page_shift);
104
105/**
106 * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
107 * @start: start of vunmapped range.
108 * @end: end of vunmapped range.
109 *
110 * KMSAN unmaps the contiguous metadata ranges created by
111 * kmsan_map_kernel_range_noflush().
112 */
113void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
114
115/**
116 * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
117 * @addr: range start.
118 * @end: range end.
119 * @phys_addr: physical range start.
120 * @prot: page protection flags used for ioremap_page_range().
121 * @page_shift: page_shift argument passed to vmap_range_noflush().
122 *
123 * KMSAN creates new metadata pages for the physical pages mapped into the
124 * virtual memory.
125 */
126void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
127 phys_addr_t phys_addr, pgprot_t prot,
128 unsigned int page_shift);
129
130/**
131 * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
132 * @start: range start.
133 * @end: range end.
134 *
135 * KMSAN unmaps the metadata pages for the given range and, unlike for
136 * vunmap_page_range(), also deallocates them.
137 */
138void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
139
140#else
141
142static inline int kmsan_alloc_page(struct page *page, unsigned int order,
143 gfp_t flags)
144{
145 return 0;
146}
147
148static inline void kmsan_free_page(struct page *page, unsigned int order)
149{
150}
151
152static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
153{
154}
155
68ef169a
AP
156static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
157 gfp_t flags)
158{
159}
160
161static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
162{
163}
164
165static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
166 gfp_t flags)
167{
168}
169
170static inline void kmsan_kfree_large(const void *ptr)
171{
172}
173
b073d7f8
AP
174static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
175 unsigned long end,
176 pgprot_t prot,
177 struct page **pages,
178 unsigned int page_shift)
179{
180}
181
182static inline void kmsan_vunmap_range_noflush(unsigned long start,
183 unsigned long end)
184{
185}
186
187static inline void kmsan_ioremap_page_range(unsigned long start,
188 unsigned long end,
189 phys_addr_t phys_addr,
190 pgprot_t prot,
191 unsigned int page_shift)
192{
193}
194
195static inline void kmsan_iounmap_page_range(unsigned long start,
196 unsigned long end)
197{
198}
199
200#endif
201
202#endif /* _LINUX_KMSAN_H */