Merge tag 'perf-tools-for-v6.4-3-2023-05-06' of git://git.kernel.org/pub/scm/linux...
[linux-block.git] / include / linux / livepatch.h
CommitLineData
1ccea77e 1/* SPDX-License-Identifier: GPL-2.0-or-later */
b700e7f0
SJ
2/*
3 * livepatch.h - Kernel Live Patching Core
4 *
5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
6 * Copyright (C) 2014 SUSE
b700e7f0
SJ
7 */
8
9#ifndef _LINUX_LIVEPATCH_H_
10#define _LINUX_LIVEPATCH_H_
11
12#include <linux/module.h>
13#include <linux/ftrace.h>
3ec24776 14#include <linux/completion.h>
20e55025 15#include <linux/list.h>
e3ff7c60 16#include <linux/livepatch_sched.h>
b700e7f0 17
7e545d6e
JY
18#if IS_ENABLED(CONFIG_LIVEPATCH)
19
/*
 * Task patch states: which side of an in-flight livepatch transition a
 * task is on (KLP_UNDEFINED when no transition is in progress).
 */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1
b700e7f0
SJ
/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos:	a hint indicating which symbol position the old function
 *		can be found (optional)
 * @old_func:	pointer to the function being patched
 * @kobj:	kobject for sysfs resources
 * @node:	list node for klp_object func_list
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @nop:	temporary patch to use the original code again; dyn. allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state. When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * it is expected the symbol is unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	void *old_func;
	struct kobject kobj;
	struct list_head node;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool nop;
	bool patched;
	bool transition;
};
79
struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:		executed before code patching
 * @post_patch:		executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
103
/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @func_list:	dynamic list of the function entries
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @dynamic:	temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct list_head func_list;
	struct list_head node;
	struct module *mod;
	bool dynamic;
	bool patched;
};
131
/**
 * struct klp_state - state of the system modified by the livepatch
 * @id:		system state identifier (non-zero)
 * @version:	version of the change
 * @data:	custom data
 */
struct klp_state {
	unsigned long id;
	unsigned int version;
	void *data;
};
143
/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @states:	system states that can get modified
 * @replace:	replace all actively used patches
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue-context
 * @finish:	for waiting till it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	struct klp_state *states;
	bool replace;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	struct list_head obj_list;
	bool enabled;
	bool forced;
	struct work_struct free_work;
	struct completion finish;
};
174
/* Walk the static (sentinel-terminated) array of objects in a patch. */
#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

/* Walk obj_list; safe against removal of the current entry. */
#define klp_for_each_object_safe(patch, obj, tmp_obj) \
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

/* Walk the dynamic list of all object entries in a patch. */
#define klp_for_each_object(patch, obj) \
	list_for_each_entry(obj, &patch->obj_list, node)

/* Walk the static (sentinel-terminated) array of funcs in an object. */
#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

/* Walk func_list; safe against removal of the current entry. */
#define klp_for_each_func_safe(obj, func, tmp_func) \
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

/* Walk the dynamic list of all function entries in an object. */
#define klp_for_each_func(obj, func) \
	list_for_each_entry(func, &obj->func_list, node)
194
/*
 * Register and enable @patch.  The parameter name was missing from the
 * original prototype; every other declaration in this header names its
 * parameters, so keep that convention.
 */
int klp_enable_patch(struct klp_patch *patch);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

/* Per-task patch-state handling during a transition (see KLP_* above). */
void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);
d83a7cb3
JP
204static inline bool klp_patch_pending(struct task_struct *task)
205{
206 return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
207}
208
209static inline bool klp_have_reliable_stack(void)
210{
211 return IS_ENABLED(CONFIG_STACKTRACE) &&
212 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
213}
214
/*
 * Constructor/destructor callbacks for shadow variables: @obj is the
 * object the shadow data is attached to, @shadow_data the shadow storage
 * itself, @ctor_data the caller-supplied argument given at allocation.
 */
typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

/* Shadow variables: extra data keyed by (@obj, @id) pairs. */
void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);

/* Lookup of system-state entries (see struct klp_state above). */
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);

/* Apply livepatch relocations in ELF section @secindex of module @pmod. */
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname);
237
#else /* !CONFIG_LIVEPATCH */

/*
 * No-op stubs so that callers (module loader, fork, scheduler paths) need
 * not guard every call site with #ifdef CONFIG_LIVEPATCH.
 */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

static inline
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname)
{
	return 0;
}

#endif /* CONFIG_LIVEPATCH */
256
b700e7f0 257#endif /* _LINUX_LIVEPATCH_H_ */