/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

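/*
 * Illustration (not from the original file): if two patches both replace the
 * same original function, their klp_func structs share one klp_ops and stack
 * up on func_stack.  Enabling patch1 and then patch2 for the same old_addr
 * gives
 *
 *	ops->func_stack:  patch2's klp_func -> patch1's klp_func
 *
 * so klp_ftrace_handler() redirects callers to patch2's new_func until
 * patch2 is disabled again, at which point patch1's version wins.
 */
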
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of the module coming and going notifiers.
	 * Note that the patch might still be needed before the going handler
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify the semantics of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set. If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the same
	 * name in the same object.
	 */
	unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};
	int ret;

	mutex_lock(&module_mutex);
	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
	mutex_unlock(&module_mutex);

	if (!ret) {
		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
		       name, addr);
		return -EINVAL;
	}

	return 0;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* If KASLR has been enabled, adjust old_addr accordingly */
	if (kaslr_enabled() && func->old_addr)
		func->old_addr += kaslr_offset();
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}

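/*
 * Worked example (hypothetical addresses): if a patch was built against a
 * vmlinux where the target function lives at 0xffffffff810ab3d0 and the
 * running kernel booted with kaslr_offset() == 0x200000, the adjusted
 * old_addr becomes 0xffffffff812ab3d0, which is then checked against
 * kallsyms by klp_verify_vmlinux_symbol().
 */
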
/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
#if defined(CONFIG_RANDOMIZE_BASE)
			/* If KASLR has been enabled, adjust old value accordingly */
			if (kaslr_enabled())
				reloc->val += kaslr_offset();
#endif
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

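/*
 * For reference (not part of this file): klp_arch_set_pc() is an arch helper.
 * On x86 it simply rewrites the saved instruction pointer, so when the ftrace
 * trampoline returns, execution continues in the replacement function.
 * A sketch of the 4.x-era x86 implementation from
 * arch/x86/include/asm/livepatch.h:
 *
 *	static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 *	{
 *		regs->ip = ip;
 *	}
 */
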
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}

	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */

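/*
 * Usage example (illustrative; see Documentation/ABI/testing/sysfs-kernel-livepatch):
 * writing 1 or 0 to the per-patch "enabled" file toggles the patch from user
 * space, e.g. for a hypothetical patch module named livepatch_sample:
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * which lands in enabled_store() below and ends up calling
 * __klp_disable_patch().
 */
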
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);

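/*
 * Illustrative sketch of the consumer side, modeled loosely on
 * samples/livepatch/livepatch-sample.c and kept under #if 0 so it is not
 * compiled here.  The names (livepatch_cmdline_proc_show, the funcs/objs/
 * patch arrays) are hypothetical.  A patch module registers its klp_patch
 * and then enables it from module_init(); the reverse happens in
 * module_exit().
 */
#if 0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* replacement body for the patched function */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
#endif
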
static int klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret) {
		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
			pmod->name, mod->name, ret);
		return ret;
	}

	if (patch->state == KLP_DISABLED)
		return 0;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (ret)
		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
			pmod->name, mod->name, ret);

	return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	int ret;
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know what module will get patched by a new patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				ret = klp_module_notify_coming(patch, obj);
				if (ret) {
					obj->mod = NULL;
					pr_warn("patch '%s' is in an inconsistent state!\n",
						patch->mod->name);
				}
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);