livepatch: unpatch all klp_objects if klp_module_coming fails
When an incoming module is considered for livepatching by
klp_module_coming(), it iterates over multiple patches and multiple
kernel objects in this order:

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {

which means that if one of the kernel objects fails to patch,
klp_module_coming()'s error path needs to unpatch and clean up any kernel
objects that were already patched by a previous patch.
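
The real helper, klp_cleanup_module_patches_limited(), appears in the diff
below; the sketch here is only an illustrative user-space model of the same
unwind pattern (toy names, not kernel APIs): apply "patches" in order and, on
failure, revert only those applied before the failing entry, with a sentinel
limit of -1 reverting everything, mirroring the NULL limit passed by
klp_module_going():

	#include <stdbool.h>
	#include <stdio.h>

	#define NPATCHES 3

	static bool apply_patch(int i)
	{
		return i != 2;	/* pretend the third patch fails */
	}

	static void cleanup_patches_limited(int limit)
	{
		/* Revert every patch applied before 'limit'; -1 means "all". */
		for (int i = 0; i < NPATCHES && i != limit; i++)
			printf("reverting patch %d\n", i);
	}

	int main(void)
	{
		for (int i = 0; i < NPATCHES; i++) {
			if (!apply_patch(i)) {
				/* module-coming error path: unwind up to the failing patch */
				cleanup_patches_limited(i);
				return 1;
			}
		}
		/* module-going path: unwind everything */
		cleanup_patches_limited(-1);
		return 0;
	}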

Reported-by: Miroslav Benes <mbenes@suse.cz>
Suggested-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Joe Lawrence authored and Jiri Kosina committed Oct 11, 2017
1 parent dcba710 commit ef8daf8
Showing 1 changed file with 37 additions and 23 deletions.

kernel/livepatch/core.c
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
+/*
+ * Remove parts of patches that touch a given kernel module. The list of
+ * patches processed might be limited. When limit is NULL, all patches
+ * will be handled.
+ */
+static void klp_cleanup_module_patches_limited(struct module *mod,
+					       struct klp_patch *limit)
+{
+	struct klp_patch *patch;
+	struct klp_object *obj;
+
+	list_for_each_entry(patch, &klp_patches, list) {
+		if (patch == limit)
+			break;
+
+		klp_for_each_object(patch, obj) {
+			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+				continue;
+
+			/*
+			 * Only unpatch the module if the patch is enabled or
+			 * is in transition.
+			 */
+			if (patch->enabled || patch == klp_transition_patch) {
+				pr_notice("reverting patch '%s' on unloading module '%s'\n",
+					  patch->mod->name, obj->mod->name);
+				klp_unpatch_object(obj);
+			}
+
+			klp_free_object_loaded(obj);
+			break;
+		}
+	}
+}
+
 int klp_module_coming(struct module *mod)
 {
 	int ret;
@@ -894,17 +929,14 @@ int klp_module_coming(struct module *mod)
 	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
 		patch->mod->name, obj->mod->name, obj->mod->name);
 	mod->klp_alive = false;
-	klp_free_object_loaded(obj);
+	klp_cleanup_module_patches_limited(mod, patch);
 	mutex_unlock(&klp_mutex);
 
 	return ret;
 }
 
 void klp_module_going(struct module *mod)
 {
-	struct klp_patch *patch;
-	struct klp_object *obj;
-
 	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
 		    mod->state != MODULE_STATE_COMING))
 		return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
 	 */
 	mod->klp_alive = false;
 
-	list_for_each_entry(patch, &klp_patches, list) {
-		klp_for_each_object(patch, obj) {
-			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
-				continue;
-
-			/*
-			 * Only unpatch the module if the patch is enabled or
-			 * is in transition.
-			 */
-			if (patch->enabled || patch == klp_transition_patch) {
-				pr_notice("reverting patch '%s' on unloading module '%s'\n",
-					  patch->mod->name, obj->mod->name);
-				klp_unpatch_object(obj);
-			}
-
-			klp_free_object_loaded(obj);
-			break;
-		}
-	}
+	klp_cleanup_module_patches_limited(mod, NULL);
 
 	mutex_unlock(&klp_mutex);
 }
