From dcf550e52f567cb7a421169d2522869f9188aca5 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:43 -0500
Subject: livepatch: Disallow vmlinux.ko

This is purely a theoretical issue, but if there were a module named
vmlinux.ko, the livepatch relocation code wouldn't be able to
distinguish between vmlinux-specific and vmlinux.ko-specific KLP
relocations.

If CONFIG_LIVEPATCH is enabled, don't allow a module named vmlinux.ko.

Suggested-by: Peter Zijlstra
Signed-off-by: Josh Poimboeuf
Acked-by: Miroslav Benes
Acked-by: Joe Lawrence
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c3512e7e0801..40cfac8156fd 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1139,6 +1139,11 @@ int klp_module_coming(struct module *mod)
 	if (WARN_ON(mod->state != MODULE_STATE_COMING))
 		return -EINVAL;
 
+	if (!strcmp(mod->name, "vmlinux")) {
+		pr_err("vmlinux.ko: invalid module name");
+		return -EINVAL;
+	}
+
 	mutex_lock(&klp_mutex);
 	/*
 	 * Each module has to know that klp_module_coming()
--
cgit v1.2.3


From 7c8e2bdd5f0d990e2398ee3deafc626dd469fc2d Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:44 -0500
Subject: livepatch: Apply vmlinux-specific KLP relocations early

KLP relocations are livepatch-specific relocations which are applied to
a KLP module's text or data.  They exist for two reasons:

1) Unexported symbols: replacement functions often need to access
   unexported symbols (e.g. static functions), which "normal"
   relocations don't allow.

2) Late module patching: this is the ability for a KLP module to bypass
   normal module dependencies, such that the KLP module can be loaded
   *before* a to-be-patched module.  This means that relocations which
   need to access symbols in the to-be-patched module might need to be
   applied to the KLP module well after it has been loaded.

Non-late-patched KLP relocations are applied from the KLP module's init
function.  That usually works fine, unless the patched code wants to
use alternatives, paravirt patching, jump tables, or some other special
section which needs relocations.  Then we run into ordering issues and
crashes.

In order for those special sections to work properly, the KLP
relocations should be applied *before* the special section init code
runs, such as apply_paravirt(), apply_alternatives(), or
jump_label_apply_nops().

You might think the obvious solution would be to move the KLP
relocation initialization earlier, but it's not necessarily that
simple.  The problem is the above-mentioned late module patching, for
which KLP relocations can get applied well after the KLP module is
loaded.

To "fix" this issue in the past, we created .klp.arch sections:

  .klp.arch.{module}..altinstructions
  .klp.arch.{module}..parainstructions

Those sections allow KLP late module patching code to call
apply_paravirt() and apply_alternatives() after the module-specific KLP
relocations (.klp.rela.{module}.{section}) have been applied.

But that has a lot of drawbacks, including code complexity, the need
for arch-specific code, and the (per-arch) danger that we missed some
special section -- for example the __jump_table section which is used
for jump labels.

It turns out there's a simpler and more functional approach.
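As background for the distinction drawn next, everything hinges on the
object name embedded in each KLP relocation section's name.  The
following stand-alone user-space sketch parses such names with the same
sscanf format the kernel code in this series uses; the module name
"ext4" is only a placeholder, not something this series refers to:

  #include <stdio.h>
  #include <string.h>

  /* Field width 55 mirrors MODULE_NAME_LEN - 1, as in the kernel code. */
  static int klp_sec_targets_vmlinux(const char *secname)
  {
      char objname[56];

      /* Format: .klp.rela.<objname>.<section_name> */
      if (sscanf(secname, ".klp.rela.%55[^.]", objname) != 1)
          return -1;  /* not a well-formed KLP rela section name */

      return !strcmp(objname, "vmlinux");
  }

  int main(void)
  {
      /* 1: vmlinux-specific, can be applied with the normal relocations */
      printf("%d\n", klp_sec_targets_vmlinux(".klp.rela.vmlinux.text"));

      /* 0: module-specific, may have to wait for the target module */
      printf("%d\n", klp_sec_targets_vmlinux(".klp.rela.ext4.text.some_func"));

      return 0;
  }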
There are two kinds of KLP relocation sections:

1) vmlinux-specific KLP relocation sections

   .klp.rela.vmlinux.{sec}

   These are relocations (applied to the KLP module) which reference
   unexported vmlinux symbols.

2) module-specific KLP relocation sections

   .klp.rela.{module}.{sec}

   These are relocations (applied to the KLP module) which reference
   unexported or exported module symbols.

Up until now, these have been treated the same.  However, they're
inherently different.

Because of late module patching, module-specific KLP relocations can be
applied very late, thus they can create the ordering headaches described
above.

But vmlinux-specific KLP relocations don't have that problem.  There's
nothing to prevent them from being applied earlier.  So apply them at
the same time as normal relocations, when the KLP module is being
loaded.

This means that for vmlinux-specific KLP relocations, we no longer have
any ordering issues.  vmlinux-referencing jump labels, alternatives, and
paravirt patching will work automatically, without the need for the
.klp.arch hacks.

All that said, for module-specific KLP relocations, the ordering
problems still exist and we *do* still need .klp.arch.  Or do we?  Stay
tuned.

Suggested-by: Peter Zijlstra
Signed-off-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Acked-by: Jessica Yu
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 137 ++++++++++++++++++++++++++++++------------------
 kernel/module.c         |  10 ++--
 2 files changed, 92 insertions(+), 55 deletions(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 40cfac8156fd..c02791e5c75b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -191,12 +191,12 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 	return -EINVAL;
 }
 
-static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
+static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
+			       unsigned int symndx, Elf_Shdr *relasec)
 {
 	int i, cnt, vmlinux, ret;
 	char objname[MODULE_NAME_LEN];
 	char symname[KSYM_NAME_LEN];
-	char *strtab = pmod->core_kallsyms.strtab;
 	Elf_Rela *relas;
 	Elf_Sym *sym;
 	unsigned long sympos, addr;
@@ -216,7 +216,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 	relas = (Elf_Rela *) relasec->sh_addr;
 	/* For each rela in this klp relocation section */
 	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
-		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+		sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
 		if (sym->st_shndx != SHN_LIVEPATCH) {
 			pr_err("symbol %s is not marked as a livepatch symbol\n",
 			       strtab + sym->st_name);
@@ -246,54 +246,59 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 	return 0;
 }
 
-static int klp_write_object_relocations(struct module *pmod,
-					struct klp_object *obj)
+/*
+ * At a high-level, there are two types of klp relocation sections: those which
+ * reference symbols which live in vmlinux; and those which reference symbols
+ * which live in other modules.  This function is called for both types:
+ *
+ * 1) When a klp module itself loads, the module code calls this function to
+ *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
+ *    These relocations are written to the klp module text to allow the patched
+ *    code/data to reference unexported vmlinux symbols.
+ *    They're written as early as possible to ensure that other module init
+ *    code (e.g., jump_label_apply_nops) can access any unexported vmlinux
+ *    symbols which might be referenced by the klp module's special sections.
+ *
+ * 2) When a to-be-patched module loads -- or is already loaded when a
+ *    corresponding klp module loads -- klp code calls this function to write
+ *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
+ *    are written to the klp module text to allow the patched code/data to
+ *    reference symbols which live in the to-be-patched module or one of its
+ *    module dependencies.  Exported symbols are supported, in addition to
+ *    unexported symbols, in order to enable late module patching, which allows
+ *    the to-be-patched module to be loaded and patched sometime *after* the
+ *    klp module is loaded.
+ */
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+			     const char *shstrtab, const char *strtab,
+			     unsigned int symndx, unsigned int secndx,
+			     const char *objname)
 {
-	int i, cnt, ret = 0;
-	const char *objname, *secname;
+	int cnt, ret;
 	char sec_objname[MODULE_NAME_LEN];
-	Elf_Shdr *sec;
+	Elf_Shdr *sec = sechdrs + secndx;
 
-	if (WARN_ON(!klp_is_object_loaded(obj)))
+	/*
+	 * Format: .klp.rela.sec_objname.section_name
+	 * See comment in klp_resolve_symbols() for an explanation
+	 * of the selected field width value.
+	 */
+	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
+		     sec_objname);
+	if (cnt != 1) {
+		pr_err("section %s has an incorrectly formatted name\n",
+		       shstrtab + sec->sh_name);
 		return -EINVAL;
+	}
 
-	objname = klp_is_module(obj) ? obj->name : "vmlinux";
-
-	/* For each klp relocation section */
-	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
-		sec = pmod->klp_info->sechdrs + i;
-		secname = pmod->klp_info->secstrings + sec->sh_name;
-		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
-			continue;
-
-		/*
-		 * Format: .klp.rela.sec_objname.section_name
-		 * See comment in klp_resolve_symbols() for an explanation
-		 * of the selected field width value.
-		 */
-		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
-		if (cnt != 1) {
-			pr_err("section %s has an incorrectly formatted name\n",
-			       secname);
-			ret = -EINVAL;
-			break;
-		}
-
-		if (strcmp(objname, sec_objname))
-			continue;
-
-		ret = klp_resolve_symbols(sec, pmod);
-		if (ret)
-			break;
+	if (strcmp(objname ? objname : "vmlinux", sec_objname))
+		return 0;
 
-		ret = apply_relocate_add(pmod->klp_info->sechdrs,
-					 pmod->core_kallsyms.strtab,
-					 pmod->klp_info->symndx, i, pmod);
-		if (ret)
-			break;
-	}
+	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec);
+	if (ret)
+		return ret;
 
-	return ret;
+	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
 }
 
 /*
@@ -730,6 +735,28 @@ void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
 {
 }
 
+int klp_apply_object_relocs(struct klp_patch *patch, struct klp_object *obj)
+{
+	int i, ret;
+	struct klp_modinfo *info = patch->mod->klp_info;
+
+	for (i = 1; i < info->hdr.e_shnum; i++) {
+		Elf_Shdr *sec = info->sechdrs + i;
+
+		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+			continue;
+
+		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
+					       info->secstrings,
+					       patch->mod->core_kallsyms.strtab,
+					       info->symndx, i, obj->name);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /* parts of the initialization that is done only when the object is loaded */
 static int klp_init_object_loaded(struct klp_patch *patch,
 				  struct klp_object *obj)
@@ -738,18 +765,26 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	int ret;
 
 	mutex_lock(&text_mutex);
-
 	module_disable_ro(patch->mod);
-	ret = klp_write_object_relocations(patch->mod, obj);
-	if (ret) {
-		module_enable_ro(patch->mod, true);
-		mutex_unlock(&text_mutex);
-		return ret;
+
+	if (klp_is_module(obj)) {
+		/*
+		 * Only write module-specific relocations here
+		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
+		 * written earlier during the initialization of the klp module
+		 * itself.
+		 */
+		ret = klp_apply_object_relocs(patch, obj);
+		if (ret) {
+			module_enable_ro(patch->mod, true);
+			mutex_unlock(&text_mutex);
+			return ret;
+		}
 	}
 
 	arch_klp_init_object_loaded(patch, obj);
-	module_enable_ro(patch->mod, true);
 
+	module_enable_ro(patch->mod, true);
 	mutex_unlock(&text_mutex);
 
 	klp_for_each_func(obj, func) {
diff --git a/kernel/module.c b/kernel/module.c
index 646f1e2330d2..fdd9f6970e9a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2334,11 +2334,13 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
 		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
 			continue;
 
-		/* Livepatch relocation sections are applied by livepatch */
 		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
-			continue;
-
-		if (info->sechdrs[i].sh_type == SHT_REL)
+			err = klp_apply_section_relocs(mod, info->sechdrs,
+						       info->secstrings,
+						       info->strtab,
+						       info->index.sym, i,
+						       NULL);
+		else if (info->sechdrs[i].sh_type == SHT_REL)
 			err = apply_relocate(info->sechdrs, info->strtab,
 					     info->index.sym, i, mod);
 		else if (info->sechdrs[i].sh_type == SHT_RELA)
--
cgit v1.2.3


From 1d05334d2899bd3ecdf01beb53f0a70884a7f471 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 29 Apr 2020 10:24:45 -0500
Subject: livepatch: Remove .klp.arch

After the previous patch, vmlinux-specific KLP relocations are now
applied early during KLP module load.  This means that .klp.arch
sections are no longer needed for *vmlinux-specific* KLP relocations.

One might think they're still needed for *module-specific* KLP
relocations.  If a to-be-patched module is loaded *after* its
corresponding KLP module is loaded, any corresponding KLP relocations
will be delayed until the to-be-patched module is loaded.  If any
special sections (.parainstructions, for example) rely on those
relocations, their initializations (apply_paravirt) need to be done
afterwards.
Thus the apparent need for arch_klp_init_object_loaded() and its
corresponding .klp.arch sections -- it allows some of the special
section initializations to be done at a later time.

But... if you look closer, that dependency between the special sections
and the module-specific KLP relocations doesn't actually exist in
reality.  Looking at the contents of the .altinstructions and
.parainstructions sections, there's not a realistic scenario in which a
KLP module's .altinstructions or .parainstructions section needs to
access a symbol in a to-be-patched module.  It might need to access a
local symbol or even a vmlinux symbol; but not another module's symbol.
When a special section needs to reference a local or vmlinux symbol, a
normal rela can be used instead of a KLP rela.

Since the special section initializations don't actually have any real
dependency on module-specific KLP relocations, .klp.arch and
arch_klp_init_object_loaded() no longer have a reason to exist.  So
remove them.

As Peter said much more succinctly:

  So the reason for .klp.arch was that .klp.rela.* stuff would overwrite
  paravirt instructions.  If that happens you're doing it wrong.  Those
  RELAs are core kernel, not module, and thus should've happened in
  .rela.* sections at patch-module loading time.

  Reverting this removes the two apply_{paravirt,alternatives}() calls
  from the late patching path, and means we don't have to worry about
  them when removing module_disable_ro().

[ jpoimboe: Rewrote patch description. Tweaked klp_init_object_loaded()
  error path. ]

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c02791e5c75b..16632e75112a 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -729,12 +729,6 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 			     func->old_sympos ? func->old_sympos : 1);
 }
 
-/* Arches may override this to finish any remaining arch-specific tasks */
-void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
-					struct klp_object *obj)
-{
-}
-
 int klp_apply_object_relocs(struct klp_patch *patch, struct klp_object *obj)
 {
 	int i, ret;
@@ -764,10 +758,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	struct klp_func *func;
 	int ret;
 
-	mutex_lock(&text_mutex);
-	module_disable_ro(patch->mod);
-
 	if (klp_is_module(obj)) {
+
+		mutex_lock(&text_mutex);
+		module_disable_ro(patch->mod);
+
 		/*
 		 * Only write module-specific relocations here
 		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
@@ -775,17 +770,13 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 		 * itself.
 		 */
 		ret = klp_apply_object_relocs(patch, obj);
-		if (ret) {
-			module_enable_ro(patch->mod, true);
-			mutex_unlock(&text_mutex);
-			return ret;
-		}
-	}
 
-	arch_klp_init_object_loaded(patch, obj);
+		module_enable_ro(patch->mod, true);
+		mutex_unlock(&text_mutex);
 
-	module_enable_ro(patch->mod, true);
-	mutex_unlock(&text_mutex);
+		if (ret)
+			return ret;
+	}
 
 	klp_for_each_func(obj, func) {
 		ret = klp_find_object_symbol(obj->name, func->old_name,
--
cgit v1.2.3


From ca376a937486a3e5ea9f9b59a6d4b1ac4d99a9b9 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:46 -0500
Subject: livepatch: Prevent module-specific KLP rela sections from
 referencing vmlinux symbols

Prevent module-specific KLP rela sections from referencing vmlinux
symbols.  This helps prevent ordering issues with module special
section initializations.  Presumably such symbols are exported and
normal relas can be used instead.

Suggested-by: Peter Zijlstra
Signed-off-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 16632e75112a..f9ebb54affab 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -192,17 +192,20 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 }
 
 static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
-			       unsigned int symndx, Elf_Shdr *relasec)
+			       unsigned int symndx, Elf_Shdr *relasec,
+			       const char *sec_objname)
 {
-	int i, cnt, vmlinux, ret;
-	char objname[MODULE_NAME_LEN];
-	char symname[KSYM_NAME_LEN];
+	int i, cnt, ret;
+	char sym_objname[MODULE_NAME_LEN];
+	char sym_name[KSYM_NAME_LEN];
 	Elf_Rela *relas;
 	Elf_Sym *sym;
 	unsigned long sympos, addr;
+	bool sym_vmlinux;
+	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
 
 	/*
-	 * Since the field widths for objname and symname in the sscanf()
+	 * Since the field widths for sym_objname and sym_name in the sscanf()
 	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 	 * and KSYM_NAME_LEN have the values we expect them to have.
@@ -223,20 +226,33 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
 			return -EINVAL;
 		}
 
-		/* Format: .klp.sym.objname.symname,sympos */
+		/* Format: .klp.sym.sym_objname.sym_name,sympos */
 		cnt = sscanf(strtab + sym->st_name,
 			     ".klp.sym.%55[^.].%127[^,],%lu",
-			     objname, symname, &sympos);
+			     sym_objname, sym_name, &sympos);
 		if (cnt != 3) {
 			pr_err("symbol %s has an incorrectly formatted name\n",
 			       strtab + sym->st_name);
 			return -EINVAL;
 		}
 
+		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
+
+		/*
+		 * Prevent module-specific KLP rela sections from referencing
+		 * vmlinux symbols.  This helps prevent ordering issues with
+		 * module special section initializations.  Presumably such
+		 * symbols are exported and normal relas can be used instead.
+		 */
+		if (!sec_vmlinux && sym_vmlinux) {
+			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
+			       sym_name);
+			return -EINVAL;
+		}
+
 		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
-		vmlinux = !strcmp(objname, "vmlinux");
-		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
-					     symname, sympos, &addr);
+		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
+					     sym_name, sympos, &addr);
 		if (ret)
 			return ret;
@@ -294,7 +310,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
 	if (strcmp(objname ? objname : "vmlinux", sec_objname))
 		return 0;
 
-	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec);
+	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
 	if (ret)
 		return ret;
--
cgit v1.2.3


From d556e1be33320366272ec02f93f98d7f308479f1 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:50 -0500
Subject: livepatch: Remove module_disable_ro() usage

With arch_klp_init_object_loaded() gone, and apply_relocate_add() now
using text_poke(), livepatch no longer needs to use
module_disable_ro().

Signed-off-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index f9ebb54affab..6b8b3c067be0 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -777,7 +777,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	if (klp_is_module(obj)) {
 
 		mutex_lock(&text_mutex);
-		module_disable_ro(patch->mod);
 
 		/*
 		 * Only write module-specific relocations here
@@ -787,7 +786,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 		 */
 		ret = klp_apply_object_relocs(patch, obj);
 
-		module_enable_ro(patch->mod, true);
 		mutex_unlock(&text_mutex);
 
 		if (ret)
--
cgit v1.2.3


From 0d9fbf78fefb421a3af97394ce80bba0db4f046a Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:51 -0500
Subject: module: Remove module_disable_ro()

module_disable_ro() has no more users.  Remove it.

Signed-off-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Acked-by: Jessica Yu
Signed-off-by: Jiri Kosina
---
 kernel/module.c | 13 -------------
 1 file changed, 13 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index fdd9f6970e9a..3ba024afe379 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1997,19 +1997,6 @@ static void frob_writable_data(const struct module_layout *layout,
 		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
 }
 
-/* livepatching wants to disable read-only so it can frob module. */
-void module_disable_ro(const struct module *mod)
-{
-	if (!rodata_enabled)
-		return;
-
-	frob_text(&mod->core_layout, set_memory_rw);
-	frob_rodata(&mod->core_layout, set_memory_rw);
-	frob_ro_after_init(&mod->core_layout, set_memory_rw);
-	frob_text(&mod->init_layout, set_memory_rw);
-	frob_rodata(&mod->init_layout, set_memory_rw);
-}
-
 void module_enable_ro(const struct module *mod, bool after_init)
 {
 	if (!rodata_enabled)
--
cgit v1.2.3


From 5b384f933590a086ca9a0abdc2e55e41107ac440 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:52 -0500
Subject: x86/module: Use text_mutex in apply_relocate_add()

Now that the livepatch code no longer needs the text_mutex for changing
module permissions, move its usage down to apply_relocate_add().

Note the s390 version of apply_relocate_add() doesn't need to use the
text_mutex because it already uses s390_kernel_write_lock, which
accomplishes the same task.
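On the x86 side (not included in this kernel/-only excerpt), the change
amounts to taking text_mutex around the text_poke()-based write path
used for late relocations.  Roughly the following shape -- a sketch
from the surrounding series, not the verbatim diff; the helper name
__apply_relocate_add() is assumed here:

  int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
                         unsigned int symindex, unsigned int relsec,
                         struct module *me)
  {
      int ret;
      bool early = me->state == MODULE_STATE_UNFORMED;
      void *(*write)(void *, const void *, size_t) = memcpy;

      /* Late (livepatch) relocations go through text_poke() under text_mutex. */
      if (!early) {
          write = text_poke;
          mutex_lock(&text_mutex);
      }

      ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
                                 write);

      if (!early) {
          text_poke_sync();
          mutex_unlock(&text_mutex);
      }

      return ret;
  }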
Signed-off-by: Josh Poimboeuf
Acked-by: Joe Lawrence
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6b8b3c067be0..96d2da14eb0d 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -775,9 +775,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	int ret;
 
 	if (klp_is_module(obj)) {
-
-		mutex_lock(&text_mutex);
-
 		/*
 		 * Only write module-specific relocations here
 		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
@@ -785,9 +782,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 		 * itself.
 		 */
 		ret = klp_apply_object_relocs(patch, obj);
-
-		mutex_unlock(&text_mutex);
-
 		if (ret)
 			return ret;
 	}
--
cgit v1.2.3


From e6eff4376e2897c2e14b70d87bf7284cdb093830 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 29 Apr 2020 10:24:53 -0500
Subject: module: Make module_enable_ro() static again

Now that module_enable_ro() has no more external users, make it static
again.

Suggested-by: Jessica Yu
Signed-off-by: Josh Poimboeuf
Acked-by: Jessica Yu
Signed-off-by: Jiri Kosina
---
 kernel/module.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 3ba024afe379..a26343ea4d50 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1997,7 +1997,7 @@ static void frob_writable_data(const struct module_layout *layout,
 		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
 }
 
-void module_enable_ro(const struct module *mod, bool after_init)
+static void module_enable_ro(const struct module *mod, bool after_init)
 {
 	if (!rodata_enabled)
 		return;
@@ -2025,6 +2025,7 @@ static void module_enable_nx(const struct module *mod)
 #else /* !CONFIG_STRICT_MODULE_RWX */
 static void module_enable_nx(const struct module *mod) { }
+static void module_enable_ro(const struct module *mod, bool after_init) {}
 #endif /* CONFIG_STRICT_MODULE_RWX */
 
 static void module_enable_x(const struct module *mod)
 {
--
cgit v1.2.3


From a4ae16f65c335f8be58b67b78628c788c4b325a5 Mon Sep 17 00:00:00 2001
From: Samuel Zou
Date: Sat, 9 May 2020 09:16:41 +0800
Subject: livepatch: Make klp_apply_object_relocs static

Fix the following sparse warning:

  kernel/livepatch/core.c:748:5: warning: symbol 'klp_apply_object_relocs'
  was not declared.

klp_apply_object_relocs() has only one call site, within core.c; it
should be static.

Fixes: 7c8e2bdd5f0d ("livepatch: Apply vmlinux-specific KLP relocations early")
Reported-by: Hulk Robot
Signed-off-by: Samuel Zou
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 96d2da14eb0d..f76fdb925532 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -745,7 +745,8 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 			     func->old_sympos ? func->old_sympos : 1);
 }
 
-int klp_apply_object_relocs(struct klp_patch *patch, struct klp_object *obj)
+static int klp_apply_object_relocs(struct klp_patch *patch,
+				   struct klp_object *obj)
 {
 	int i, ret;
 	struct klp_modinfo *info = patch->mod->klp_info;
--
cgit v1.2.3
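Taken together, the naming conventions and the new check added by this
series boil down to two formats and one rule: module-specific KLP
sections must not reference vmlinux symbols.  A stand-alone user-space
sketch of that logic, using the same sscanf formats as
klp_resolve_symbols() above; the symbol name and the "ext4" module are
placeholders only:

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  /*
   * Sketch of the two name formats used above:
   *   section: .klp.rela.<sec_objname>.<section_name>
   *   symbol:  .klp.sym.<sym_objname>.<sym_name>,<sympos>
   * Field widths 55/127 mirror MODULE_NAME_LEN - 1 and KSYM_NAME_LEN - 1.
   */
  static int check_klp_sym(const char *sec_objname, const char *symstr)
  {
      char sym_objname[56];
      char sym_name[128];
      unsigned long sympos;
      bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
      bool sym_vmlinux;

      if (sscanf(symstr, ".klp.sym.%55[^.].%127[^,],%lu",
                 sym_objname, sym_name, &sympos) != 3)
          return -1;  /* incorrectly formatted symbol name */

      sym_vmlinux = !strcmp(sym_objname, "vmlinux");

      /* Module-specific sections must not reference vmlinux symbols. */
      if (!sec_vmlinux && sym_vmlinux)
          return -1;

      return 0;
  }

  int main(void)
  {
      /* 0: vmlinux-specific section referencing an unexported vmlinux symbol */
      printf("%d\n", check_klp_sym("vmlinux", ".klp.sym.vmlinux.some_static_func,0"));

      /* -1: module-specific section referencing a vmlinux symbol is rejected */
      printf("%d\n", check_klp_sym("ext4", ".klp.sym.vmlinux.some_static_func,0"));

      return 0;
  }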