// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <[email protected]>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

struct mod_tree_root mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const u32 *crcs;
	enum mod_license license;
};

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (mod_mem_type_is_core_data(type)) {
		if (min < tree->data_addr_min)
			tree->data_addr_min = min;
		if (max > tree->data_addr_max)
			tree->data_addr_max = max;
		return;
	}
#endif
	if (min < tree->addr_min)
		tree->addr_min = min;
	if (max > tree->addr_max)
		tree->addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (mod_mem->size)
			__mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
	}
}

/* Block module loading/unloading? */
int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
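/*
 * Usage sketch (illustrative only, not part of the upstream sources): a
 * subsystem that wants to react to module state transitions supplies a
 * notifier callback on the chain above. The callback receives the
 * MODULE_STATE_* value as @action and the struct module pointer as @data:
 *
 *	static int example_module_notify(struct notifier_block *nb,
 *					 unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			pr_info("module %s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_module_notify,
 *	};
 *	...
 *	register_module_notifier(&example_nb);
 *
 * "example_module_notify" and "example_nb" are hypothetical names used only
 * for illustration.
 */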
/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/**
 * find_any_unique_sec() - Find a unique section index by name
 * @info: Load info for the module to scan
 * @name: Name of the section we're looking for
 *
 * Locates a unique section by name. Ignores SHF_ALLOC.
 *
 * Return: Section index if found uniquely, zero if absent, negative count
 * of total instances if multiple were found.
 */
static int find_any_unique_sec(const struct load_info *info, const char *name)
{
	unsigned int idx;
	unsigned int count = 0;
	int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
			   name) == 0) {
			count++;
			idx = i;
		}
	}
	if (count == 1) {
		return idx;
	} else if (count == 0) {
		return 0;
	} else {
		return -count;
	}
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
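/*
 * Usage sketch (illustrative only): later in the loader, sections are
 * typically located with the helpers above by name and element size, for
 * example the kernel parameters stored in the "__param" section:
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * A missing section is harmless: find_sec() returns 0, and section 0 has
 * sh_addr 0 and sh_size 0, so the caller sees a NULL pointer and a zero
 * count.
 */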
/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
		return NULL;
	return offset_to_ptr(&sym->namespace_offset);
#else
	return sym->namespace;
#endif
}

int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    struct find_symbol_arg *fsa)
{
	struct kernel_symbol *sym;

	if (!fsa->gplok && syms->license == GPL_ONLY)
		return false;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);
	if (!sym)
		return false;
	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, sym - syms->start);
	fsa->sym = sym;
	fsa->license = syms->license;

	return true;
}

/*
 * Find an exported symbol and return it, along with (optional) crc and
 * (optional) module which owns it. Needs preempt disabled or module_mutex.
 */
bool find_symbol(struct find_symbol_arg *fsa)
{
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY },
	};
	struct module *mod;
	unsigned int i;

	module_assert_mutex_or_preempt();

	for (i = 0; i < ARRAY_SIZE(arr); i++)
		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
			return true;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY },
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		for (i = 0; i < ARRAY_SIZE(arr); i++)
			if (find_exported_symbol_in_section(&arr[i], mod, fsa))
				return true;
	}

	pr_debug("Failed to find symbol %s\n", fsa->name);
	return false;
}

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
struct module *find_module_all(const char *name, size_t len,
			       bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;
	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}
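/*
 * Usage sketch (illustrative only): callers that need to classify an
 * address as static storage, such as lockdep, are expected to combine this
 * with the module text/data range checks, roughly:
 *
 *	if (is_module_address(addr) || is_module_percpu_address(addr))
 *		treat_as_static_object();
 *
 * "treat_as_static_object()" is a hypothetical placeholder, not a real
 * kernel function.
 */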
#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(const struct module_attribute *mattr, \
				    struct module_kobject *mk, char *buffer) \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static const struct module_attribute modinfo_##field = {              \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
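/*
 * Illustrative note (not part of the upstream sources): each MODINFO_ATTR()
 * use above generates a read-only sysfs attribute backed by the
 * corresponding modinfo string, so MODINFO_ATTR(version) expands to roughly:
 *
 *	static const struct module_attribute modinfo_version = {
 *		.attr	= { .name = "version", .mode = 0444 },
 *		.show	= show_modinfo_version,
 *		...
 *	};
 *
 * which is surfaced as /sys/module/<name>/version once the module's kobject
 * and its attribute group are set up.
 */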
static struct {
	char name[MODULE_NAME_LEN + 1];
	char taints[MODULE_FLAGS_BUF_SIZE];
} last_unloaded_module;

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a)
			return 1;
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod:	the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	char buf[MODULE_FLAGS_BUF_SIZE];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name and taints of the last unloaded module for diagnostic purposes */
	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));

	free_module(mod);
	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

void __symbol_put(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
	};

	preempt_disable();
	BUG_ON(!find_symbol(&fsa));
	module_put(fsa.owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module, we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(const struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static const struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(module_put);
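/*
 * Usage sketch (illustrative only, not part of this file): the usual pattern
 * for pinning a module while calling into code it owns is to pair the two
 * helpers above, bailing out if the module is already going away:
 *
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	ops->some_callback();
 *	module_put(ops->owner);
 *
 * "ops" and "some_callback" are hypothetical names used for illustration.
 */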
#else /* !CONFIG_MODULE_UNLOAD */
static inline void module_unload_free(struct module *mod)
{
}

static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

size_t module_flags_taint(unsigned long taints, char *buf)
{
	size_t l = 0;
	int i;

	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &taints))
			buf[l++] = taint_flags[i].c_true;
	}

	return l;
}

static ssize_t show_initstate(const struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static const struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(const struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;
}

const struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = mk->mod->mem[MOD_TEXT].size;

	if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
		for_class_mod_mem_type(type, core_data)
			size += mk->mod->mem[type].size;
	}
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
static ssize_t show_datasize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, core_data)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_datasize =
	__ATTR(datasize, 0444, show_datasize, NULL);
#endif
static ssize_t show_initsize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, init)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(const struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod->taints, buffer);
	buffer[l++] = '\n';
	return l;
}

static const struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

const struct module_attribute *const modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	&modinfo_datasize,
#endif
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

const size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);

static const char vermagic[] = VERMAGIC_STRING;

int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}
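/*
 * Illustrative note (not part of the upstream sources): the .modinfo section
 * walked by module_next_tag_pair() is a sequence of NUL-terminated
 * "tag=value" strings, possibly separated by zero padding, roughly:
 *
 *	license=GPL\0author=...\0vermagic=...\0\0...
 *
 * module_next_tag_pair() skips over the current string and any padding and
 * returns the start of the next pair, or NULL once the section is exhausted.
 */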
static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	/*
	 * get_modinfo() calls made before rewrite_section_headers()
	 * must use sh_offset, as sh_addr isn't set!
	 */
	char *modinfo = (char *)info->hdr + infosec->sh_offset;

	if (prev) {
		size -= prev - modinfo;
		modinfo = module_next_tag_pair(prev, &size);
	}

	for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

static char *get_modinfo(const struct load_info *info, const char *tag)
{
	return get_next_modinfo(info, tag, NULL);
}

static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace && namespace[0]) {
		for_each_modinfo_entry(imported_namespace, info, "import_ns") {
			if (strcmp(namespace, imported_namespace) == 0)
				return 0;
		}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		pr_warn(
#else
		pr_err(
#endif
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		return -EINVAL;
#endif
	}
	return 0;
}

static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
{
	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
		return true;

	if (mod->using_gplonly_symbols) {
		pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n",
		       mod->name, name, owner->name);
		return false;
	}

	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
		pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n",
			mod->name, name, owner->name);
		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
	}
	return true;
}

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct find_symbol_arg fsa = {
		.name	= name,
		.gplok	= !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
		.warn	= true,
	};
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	if (!find_symbol(&fsa))
		goto unlock;

	if (fsa.license == GPL_ONLY)
		mod->using_gplonly_symbols = true;

	if (!inherit_taint(mod, fsa.owner, name)) {
		fsa.sym = NULL;
		goto getname;
	}

	if (!check_version(info, name, mod, fsa.crc)) {
		fsa.sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = verify_namespace_is_imported(info, fsa.sym, mod);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

	err = ref_module(mod, fsa.owner);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
*/ 1192cfc1d277SAaron Tomlin strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); 1193cfc1d277SAaron Tomlin unlock: 1194cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 1195cfc1d277SAaron Tomlin return fsa.sym; 1196cfc1d277SAaron Tomlin } 1197cfc1d277SAaron Tomlin 1198cfc1d277SAaron Tomlin static const struct kernel_symbol * 1199cfc1d277SAaron Tomlin resolve_symbol_wait(struct module *mod, 1200cfc1d277SAaron Tomlin const struct load_info *info, 1201cfc1d277SAaron Tomlin const char *name) 1202cfc1d277SAaron Tomlin { 1203cfc1d277SAaron Tomlin const struct kernel_symbol *ksym; 1204cfc1d277SAaron Tomlin char owner[MODULE_NAME_LEN]; 1205cfc1d277SAaron Tomlin 1206cfc1d277SAaron Tomlin if (wait_event_interruptible_timeout(module_wq, 1207cfc1d277SAaron Tomlin !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) 1208cfc1d277SAaron Tomlin || PTR_ERR(ksym) != -EBUSY, 1209cfc1d277SAaron Tomlin 30 * HZ) <= 0) { 1210cfc1d277SAaron Tomlin pr_warn("%s: gave up waiting for init of module %s.\n", 1211cfc1d277SAaron Tomlin mod->name, owner); 1212cfc1d277SAaron Tomlin } 1213cfc1d277SAaron Tomlin return ksym; 1214cfc1d277SAaron Tomlin } 1215cfc1d277SAaron Tomlin 1216cfc1d277SAaron Tomlin void __weak module_arch_cleanup(struct module *mod) 1217cfc1d277SAaron Tomlin { 1218cfc1d277SAaron Tomlin } 1219cfc1d277SAaron Tomlin 1220cfc1d277SAaron Tomlin void __weak module_arch_freeing_init(struct module *mod) 1221cfc1d277SAaron Tomlin { 1222cfc1d277SAaron Tomlin } 1223cfc1d277SAaron Tomlin 1224bc6b94d3SMike Rapoport (IBM) static int module_memory_alloc(struct module *mod, enum mod_mem_type type) 1225ac3b4328SSong Liu { 1226bc6b94d3SMike Rapoport (IBM) unsigned int size = PAGE_ALIGN(mod->mem[type].size); 1227223b5e57SMike Rapoport (IBM) enum execmem_type execmem_type; 1228bc6b94d3SMike Rapoport (IBM) void *ptr; 1229bc6b94d3SMike Rapoport (IBM) 1230bc6b94d3SMike Rapoport (IBM) mod->mem[type].size = size; 1231bc6b94d3SMike Rapoport (IBM) 1232223b5e57SMike Rapoport (IBM) if (mod_mem_type_is_data(type)) 1233223b5e57SMike Rapoport (IBM) execmem_type = EXECMEM_MODULE_DATA; 1234bc6b94d3SMike Rapoport (IBM) else 1235223b5e57SMike Rapoport (IBM) execmem_type = EXECMEM_MODULE_TEXT; 1236bc6b94d3SMike Rapoport (IBM) 1237223b5e57SMike Rapoport (IBM) ptr = execmem_alloc(execmem_type, size); 1238bc6b94d3SMike Rapoport (IBM) if (!ptr) 1239bc6b94d3SMike Rapoport (IBM) return -ENOMEM; 1240bc6b94d3SMike Rapoport (IBM) 12410c133b1eSMike Rapoport (Microsoft) if (execmem_is_rox(execmem_type)) { 1242*c287c072SMike Rapoport (Microsoft) int err = execmem_make_temp_rw(ptr, size); 12430c133b1eSMike Rapoport (Microsoft) 1244*c287c072SMike Rapoport (Microsoft) if (err) { 1245*c287c072SMike Rapoport (Microsoft) execmem_free(ptr); 12460c133b1eSMike Rapoport (Microsoft) return -ENOMEM; 12470c133b1eSMike Rapoport (Microsoft) } 12480c133b1eSMike Rapoport (Microsoft) 12490c133b1eSMike Rapoport (Microsoft) mod->mem[type].is_rox = true; 12500c133b1eSMike Rapoport (Microsoft) } 12510c133b1eSMike Rapoport (Microsoft) 1252bc6b94d3SMike Rapoport (IBM) /* 1253bc6b94d3SMike Rapoport (IBM) * The pointer to these blocks of memory are stored on the module 1254bc6b94d3SMike Rapoport (IBM) * structure and we keep that around so long as the module is 1255bc6b94d3SMike Rapoport (IBM) * around. We only free that memory when we unload the module. 1256bc6b94d3SMike Rapoport (IBM) * Just mark them as not being a leak then. 
The .init* ELF 1257bc6b94d3SMike Rapoport (IBM) * sections *do* get freed after boot so we *could* treat them 1258bc6b94d3SMike Rapoport (IBM) * slightly differently with kmemleak_ignore() and only grey 1259bc6b94d3SMike Rapoport (IBM) * them out as they work as typical memory allocations which 1260bc6b94d3SMike Rapoport (IBM) * *do* eventually get freed, but let's just keep things simple 1261bc6b94d3SMike Rapoport (IBM) * and avoid *any* false positives. 1262bc6b94d3SMike Rapoport (IBM) */ 1263bc6b94d3SMike Rapoport (IBM) kmemleak_not_leak(ptr); 1264bc6b94d3SMike Rapoport (IBM) 1265*c287c072SMike Rapoport (Microsoft) memset(ptr, 0, size); 1266*c287c072SMike Rapoport (Microsoft) mod->mem[type].base = ptr; 1267*c287c072SMike Rapoport (Microsoft) 1268bc6b94d3SMike Rapoport (IBM) return 0; 1269ac3b4328SSong Liu } 1270ac3b4328SSong Liu 1271*c287c072SMike Rapoport (Microsoft) static void module_memory_restore_rox(struct module *mod) 1272*c287c072SMike Rapoport (Microsoft) { 1273*c287c072SMike Rapoport (Microsoft) for_class_mod_mem_type(type, text) { 1274*c287c072SMike Rapoport (Microsoft) struct module_memory *mem = &mod->mem[type]; 1275*c287c072SMike Rapoport (Microsoft) 1276*c287c072SMike Rapoport (Microsoft) if (mem->is_rox) 1277*c287c072SMike Rapoport (Microsoft) execmem_restore_rox(mem->base, mem->size); 1278*c287c072SMike Rapoport (Microsoft) } 1279*c287c072SMike Rapoport (Microsoft) } 1280*c287c072SMike Rapoport (Microsoft) 12810db6f8d7SSuren Baghdasaryan static void module_memory_free(struct module *mod, enum mod_mem_type type) 1282ac3b4328SSong Liu { 12830c133b1eSMike Rapoport (Microsoft) struct module_memory *mem = &mod->mem[type]; 1284bc6b94d3SMike Rapoport (IBM) 12850db6f8d7SSuren Baghdasaryan execmem_free(mem->base); 1286ac3b4328SSong Liu } 1287ac3b4328SSong Liu 12880db6f8d7SSuren Baghdasaryan static void free_mod_mem(struct module *mod) 1289ac3b4328SSong Liu { 1290ac3b4328SSong Liu for_each_mod_mem_type(type) { 1291ac3b4328SSong Liu struct module_memory *mod_mem = &mod->mem[type]; 1292ac3b4328SSong Liu 1293ac3b4328SSong Liu if (type == MOD_DATA) 1294ac3b4328SSong Liu continue; 1295ac3b4328SSong Liu 1296ac3b4328SSong Liu /* Free lock-classes; relies on the preceding sync_rcu(). */ 1297ac3b4328SSong Liu lockdep_free_key_range(mod_mem->base, mod_mem->size); 1298ac3b4328SSong Liu if (mod_mem->size) 12990db6f8d7SSuren Baghdasaryan module_memory_free(mod, type); 1300ac3b4328SSong Liu } 1301ac3b4328SSong Liu 1302ac3b4328SSong Liu /* MOD_DATA hosts mod, so free it at last */ 1303ac3b4328SSong Liu lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); 13040db6f8d7SSuren Baghdasaryan module_memory_free(mod, MOD_DATA); 1305ac3b4328SSong Liu } 1306ac3b4328SSong Liu 1307cfc1d277SAaron Tomlin /* Free a module, remove from lists, etc. */ 1308cfc1d277SAaron Tomlin static void free_module(struct module *mod) 1309cfc1d277SAaron Tomlin { 1310cfc1d277SAaron Tomlin trace_module_free(mod); 1311cfc1d277SAaron Tomlin 13120db6f8d7SSuren Baghdasaryan codetag_unload_module(mod); 131347a92dfbSSuren Baghdasaryan 1314cfc1d277SAaron Tomlin mod_sysfs_teardown(mod); 1315cfc1d277SAaron Tomlin 1316cfc1d277SAaron Tomlin /* 1317cfc1d277SAaron Tomlin * We leave it in list to prevent duplicate loads, but make sure 1318cfc1d277SAaron Tomlin * that noone uses it while it's being deconstructed. 
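 * Setting MODULE_STATE_UNFORMED below is what enforces this: symbol
 * resolution and most module-list walkers skip modules in that state,
 * even though the entry itself is still on the list.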
1319cfc1d277SAaron Tomlin */ 1320cfc1d277SAaron Tomlin mutex_lock(&module_mutex); 1321cfc1d277SAaron Tomlin mod->state = MODULE_STATE_UNFORMED; 1322cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 1323cfc1d277SAaron Tomlin 1324cfc1d277SAaron Tomlin /* Arch-specific cleanup. */ 1325cfc1d277SAaron Tomlin module_arch_cleanup(mod); 1326cfc1d277SAaron Tomlin 1327cfc1d277SAaron Tomlin /* Module unload stuff */ 1328cfc1d277SAaron Tomlin module_unload_free(mod); 1329cfc1d277SAaron Tomlin 1330cfc1d277SAaron Tomlin /* Free any allocated parameters. */ 1331cfc1d277SAaron Tomlin destroy_params(mod->kp, mod->num_kp); 1332cfc1d277SAaron Tomlin 1333cfc1d277SAaron Tomlin if (is_livepatch_module(mod)) 1334cfc1d277SAaron Tomlin free_module_elf(mod); 1335cfc1d277SAaron Tomlin 1336cfc1d277SAaron Tomlin /* Now we can delete it from the lists */ 1337cfc1d277SAaron Tomlin mutex_lock(&module_mutex); 1338cfc1d277SAaron Tomlin /* Unlink carefully: kallsyms could be walking list. */ 1339cfc1d277SAaron Tomlin list_del_rcu(&mod->list); 1340cfc1d277SAaron Tomlin mod_tree_remove(mod); 1341cfc1d277SAaron Tomlin /* Remove this module from bug list, this uses list_del_rcu */ 1342cfc1d277SAaron Tomlin module_bug_cleanup(mod); 1343cfc1d277SAaron Tomlin /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ 1344cfc1d277SAaron Tomlin synchronize_rcu(); 134599bd9956SAaron Tomlin if (try_add_tainted_module(mod)) 134699bd9956SAaron Tomlin pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", 134799bd9956SAaron Tomlin mod->name); 1348cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 1349cfc1d277SAaron Tomlin 1350cfc1d277SAaron Tomlin /* This may be empty, but that's OK */ 1351cfc1d277SAaron Tomlin module_arch_freeing_init(mod); 1352cfc1d277SAaron Tomlin kfree(mod->args); 1353cfc1d277SAaron Tomlin percpu_modfree(mod); 1354cfc1d277SAaron Tomlin 13550db6f8d7SSuren Baghdasaryan free_mod_mem(mod); 1356cfc1d277SAaron Tomlin } 1357cfc1d277SAaron Tomlin 1358cfc1d277SAaron Tomlin void *__symbol_get(const char *symbol) 1359cfc1d277SAaron Tomlin { 1360cfc1d277SAaron Tomlin struct find_symbol_arg fsa = { 1361cfc1d277SAaron Tomlin .name = symbol, 1362cfc1d277SAaron Tomlin .gplok = true, 1363cfc1d277SAaron Tomlin .warn = true, 1364cfc1d277SAaron Tomlin }; 1365cfc1d277SAaron Tomlin 1366cfc1d277SAaron Tomlin preempt_disable(); 13679011e49dSChristoph Hellwig if (!find_symbol(&fsa)) 13689011e49dSChristoph Hellwig goto fail; 13699011e49dSChristoph Hellwig if (fsa.license != GPL_ONLY) { 13709011e49dSChristoph Hellwig pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", 13719011e49dSChristoph Hellwig symbol); 13729011e49dSChristoph Hellwig goto fail; 1373cfc1d277SAaron Tomlin } 13749011e49dSChristoph Hellwig if (strong_try_module_get(fsa.owner)) 13759011e49dSChristoph Hellwig goto fail; 1376cfc1d277SAaron Tomlin preempt_enable(); 1377cfc1d277SAaron Tomlin return (void *)kernel_symbol_value(fsa.sym); 13789011e49dSChristoph Hellwig fail: 13799011e49dSChristoph Hellwig preempt_enable(); 13809011e49dSChristoph Hellwig return NULL; 1381cfc1d277SAaron Tomlin } 1382cfc1d277SAaron Tomlin EXPORT_SYMBOL_GPL(__symbol_get); 1383cfc1d277SAaron Tomlin 1384cfc1d277SAaron Tomlin /* 1385cfc1d277SAaron Tomlin * Ensure that an exported symbol [global namespace] does not already exist 1386cfc1d277SAaron Tomlin * in the kernel or in some other module's exported symbol table. 1387cfc1d277SAaron Tomlin * 1388cfc1d277SAaron Tomlin * You must hold the module_mutex. 
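 * For example, if a module exports a symbol that the kernel or an
 * already-loaded module also exports, the load is refused below with
 * -ENOEXEC instead of silently shadowing the existing definition.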
1389cfc1d277SAaron Tomlin */ 1390cfc1d277SAaron Tomlin static int verify_exported_symbols(struct module *mod) 1391cfc1d277SAaron Tomlin { 1392cfc1d277SAaron Tomlin unsigned int i; 1393cfc1d277SAaron Tomlin const struct kernel_symbol *s; 1394cfc1d277SAaron Tomlin struct { 1395cfc1d277SAaron Tomlin const struct kernel_symbol *sym; 1396cfc1d277SAaron Tomlin unsigned int num; 1397cfc1d277SAaron Tomlin } arr[] = { 1398cfc1d277SAaron Tomlin { mod->syms, mod->num_syms }, 1399cfc1d277SAaron Tomlin { mod->gpl_syms, mod->num_gpl_syms }, 1400cfc1d277SAaron Tomlin }; 1401cfc1d277SAaron Tomlin 1402cfc1d277SAaron Tomlin for (i = 0; i < ARRAY_SIZE(arr); i++) { 1403cfc1d277SAaron Tomlin for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { 1404cfc1d277SAaron Tomlin struct find_symbol_arg fsa = { 1405cfc1d277SAaron Tomlin .name = kernel_symbol_name(s), 1406cfc1d277SAaron Tomlin .gplok = true, 1407cfc1d277SAaron Tomlin }; 1408cfc1d277SAaron Tomlin if (find_symbol(&fsa)) { 1409cfc1d277SAaron Tomlin pr_err("%s: exports duplicate symbol %s" 1410cfc1d277SAaron Tomlin " (owned by %s)\n", 1411cfc1d277SAaron Tomlin mod->name, kernel_symbol_name(s), 1412cfc1d277SAaron Tomlin module_name(fsa.owner)); 1413cfc1d277SAaron Tomlin return -ENOEXEC; 1414cfc1d277SAaron Tomlin } 1415cfc1d277SAaron Tomlin } 1416cfc1d277SAaron Tomlin } 1417cfc1d277SAaron Tomlin return 0; 1418cfc1d277SAaron Tomlin } 1419cfc1d277SAaron Tomlin 1420cfc1d277SAaron Tomlin static bool ignore_undef_symbol(Elf_Half emachine, const char *name) 1421cfc1d277SAaron Tomlin { 1422cfc1d277SAaron Tomlin /* 1423cfc1d277SAaron Tomlin * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as 1424cfc1d277SAaron Tomlin * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. 1425cfc1d277SAaron Tomlin * i386 has a similar problem but may not deserve a fix. 1426cfc1d277SAaron Tomlin * 1427cfc1d277SAaron Tomlin * If we ever have to ignore many symbols, consider refactoring the code to 1428cfc1d277SAaron Tomlin * only warn if referenced by a relocation. 1429cfc1d277SAaron Tomlin */ 1430cfc1d277SAaron Tomlin if (emachine == EM_386 || emachine == EM_X86_64) 1431cfc1d277SAaron Tomlin return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); 1432cfc1d277SAaron Tomlin return false; 1433cfc1d277SAaron Tomlin } 1434cfc1d277SAaron Tomlin 1435cfc1d277SAaron Tomlin /* Change all symbols so that st_value encodes the pointer directly. */ 1436cfc1d277SAaron Tomlin static int simplify_symbols(struct module *mod, const struct load_info *info) 1437cfc1d277SAaron Tomlin { 1438cfc1d277SAaron Tomlin Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 1439cfc1d277SAaron Tomlin Elf_Sym *sym = (void *)symsec->sh_addr; 1440cfc1d277SAaron Tomlin unsigned long secbase; 1441cfc1d277SAaron Tomlin unsigned int i; 1442cfc1d277SAaron Tomlin int ret = 0; 1443cfc1d277SAaron Tomlin const struct kernel_symbol *ksym; 1444cfc1d277SAaron Tomlin 1445cfc1d277SAaron Tomlin for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { 1446cfc1d277SAaron Tomlin const char *name = info->strtab + sym[i].st_name; 1447cfc1d277SAaron Tomlin 1448cfc1d277SAaron Tomlin switch (sym[i].st_shndx) { 1449cfc1d277SAaron Tomlin case SHN_COMMON: 1450cfc1d277SAaron Tomlin /* Ignore common symbols */ 1451cfc1d277SAaron Tomlin if (!strncmp(name, "__gnu_lto", 9)) 1452cfc1d277SAaron Tomlin break; 1453cfc1d277SAaron Tomlin 1454cfc1d277SAaron Tomlin /* 1455cfc1d277SAaron Tomlin * We compiled with -fno-common. These are not 1456cfc1d277SAaron Tomlin * supposed to happen. 
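 * (Common symbols come from tentative definitions such as a file-scope
 * "int foo;" built without -fno-common; modern toolchains default to
 * -fno-common, so the only ones tolerated here are the compiler's
 * "__gnu_lto" markers.)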
1457cfc1d277SAaron Tomlin */ 1458cfc1d277SAaron Tomlin pr_debug("Common symbol: %s\n", name); 1459cfc1d277SAaron Tomlin pr_warn("%s: please compile with -fno-common\n", 1460cfc1d277SAaron Tomlin mod->name); 1461cfc1d277SAaron Tomlin ret = -ENOEXEC; 1462cfc1d277SAaron Tomlin break; 1463cfc1d277SAaron Tomlin 1464cfc1d277SAaron Tomlin case SHN_ABS: 1465cfc1d277SAaron Tomlin /* Don't need to do anything */ 1466b10addf3SJim Cromie pr_debug("Absolute symbol: 0x%08lx %s\n", 1467b10addf3SJim Cromie (long)sym[i].st_value, name); 1468cfc1d277SAaron Tomlin break; 1469cfc1d277SAaron Tomlin 1470cfc1d277SAaron Tomlin case SHN_LIVEPATCH: 1471cfc1d277SAaron Tomlin /* Livepatch symbols are resolved by livepatch */ 1472cfc1d277SAaron Tomlin break; 1473cfc1d277SAaron Tomlin 1474cfc1d277SAaron Tomlin case SHN_UNDEF: 1475cfc1d277SAaron Tomlin ksym = resolve_symbol_wait(mod, info, name); 1476cfc1d277SAaron Tomlin /* Ok if resolved. */ 1477cfc1d277SAaron Tomlin if (ksym && !IS_ERR(ksym)) { 1478cfc1d277SAaron Tomlin sym[i].st_value = kernel_symbol_value(ksym); 1479cfc1d277SAaron Tomlin break; 1480cfc1d277SAaron Tomlin } 1481cfc1d277SAaron Tomlin 1482cfc1d277SAaron Tomlin /* Ok if weak or ignored. */ 1483cfc1d277SAaron Tomlin if (!ksym && 1484cfc1d277SAaron Tomlin (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || 1485cfc1d277SAaron Tomlin ignore_undef_symbol(info->hdr->e_machine, name))) 1486cfc1d277SAaron Tomlin break; 1487cfc1d277SAaron Tomlin 1488cfc1d277SAaron Tomlin ret = PTR_ERR(ksym) ?: -ENOENT; 1489cfc1d277SAaron Tomlin pr_warn("%s: Unknown symbol %s (err %d)\n", 1490cfc1d277SAaron Tomlin mod->name, name, ret); 1491cfc1d277SAaron Tomlin break; 1492cfc1d277SAaron Tomlin 1493cfc1d277SAaron Tomlin default: 1494cfc1d277SAaron Tomlin /* Divert to percpu allocation if a percpu var. */ 1495cfc1d277SAaron Tomlin if (sym[i].st_shndx == info->index.pcpu) 1496cfc1d277SAaron Tomlin secbase = (unsigned long)mod_percpu(mod); 1497cfc1d277SAaron Tomlin else 1498cfc1d277SAaron Tomlin secbase = info->sechdrs[sym[i].st_shndx].sh_addr; 1499cfc1d277SAaron Tomlin sym[i].st_value += secbase; 1500cfc1d277SAaron Tomlin break; 1501cfc1d277SAaron Tomlin } 1502cfc1d277SAaron Tomlin } 1503cfc1d277SAaron Tomlin 1504cfc1d277SAaron Tomlin return ret; 1505cfc1d277SAaron Tomlin } 1506cfc1d277SAaron Tomlin 1507cfc1d277SAaron Tomlin static int apply_relocations(struct module *mod, const struct load_info *info) 1508cfc1d277SAaron Tomlin { 1509cfc1d277SAaron Tomlin unsigned int i; 1510cfc1d277SAaron Tomlin int err = 0; 1511cfc1d277SAaron Tomlin 1512cfc1d277SAaron Tomlin /* Now do relocations. */ 1513cfc1d277SAaron Tomlin for (i = 1; i < info->hdr->e_shnum; i++) { 1514cfc1d277SAaron Tomlin unsigned int infosec = info->sechdrs[i].sh_info; 1515cfc1d277SAaron Tomlin 1516cfc1d277SAaron Tomlin /* Not a valid relocation section? 
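 * (For SHT_REL/SHT_RELA sections, sh_info holds the index of the section
 * the relocations apply to; an out-of-range index is simply skipped
 * rather than treated as fatal.)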
*/ 1517cfc1d277SAaron Tomlin if (infosec >= info->hdr->e_shnum) 1518cfc1d277SAaron Tomlin continue; 1519cfc1d277SAaron Tomlin 1520cfc1d277SAaron Tomlin /* Don't bother with non-allocated sections */ 1521cfc1d277SAaron Tomlin if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) 1522cfc1d277SAaron Tomlin continue; 1523cfc1d277SAaron Tomlin 1524cfc1d277SAaron Tomlin if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) 1525cfc1d277SAaron Tomlin err = klp_apply_section_relocs(mod, info->sechdrs, 1526cfc1d277SAaron Tomlin info->secstrings, 1527cfc1d277SAaron Tomlin info->strtab, 1528cfc1d277SAaron Tomlin info->index.sym, i, 1529cfc1d277SAaron Tomlin NULL); 1530cfc1d277SAaron Tomlin else if (info->sechdrs[i].sh_type == SHT_REL) 1531cfc1d277SAaron Tomlin err = apply_relocate(info->sechdrs, info->strtab, 1532cfc1d277SAaron Tomlin info->index.sym, i, mod); 1533cfc1d277SAaron Tomlin else if (info->sechdrs[i].sh_type == SHT_RELA) 1534cfc1d277SAaron Tomlin err = apply_relocate_add(info->sechdrs, info->strtab, 1535cfc1d277SAaron Tomlin info->index.sym, i, mod); 1536cfc1d277SAaron Tomlin if (err < 0) 1537cfc1d277SAaron Tomlin break; 1538cfc1d277SAaron Tomlin } 1539cfc1d277SAaron Tomlin return err; 1540cfc1d277SAaron Tomlin } 1541cfc1d277SAaron Tomlin 1542cfc1d277SAaron Tomlin /* Additional bytes needed by arch in front of individual sections */ 1543cfc1d277SAaron Tomlin unsigned int __weak arch_mod_section_prepend(struct module *mod, 1544cfc1d277SAaron Tomlin unsigned int section) 1545cfc1d277SAaron Tomlin { 1546cfc1d277SAaron Tomlin /* default implementation just returns zero */ 1547cfc1d277SAaron Tomlin return 0; 1548cfc1d277SAaron Tomlin } 1549cfc1d277SAaron Tomlin 1550ac3b4328SSong Liu long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, 1551cfc1d277SAaron Tomlin Elf_Shdr *sechdr, unsigned int section) 1552cfc1d277SAaron Tomlin { 1553ac3b4328SSong Liu long offset; 1554ac3b4328SSong Liu long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT; 1555cfc1d277SAaron Tomlin 1556ac3b4328SSong Liu mod->mem[type].size += arch_mod_section_prepend(mod, section); 1557ac3b4328SSong Liu offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1); 1558ac3b4328SSong Liu mod->mem[type].size = offset + sechdr->sh_size; 1559ac3b4328SSong Liu 1560ac3b4328SSong Liu WARN_ON_ONCE(offset & mask); 1561ac3b4328SSong Liu return offset | mask; 1562cfc1d277SAaron Tomlin } 1563cfc1d277SAaron Tomlin 15642abcc4b5SJames Morse bool module_init_layout_section(const char *sname) 1565cfc1d277SAaron Tomlin { 1566cfc1d277SAaron Tomlin #ifndef CONFIG_MODULE_UNLOAD 1567cfc1d277SAaron Tomlin if (module_exit_section(sname)) 1568cfc1d277SAaron Tomlin return true; 1569cfc1d277SAaron Tomlin #endif 1570cfc1d277SAaron Tomlin return module_init_section(sname); 1571cfc1d277SAaron Tomlin } 1572cfc1d277SAaron Tomlin 1573ac3b4328SSong Liu static void __layout_sections(struct module *mod, struct load_info *info, bool is_init) 1574cfc1d277SAaron Tomlin { 1575ac3b4328SSong Liu unsigned int m, i; 1576ac3b4328SSong Liu 1577ac3b4328SSong Liu static const unsigned long masks[][2] = { 1578cfc1d277SAaron Tomlin /* 1579cfc1d277SAaron Tomlin * NOTE: all executable code must be the first section 1580cfc1d277SAaron Tomlin * in this array; otherwise modify the text_size 1581cfc1d277SAaron Tomlin * finder in the two loops below 1582cfc1d277SAaron Tomlin */ 1583cfc1d277SAaron Tomlin { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, 1584cfc1d277SAaron Tomlin { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, 1585cfc1d277SAaron 
Tomlin { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, 1586cfc1d277SAaron Tomlin { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, 1587cfc1d277SAaron Tomlin { ARCH_SHF_SMALL | SHF_ALLOC, 0 } 1588cfc1d277SAaron Tomlin }; 1589ac3b4328SSong Liu static const int core_m_to_mem_type[] = { 1590ac3b4328SSong Liu MOD_TEXT, 1591ac3b4328SSong Liu MOD_RODATA, 1592ac3b4328SSong Liu MOD_RO_AFTER_INIT, 1593ac3b4328SSong Liu MOD_DATA, 1594db3e33ddSSong Liu MOD_DATA, 1595ac3b4328SSong Liu }; 1596ac3b4328SSong Liu static const int init_m_to_mem_type[] = { 1597ac3b4328SSong Liu MOD_INIT_TEXT, 1598ac3b4328SSong Liu MOD_INIT_RODATA, 1599ac3b4328SSong Liu MOD_INVALID, 1600ac3b4328SSong Liu MOD_INIT_DATA, 1601db3e33ddSSong Liu MOD_INIT_DATA, 1602ac3b4328SSong Liu }; 1603ac3b4328SSong Liu 1604ac3b4328SSong Liu for (m = 0; m < ARRAY_SIZE(masks); ++m) { 1605ac3b4328SSong Liu enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m]; 1606ac3b4328SSong Liu 1607ac3b4328SSong Liu for (i = 0; i < info->hdr->e_shnum; ++i) { 1608ac3b4328SSong Liu Elf_Shdr *s = &info->sechdrs[i]; 1609ac3b4328SSong Liu const char *sname = info->secstrings + s->sh_name; 1610ac3b4328SSong Liu 1611ac3b4328SSong Liu if ((s->sh_flags & masks[m][0]) != masks[m][0] 1612ac3b4328SSong Liu || (s->sh_flags & masks[m][1]) 1613ac3b4328SSong Liu || s->sh_entsize != ~0UL 1614ac3b4328SSong Liu || is_init != module_init_layout_section(sname)) 1615ac3b4328SSong Liu continue; 1616ac3b4328SSong Liu 1617ac3b4328SSong Liu if (WARN_ON_ONCE(type == MOD_INVALID)) 1618ac3b4328SSong Liu continue; 1619ac3b4328SSong Liu 16200db6f8d7SSuren Baghdasaryan /* 16210db6f8d7SSuren Baghdasaryan * Do not allocate codetag memory as we load it into 16220db6f8d7SSuren Baghdasaryan * preallocated contiguous memory. 16230db6f8d7SSuren Baghdasaryan */ 16240db6f8d7SSuren Baghdasaryan if (codetag_needs_module_section(mod, sname, s->sh_size)) { 16250db6f8d7SSuren Baghdasaryan /* 16260db6f8d7SSuren Baghdasaryan * s->sh_entsize won't be used but populate the 16270db6f8d7SSuren Baghdasaryan * type field to avoid confusion. 16280db6f8d7SSuren Baghdasaryan */ 16290db6f8d7SSuren Baghdasaryan s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) 16300db6f8d7SSuren Baghdasaryan << SH_ENTSIZE_TYPE_SHIFT; 16310db6f8d7SSuren Baghdasaryan continue; 16320db6f8d7SSuren Baghdasaryan } 16330db6f8d7SSuren Baghdasaryan 1634ac3b4328SSong Liu s->sh_entsize = module_get_offset_and_type(mod, type, s, i); 1635ac3b4328SSong Liu pr_debug("\t%s\n", sname); 1636ac3b4328SSong Liu } 1637ac3b4328SSong Liu } 1638ac3b4328SSong Liu } 1639ac3b4328SSong Liu 1640ac3b4328SSong Liu /* 1641ac3b4328SSong Liu * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 1642ac3b4328SSong Liu * might -- code, read-only data, read-write data, small data. Tally 1643ac3b4328SSong Liu * sizes, and place the offsets into sh_entsize fields: high bit means it 1644ac3b4328SSong Liu * belongs in init. 
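 * As a rough sketch of the encoding done by module_get_offset_and_type()
 * above: a section laid out at byte offset 0x40 of its module_memory
 * region ends up with
 *
 *	sh_entsize = 0x40 | ((type & SH_ENTSIZE_TYPE_MASK)
 *			     << SH_ENTSIZE_TYPE_SHIFT);
 *
 * so later stages can recover both the mod_mem_type and the offset from
 * this one field.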
1645ac3b4328SSong Liu */ 1646ac3b4328SSong Liu static void layout_sections(struct module *mod, struct load_info *info) 1647ac3b4328SSong Liu { 1648ac3b4328SSong Liu unsigned int i; 1649cfc1d277SAaron Tomlin 1650cfc1d277SAaron Tomlin for (i = 0; i < info->hdr->e_shnum; i++) 1651cfc1d277SAaron Tomlin info->sechdrs[i].sh_entsize = ~0UL; 1652cfc1d277SAaron Tomlin 16536ed81802SJim Cromie pr_debug("Core section allocation order for %s:\n", mod->name); 1654ac3b4328SSong Liu __layout_sections(mod, info, false); 1655cfc1d277SAaron Tomlin 16566ed81802SJim Cromie pr_debug("Init section allocation order for %s:\n", mod->name); 1657ac3b4328SSong Liu __layout_sections(mod, info, true); 1658cfc1d277SAaron Tomlin } 1659cfc1d277SAaron Tomlin 1660ad8d3a36SLuis Chamberlain static void module_license_taint_check(struct module *mod, const char *license) 1661cfc1d277SAaron Tomlin { 1662cfc1d277SAaron Tomlin if (!license) 1663cfc1d277SAaron Tomlin license = "unspecified"; 1664cfc1d277SAaron Tomlin 1665cfc1d277SAaron Tomlin if (!license_is_gpl_compatible(license)) { 1666cfc1d277SAaron Tomlin if (!test_taint(TAINT_PROPRIETARY_MODULE)) 1667cfc1d277SAaron Tomlin pr_warn("%s: module license '%s' taints kernel.\n", 1668cfc1d277SAaron Tomlin mod->name, license); 1669cfc1d277SAaron Tomlin add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 1670cfc1d277SAaron Tomlin LOCKDEP_NOW_UNRELIABLE); 1671cfc1d277SAaron Tomlin } 1672cfc1d277SAaron Tomlin } 1673cfc1d277SAaron Tomlin 1674cfc1d277SAaron Tomlin static void setup_modinfo(struct module *mod, struct load_info *info) 1675cfc1d277SAaron Tomlin { 1676f3227ffdSThomas Weißschuh const struct module_attribute *attr; 1677cfc1d277SAaron Tomlin int i; 1678cfc1d277SAaron Tomlin 1679cfc1d277SAaron Tomlin for (i = 0; (attr = modinfo_attrs[i]); i++) { 1680cfc1d277SAaron Tomlin if (attr->setup) 1681cfc1d277SAaron Tomlin attr->setup(mod, get_modinfo(info, attr->attr.name)); 1682cfc1d277SAaron Tomlin } 1683cfc1d277SAaron Tomlin } 1684cfc1d277SAaron Tomlin 1685cfc1d277SAaron Tomlin static void free_modinfo(struct module *mod) 1686cfc1d277SAaron Tomlin { 1687f3227ffdSThomas Weißschuh const struct module_attribute *attr; 1688cfc1d277SAaron Tomlin int i; 1689cfc1d277SAaron Tomlin 1690cfc1d277SAaron Tomlin for (i = 0; (attr = modinfo_attrs[i]); i++) { 1691cfc1d277SAaron Tomlin if (attr->free) 1692cfc1d277SAaron Tomlin attr->free(mod); 1693cfc1d277SAaron Tomlin } 1694cfc1d277SAaron Tomlin } 1695cfc1d277SAaron Tomlin 1696cfc1d277SAaron Tomlin bool __weak module_init_section(const char *name) 1697cfc1d277SAaron Tomlin { 1698cfc1d277SAaron Tomlin return strstarts(name, ".init"); 1699cfc1d277SAaron Tomlin } 1700cfc1d277SAaron Tomlin 1701cfc1d277SAaron Tomlin bool __weak module_exit_section(const char *name) 1702cfc1d277SAaron Tomlin { 1703cfc1d277SAaron Tomlin return strstarts(name, ".exit"); 1704cfc1d277SAaron Tomlin } 1705cfc1d277SAaron Tomlin 1706f4392216SMatthew Maurer static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr) 1707cfc1d277SAaron Tomlin { 1708cfc1d277SAaron Tomlin #if defined(CONFIG_64BIT) 1709cfc1d277SAaron Tomlin unsigned long long secend; 1710cfc1d277SAaron Tomlin #else 1711cfc1d277SAaron Tomlin unsigned long secend; 1712cfc1d277SAaron Tomlin #endif 1713cfc1d277SAaron Tomlin 1714cfc1d277SAaron Tomlin /* 1715cfc1d277SAaron Tomlin * Check for both overflow and offset/size being 1716cfc1d277SAaron Tomlin * too large. 
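 * E.g. a crafted sh_offset near ULONG_MAX makes secend wrap below
 * sh_offset and trips the first comparison; the second comparison rejects
 * sections extending past the buffer copied in from userspace.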
1717cfc1d277SAaron Tomlin */ 1718cfc1d277SAaron Tomlin secend = shdr->sh_offset + shdr->sh_size; 1719cfc1d277SAaron Tomlin if (secend < shdr->sh_offset || secend > info->len) 1720cfc1d277SAaron Tomlin return -ENOEXEC; 1721cfc1d277SAaron Tomlin 1722cfc1d277SAaron Tomlin return 0; 1723cfc1d277SAaron Tomlin } 1724cfc1d277SAaron Tomlin 172590f8f312SMatthew Maurer /** 172690f8f312SMatthew Maurer * elf_validity_ehdr() - Checks an ELF header for module validity 172790f8f312SMatthew Maurer * @info: Load info containing the ELF header to check 172890f8f312SMatthew Maurer * 172990f8f312SMatthew Maurer * Checks whether an ELF header could belong to a valid module. Checks: 173090f8f312SMatthew Maurer * 173190f8f312SMatthew Maurer * * ELF header is within the data the user provided 173290f8f312SMatthew Maurer * * ELF magic is present 173390f8f312SMatthew Maurer * * It is relocatable (not final linked, not core file, etc.) 173490f8f312SMatthew Maurer * * The header's machine type matches what the architecture expects. 173590f8f312SMatthew Maurer * * Optional arch-specific hook for other properties 173690f8f312SMatthew Maurer * - module_elf_check_arch() is currently only used by PPC to check 173790f8f312SMatthew Maurer * ELF ABI version, but may be used by others in the future. 173890f8f312SMatthew Maurer * 173990f8f312SMatthew Maurer * Return: %0 if valid, %-ENOEXEC on failure. 174090f8f312SMatthew Maurer */ 174190f8f312SMatthew Maurer static int elf_validity_ehdr(const struct load_info *info) 174290f8f312SMatthew Maurer { 174390f8f312SMatthew Maurer if (info->len < sizeof(*(info->hdr))) { 174490f8f312SMatthew Maurer pr_err("Invalid ELF header len %lu\n", info->len); 174590f8f312SMatthew Maurer return -ENOEXEC; 174690f8f312SMatthew Maurer } 174790f8f312SMatthew Maurer if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { 174890f8f312SMatthew Maurer pr_err("Invalid ELF header magic: != %s\n", ELFMAG); 174990f8f312SMatthew Maurer return -ENOEXEC; 175090f8f312SMatthew Maurer } 175190f8f312SMatthew Maurer if (info->hdr->e_type != ET_REL) { 175290f8f312SMatthew Maurer pr_err("Invalid ELF header type: %u != %u\n", 175390f8f312SMatthew Maurer info->hdr->e_type, ET_REL); 175490f8f312SMatthew Maurer return -ENOEXEC; 175590f8f312SMatthew Maurer } 175690f8f312SMatthew Maurer if (!elf_check_arch(info->hdr)) { 175790f8f312SMatthew Maurer pr_err("Invalid architecture in ELF header: %u\n", 175890f8f312SMatthew Maurer info->hdr->e_machine); 175990f8f312SMatthew Maurer return -ENOEXEC; 176090f8f312SMatthew Maurer } 176190f8f312SMatthew Maurer if (!module_elf_check_arch(info->hdr)) { 176290f8f312SMatthew Maurer pr_err("Invalid module architecture in ELF header: %u\n", 176390f8f312SMatthew Maurer info->hdr->e_machine); 176490f8f312SMatthew Maurer return -ENOEXEC; 176590f8f312SMatthew Maurer } 176690f8f312SMatthew Maurer return 0; 176790f8f312SMatthew Maurer } 176890f8f312SMatthew Maurer 1769c92aab81SMatthew Maurer /** 1770c92aab81SMatthew Maurer * elf_validity_cache_sechdrs() - Cache section headers if valid 1771c92aab81SMatthew Maurer * @info: Load info to compute section headers from 1772c92aab81SMatthew Maurer * 1773c92aab81SMatthew Maurer * Checks: 1774c92aab81SMatthew Maurer * 1775c92aab81SMatthew Maurer * * ELF header is valid (see elf_validity_ehdr()) 1776c92aab81SMatthew Maurer * * Section headers are the size we expect 1777c92aab81SMatthew Maurer * * Section array fits in the user provided data 1778c92aab81SMatthew Maurer * * Section index 0 is NULL 1779c92aab81SMatthew Maurer * * Section contents are inbounds 
1780c92aab81SMatthew Maurer * 1781c92aab81SMatthew Maurer * Then updates @info with a &load_info->sechdrs pointer if valid. 1782c92aab81SMatthew Maurer * 1783c92aab81SMatthew Maurer * Return: %0 if valid, negative error code if validation failed. 1784c92aab81SMatthew Maurer */ 1785c92aab81SMatthew Maurer static int elf_validity_cache_sechdrs(struct load_info *info) 1786c92aab81SMatthew Maurer { 1787c92aab81SMatthew Maurer Elf_Shdr *sechdrs; 1788c92aab81SMatthew Maurer Elf_Shdr *shdr; 1789c92aab81SMatthew Maurer int i; 1790c92aab81SMatthew Maurer int err; 1791c92aab81SMatthew Maurer 1792c92aab81SMatthew Maurer err = elf_validity_ehdr(info); 1793c92aab81SMatthew Maurer if (err < 0) 1794c92aab81SMatthew Maurer return err; 1795c92aab81SMatthew Maurer 1796c92aab81SMatthew Maurer if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { 1797c92aab81SMatthew Maurer pr_err("Invalid ELF section header size\n"); 1798c92aab81SMatthew Maurer return -ENOEXEC; 1799c92aab81SMatthew Maurer } 1800c92aab81SMatthew Maurer 1801c92aab81SMatthew Maurer /* 1802c92aab81SMatthew Maurer * e_shnum is 16 bits, and sizeof(Elf_Shdr) is 1803c92aab81SMatthew Maurer * known and small. So e_shnum * sizeof(Elf_Shdr) 1804c92aab81SMatthew Maurer * will not overflow unsigned long on any platform. 1805c92aab81SMatthew Maurer */ 1806c92aab81SMatthew Maurer if (info->hdr->e_shoff >= info->len 1807c92aab81SMatthew Maurer || (info->hdr->e_shnum * sizeof(Elf_Shdr) > 1808c92aab81SMatthew Maurer info->len - info->hdr->e_shoff)) { 1809c92aab81SMatthew Maurer pr_err("Invalid ELF section header overflow\n"); 1810c92aab81SMatthew Maurer return -ENOEXEC; 1811c92aab81SMatthew Maurer } 1812c92aab81SMatthew Maurer 1813c92aab81SMatthew Maurer sechdrs = (void *)info->hdr + info->hdr->e_shoff; 1814c92aab81SMatthew Maurer 1815c92aab81SMatthew Maurer /* 1816c92aab81SMatthew Maurer * The code assumes that section 0 has a length of zero and 1817c92aab81SMatthew Maurer * an addr of zero, so check for it. 
1818c92aab81SMatthew Maurer */ 1819c92aab81SMatthew Maurer if (sechdrs[0].sh_type != SHT_NULL 1820c92aab81SMatthew Maurer || sechdrs[0].sh_size != 0 1821c92aab81SMatthew Maurer || sechdrs[0].sh_addr != 0) { 1822c92aab81SMatthew Maurer pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", 1823c92aab81SMatthew Maurer sechdrs[0].sh_type); 1824c92aab81SMatthew Maurer return -ENOEXEC; 1825c92aab81SMatthew Maurer } 1826c92aab81SMatthew Maurer 1827c92aab81SMatthew Maurer /* Validate contents are inbounds */ 1828c92aab81SMatthew Maurer for (i = 1; i < info->hdr->e_shnum; i++) { 1829c92aab81SMatthew Maurer shdr = &sechdrs[i]; 1830c92aab81SMatthew Maurer switch (shdr->sh_type) { 1831c92aab81SMatthew Maurer case SHT_NULL: 1832c92aab81SMatthew Maurer case SHT_NOBITS: 1833c92aab81SMatthew Maurer /* No contents, offset/size don't mean anything */ 1834c92aab81SMatthew Maurer continue; 1835c92aab81SMatthew Maurer default: 1836c92aab81SMatthew Maurer err = validate_section_offset(info, shdr); 1837c92aab81SMatthew Maurer if (err < 0) { 1838c92aab81SMatthew Maurer pr_err("Invalid ELF section in module (section %u type %u)\n", 1839c92aab81SMatthew Maurer i, shdr->sh_type); 1840c92aab81SMatthew Maurer return err; 1841c92aab81SMatthew Maurer } 1842c92aab81SMatthew Maurer } 1843c92aab81SMatthew Maurer } 1844c92aab81SMatthew Maurer 1845c92aab81SMatthew Maurer info->sechdrs = sechdrs; 1846c92aab81SMatthew Maurer 1847c92aab81SMatthew Maurer return 0; 1848c92aab81SMatthew Maurer } 1849c92aab81SMatthew Maurer 18503c5700aeSMatthew Maurer /** 18513c5700aeSMatthew Maurer * elf_validity_cache_secstrings() - Caches section names if valid 18523c5700aeSMatthew Maurer * @info: Load info to cache section names from. Must have valid sechdrs. 18533c5700aeSMatthew Maurer * 18543c5700aeSMatthew Maurer * Specifically checks: 18553c5700aeSMatthew Maurer * 18563c5700aeSMatthew Maurer * * Section name table index is inbounds of section headers 18573c5700aeSMatthew Maurer * * Section name table is not empty 18583c5700aeSMatthew Maurer * * Section name table is NUL terminated 18593c5700aeSMatthew Maurer * * All section name offsets are inbounds of the section 18603c5700aeSMatthew Maurer * 18613c5700aeSMatthew Maurer * Then updates @info with a &load_info->secstrings pointer if valid. 18623c5700aeSMatthew Maurer * 18633c5700aeSMatthew Maurer * Return: %0 if valid, negative error code if validation failed. 18643c5700aeSMatthew Maurer */ 18653c5700aeSMatthew Maurer static int elf_validity_cache_secstrings(struct load_info *info) 18663c5700aeSMatthew Maurer { 18673c5700aeSMatthew Maurer Elf_Shdr *strhdr, *shdr; 18683c5700aeSMatthew Maurer char *secstrings; 18693c5700aeSMatthew Maurer int i; 18703c5700aeSMatthew Maurer 18713c5700aeSMatthew Maurer /* 18723c5700aeSMatthew Maurer * Verify if the section name table index is valid. 
18733c5700aeSMatthew Maurer */ 18743c5700aeSMatthew Maurer if (info->hdr->e_shstrndx == SHN_UNDEF 18753c5700aeSMatthew Maurer || info->hdr->e_shstrndx >= info->hdr->e_shnum) { 18763c5700aeSMatthew Maurer pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", 18773c5700aeSMatthew Maurer info->hdr->e_shstrndx, info->hdr->e_shstrndx, 18783c5700aeSMatthew Maurer info->hdr->e_shnum); 18793c5700aeSMatthew Maurer return -ENOEXEC; 18803c5700aeSMatthew Maurer } 18813c5700aeSMatthew Maurer 18823c5700aeSMatthew Maurer strhdr = &info->sechdrs[info->hdr->e_shstrndx]; 18833c5700aeSMatthew Maurer 18843c5700aeSMatthew Maurer /* 18853c5700aeSMatthew Maurer * The section name table must be NUL-terminated, as required 18863c5700aeSMatthew Maurer * by the spec. This makes strcmp and pr_* calls that access 18873c5700aeSMatthew Maurer * strings in the section safe. 18883c5700aeSMatthew Maurer */ 18893c5700aeSMatthew Maurer secstrings = (void *)info->hdr + strhdr->sh_offset; 18903c5700aeSMatthew Maurer if (strhdr->sh_size == 0) { 18913c5700aeSMatthew Maurer pr_err("empty section name table\n"); 18923c5700aeSMatthew Maurer return -ENOEXEC; 18933c5700aeSMatthew Maurer } 18943c5700aeSMatthew Maurer if (secstrings[strhdr->sh_size - 1] != '\0') { 18953c5700aeSMatthew Maurer pr_err("ELF Spec violation: section name table isn't null terminated\n"); 18963c5700aeSMatthew Maurer return -ENOEXEC; 18973c5700aeSMatthew Maurer } 18983c5700aeSMatthew Maurer 18993c5700aeSMatthew Maurer for (i = 0; i < info->hdr->e_shnum; i++) { 19003c5700aeSMatthew Maurer shdr = &info->sechdrs[i]; 19013c5700aeSMatthew Maurer /* SHT_NULL means sh_name has an undefined value */ 19023c5700aeSMatthew Maurer if (shdr->sh_type == SHT_NULL) 19033c5700aeSMatthew Maurer continue; 19043c5700aeSMatthew Maurer if (shdr->sh_name >= strhdr->sh_size) { 19053c5700aeSMatthew Maurer pr_err("Invalid ELF section name in module (section %u type %u)\n", 19063c5700aeSMatthew Maurer i, shdr->sh_type); 19073c5700aeSMatthew Maurer return -ENOEXEC; 19083c5700aeSMatthew Maurer } 19093c5700aeSMatthew Maurer } 19103c5700aeSMatthew Maurer 19113c5700aeSMatthew Maurer info->secstrings = secstrings; 19123c5700aeSMatthew Maurer return 0; 19133c5700aeSMatthew Maurer } 19143c5700aeSMatthew Maurer 1915fbc0e4e4SMatthew Maurer /** 1916fbc0e4e4SMatthew Maurer * elf_validity_cache_index_info() - Validate and cache modinfo section 1917fbc0e4e4SMatthew Maurer * @info: Load info to populate the modinfo index on. 1918fbc0e4e4SMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated 1919fbc0e4e4SMatthew Maurer * 1920fbc0e4e4SMatthew Maurer * Checks that if there is a .modinfo section, it is unique. 1921fbc0e4e4SMatthew Maurer * Then, it caches its index in &load_info->index.info. 1922fbc0e4e4SMatthew Maurer * Finally, it tries to populate the name to improve error messages. 1923fbc0e4e4SMatthew Maurer * 1924fbc0e4e4SMatthew Maurer * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found. 
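 * Note: .modinfo itself is just a run of NUL-terminated "key=value"
 * strings (for instance "license=GPL", "name=<modname>"), which is the
 * format get_modinfo()/get_next_modinfo() above walk.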
1925fbc0e4e4SMatthew Maurer */ 1926fbc0e4e4SMatthew Maurer static int elf_validity_cache_index_info(struct load_info *info) 1927fbc0e4e4SMatthew Maurer { 1928fbc0e4e4SMatthew Maurer int info_idx; 1929fbc0e4e4SMatthew Maurer 1930fbc0e4e4SMatthew Maurer info_idx = find_any_unique_sec(info, ".modinfo"); 1931fbc0e4e4SMatthew Maurer 1932fbc0e4e4SMatthew Maurer if (info_idx == 0) 1933fbc0e4e4SMatthew Maurer /* Early return, no .modinfo */ 1934fbc0e4e4SMatthew Maurer return 0; 1935fbc0e4e4SMatthew Maurer 1936fbc0e4e4SMatthew Maurer if (info_idx < 0) { 1937fbc0e4e4SMatthew Maurer pr_err("Only one .modinfo section must exist.\n"); 1938fbc0e4e4SMatthew Maurer return -ENOEXEC; 1939fbc0e4e4SMatthew Maurer } 1940fbc0e4e4SMatthew Maurer 1941fbc0e4e4SMatthew Maurer info->index.info = info_idx; 1942fbc0e4e4SMatthew Maurer /* Try to find a name early so we can log errors with a module name */ 1943fbc0e4e4SMatthew Maurer info->name = get_modinfo(info, "name"); 1944fbc0e4e4SMatthew Maurer 1945fbc0e4e4SMatthew Maurer return 0; 1946fbc0e4e4SMatthew Maurer } 1947fbc0e4e4SMatthew Maurer 19480be41a93SMatthew Maurer /** 19490be41a93SMatthew Maurer * elf_validity_cache_index_mod() - Validates and caches this_module section 19500be41a93SMatthew Maurer * @info: Load info to cache this_module on. 19510be41a93SMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated 19520be41a93SMatthew Maurer * 19530be41a93SMatthew Maurer * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost 19540be41a93SMatthew Maurer * uses to refer to __this_module and lets us rely on THIS_MODULE to point 19550be41a93SMatthew Maurer * to &__this_module properly. The kernel's modpost declares it in each 19560be41a93SMatthew Maurer * module's *.mod.c file. If the struct module of the kernel changes, a full 19570be41a93SMatthew Maurer * kernel rebuild is required.
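 * As a rough illustration (not modpost's exact output), each *.mod.c
 * carries something along the lines of:
 *
 *	struct module __this_module
 *	__section(".gnu.linkonce.this_module") = {
 *		.name = KBUILD_MODNAME,
 *	};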
19580be41a93SMatthew Maurer * 19590be41a93SMatthew Maurer * We have a few expectations for this special section, this function 19600be41a93SMatthew Maurer * validates all this for us: 19610be41a93SMatthew Maurer * 19620be41a93SMatthew Maurer * * The section has contents 19630be41a93SMatthew Maurer * * The section is unique 19640be41a93SMatthew Maurer * * We expect the kernel to always have to allocate it: SHF_ALLOC 19650be41a93SMatthew Maurer * * The section size must match the kernel's run time's struct module 19660be41a93SMatthew Maurer * size 19670be41a93SMatthew Maurer * 19680be41a93SMatthew Maurer * If all checks pass, the index will be cached in &load_info->index.mod 19690be41a93SMatthew Maurer * 19700be41a93SMatthew Maurer * Return: %0 on validation success, %-ENOEXEC on failure 19710be41a93SMatthew Maurer */ 19720be41a93SMatthew Maurer static int elf_validity_cache_index_mod(struct load_info *info) 19730be41a93SMatthew Maurer { 19740be41a93SMatthew Maurer Elf_Shdr *shdr; 19750be41a93SMatthew Maurer int mod_idx; 19760be41a93SMatthew Maurer 19770be41a93SMatthew Maurer mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module"); 19780be41a93SMatthew Maurer if (mod_idx <= 0) { 19790be41a93SMatthew Maurer pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n", 19800be41a93SMatthew Maurer info->name ?: "(missing .modinfo section or name field)"); 19810be41a93SMatthew Maurer return -ENOEXEC; 19820be41a93SMatthew Maurer } 19830be41a93SMatthew Maurer 19840be41a93SMatthew Maurer shdr = &info->sechdrs[mod_idx]; 19850be41a93SMatthew Maurer 19860be41a93SMatthew Maurer if (shdr->sh_type == SHT_NOBITS) { 19870be41a93SMatthew Maurer pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", 19880be41a93SMatthew Maurer info->name ?: "(missing .modinfo section or name field)"); 19890be41a93SMatthew Maurer return -ENOEXEC; 19900be41a93SMatthew Maurer } 19910be41a93SMatthew Maurer 19920be41a93SMatthew Maurer if (!(shdr->sh_flags & SHF_ALLOC)) { 19930be41a93SMatthew Maurer pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", 19940be41a93SMatthew Maurer info->name ?: "(missing .modinfo section or name field)"); 19950be41a93SMatthew Maurer return -ENOEXEC; 19960be41a93SMatthew Maurer } 19970be41a93SMatthew Maurer 19980be41a93SMatthew Maurer if (shdr->sh_size != sizeof(struct module)) { 19990be41a93SMatthew Maurer pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", 20000be41a93SMatthew Maurer info->name ?: "(missing .modinfo section or name field)"); 20010be41a93SMatthew Maurer return -ENOEXEC; 20020be41a93SMatthew Maurer } 20030be41a93SMatthew Maurer 20040be41a93SMatthew Maurer info->index.mod = mod_idx; 20050be41a93SMatthew Maurer 20060be41a93SMatthew Maurer return 0; 20070be41a93SMatthew Maurer } 20080be41a93SMatthew Maurer 20099bd4982cSMatthew Maurer /** 20109bd4982cSMatthew Maurer * elf_validity_cache_index_sym() - Validate and cache symtab index 20119bd4982cSMatthew Maurer * @info: Load info to cache symtab index in. 20129bd4982cSMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated. 20139bd4982cSMatthew Maurer * 20149bd4982cSMatthew Maurer * Checks that there is exactly one symbol table, then caches its index in 20159bd4982cSMatthew Maurer * &load_info->index.sym. 20169bd4982cSMatthew Maurer * 20179bd4982cSMatthew Maurer * Return: %0 if valid, %-ENOEXEC on failure. 
20189bd4982cSMatthew Maurer */ 20199bd4982cSMatthew Maurer static int elf_validity_cache_index_sym(struct load_info *info) 20209bd4982cSMatthew Maurer { 20219bd4982cSMatthew Maurer unsigned int sym_idx; 20229bd4982cSMatthew Maurer unsigned int num_sym_secs = 0; 20239bd4982cSMatthew Maurer int i; 20249bd4982cSMatthew Maurer 20259bd4982cSMatthew Maurer for (i = 1; i < info->hdr->e_shnum; i++) { 20269bd4982cSMatthew Maurer if (info->sechdrs[i].sh_type == SHT_SYMTAB) { 20279bd4982cSMatthew Maurer num_sym_secs++; 20289bd4982cSMatthew Maurer sym_idx = i; 20299bd4982cSMatthew Maurer } 20309bd4982cSMatthew Maurer } 20319bd4982cSMatthew Maurer 20329bd4982cSMatthew Maurer if (num_sym_secs != 1) { 20339bd4982cSMatthew Maurer pr_warn("%s: module has no symbols (stripped?)\n", 20349bd4982cSMatthew Maurer info->name ?: "(missing .modinfo section or name field)"); 20359bd4982cSMatthew Maurer return -ENOEXEC; 20369bd4982cSMatthew Maurer } 20379bd4982cSMatthew Maurer 20389bd4982cSMatthew Maurer info->index.sym = sym_idx; 20399bd4982cSMatthew Maurer 20409bd4982cSMatthew Maurer return 0; 20419bd4982cSMatthew Maurer } 20420be41a93SMatthew Maurer 20430a939533SMatthew Maurer /** 20440a939533SMatthew Maurer * elf_validity_cache_index_str() - Validate and cache strtab index 20450a939533SMatthew Maurer * @info: Load info to cache strtab index in. 20460a939533SMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated. 20470a939533SMatthew Maurer * Must have &load_info->index.sym populated. 20480a939533SMatthew Maurer * 20490a939533SMatthew Maurer * Looks at the symbol table's associated string table, makes sure it is 20500a939533SMatthew Maurer * in-bounds, and caches it. 20510a939533SMatthew Maurer * 20520a939533SMatthew Maurer * Return: %0 if valid, %-ENOEXEC on failure. 20530a939533SMatthew Maurer */ 20540a939533SMatthew Maurer static int elf_validity_cache_index_str(struct load_info *info) 20550a939533SMatthew Maurer { 20560a939533SMatthew Maurer unsigned int str_idx = info->sechdrs[info->index.sym].sh_link; 20570a939533SMatthew Maurer 20580a939533SMatthew Maurer if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) { 20590a939533SMatthew Maurer pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", 20600a939533SMatthew Maurer str_idx, str_idx, info->hdr->e_shnum); 20610a939533SMatthew Maurer return -ENOEXEC; 20620a939533SMatthew Maurer } 20630a939533SMatthew Maurer 20640a939533SMatthew Maurer info->index.str = str_idx; 20650a939533SMatthew Maurer return 0; 20660a939533SMatthew Maurer } 20670a939533SMatthew Maurer 2068f3f56121SMatthew Maurer /** 206954ac1ac8SMatthew Maurer * elf_validity_cache_index_versions() - Validate and cache version indices 207054ac1ac8SMatthew Maurer * @info: Load info to cache version indices in. 207154ac1ac8SMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated. 207254ac1ac8SMatthew Maurer * @flags: Load flags, relevant to suppress version loading, see 207354ac1ac8SMatthew Maurer * uapi/linux/module.h 207454ac1ac8SMatthew Maurer * 207554ac1ac8SMatthew Maurer * If we're ignoring modversions based on @flags, zero all version indices 207654ac1ac8SMatthew Maurer * and return validity. 
Otherwise check: 207754ac1ac8SMatthew Maurer * 207854ac1ac8SMatthew Maurer * * If "__version_ext_crcs" is present, "__version_ext_names" is present 207954ac1ac8SMatthew Maurer * * There is a name present for every crc 208054ac1ac8SMatthew Maurer * 208154ac1ac8SMatthew Maurer * Then populate: 208254ac1ac8SMatthew Maurer * 208354ac1ac8SMatthew Maurer * * &load_info->index.vers 208454ac1ac8SMatthew Maurer * * &load_info->index.vers_ext_crc 208554ac1ac8SMatthew Maurer * * &load_info->index.vers_ext_names 208654ac1ac8SMatthew Maurer * 208754ac1ac8SMatthew Maurer * if present. 208854ac1ac8SMatthew Maurer * 208954ac1ac8SMatthew Maurer * Return: %0 if valid, %-ENOEXEC on failure. 209054ac1ac8SMatthew Maurer */ 209154ac1ac8SMatthew Maurer static int elf_validity_cache_index_versions(struct load_info *info, int flags) 209254ac1ac8SMatthew Maurer { 209354ac1ac8SMatthew Maurer unsigned int vers_ext_crc; 209454ac1ac8SMatthew Maurer unsigned int vers_ext_name; 209554ac1ac8SMatthew Maurer size_t crc_count; 209654ac1ac8SMatthew Maurer size_t remaining_len; 209754ac1ac8SMatthew Maurer size_t name_size; 209854ac1ac8SMatthew Maurer char *name; 209954ac1ac8SMatthew Maurer 210054ac1ac8SMatthew Maurer /* If modversions were suppressed, pretend we didn't find any */ 210154ac1ac8SMatthew Maurer if (flags & MODULE_INIT_IGNORE_MODVERSIONS) { 210254ac1ac8SMatthew Maurer info->index.vers = 0; 210354ac1ac8SMatthew Maurer info->index.vers_ext_crc = 0; 210454ac1ac8SMatthew Maurer info->index.vers_ext_name = 0; 210554ac1ac8SMatthew Maurer return 0; 210654ac1ac8SMatthew Maurer } 210754ac1ac8SMatthew Maurer 210854ac1ac8SMatthew Maurer vers_ext_crc = find_sec(info, "__version_ext_crcs"); 210954ac1ac8SMatthew Maurer vers_ext_name = find_sec(info, "__version_ext_names"); 211054ac1ac8SMatthew Maurer 211154ac1ac8SMatthew Maurer /* If we have one field, we must have the other */ 211254ac1ac8SMatthew Maurer if (!!vers_ext_crc != !!vers_ext_name) { 211354ac1ac8SMatthew Maurer pr_err("extended version crc+name presence does not match"); 211454ac1ac8SMatthew Maurer return -ENOEXEC; 211554ac1ac8SMatthew Maurer } 211654ac1ac8SMatthew Maurer 211754ac1ac8SMatthew Maurer /* 211854ac1ac8SMatthew Maurer * If we have extended version information, we should have the same 211954ac1ac8SMatthew Maurer * number of entries in every section.
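 * The crc section is a plain array of u32s and the name section is a
 * packed run of NUL-terminated strings, so the walk below consumes one
 * name per crc and fails if the names run out first.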
212054ac1ac8SMatthew Maurer */ 212154ac1ac8SMatthew Maurer if (vers_ext_crc) { 212254ac1ac8SMatthew Maurer crc_count = info->sechdrs[vers_ext_crc].sh_size / sizeof(u32); 212354ac1ac8SMatthew Maurer name = (void *)info->hdr + 212454ac1ac8SMatthew Maurer info->sechdrs[vers_ext_name].sh_offset; 212554ac1ac8SMatthew Maurer remaining_len = info->sechdrs[vers_ext_name].sh_size; 212654ac1ac8SMatthew Maurer 212754ac1ac8SMatthew Maurer while (crc_count--) { 212854ac1ac8SMatthew Maurer name_size = strnlen(name, remaining_len) + 1; 212954ac1ac8SMatthew Maurer if (name_size > remaining_len) { 213054ac1ac8SMatthew Maurer pr_err("more extended version crcs than names"); 213154ac1ac8SMatthew Maurer return -ENOEXEC; 213254ac1ac8SMatthew Maurer } 213354ac1ac8SMatthew Maurer remaining_len -= name_size; 213454ac1ac8SMatthew Maurer name += name_size; 213554ac1ac8SMatthew Maurer } 213654ac1ac8SMatthew Maurer } 213754ac1ac8SMatthew Maurer 213854ac1ac8SMatthew Maurer info->index.vers = find_sec(info, "__versions"); 213954ac1ac8SMatthew Maurer info->index.vers_ext_crc = vers_ext_crc; 214054ac1ac8SMatthew Maurer info->index.vers_ext_name = vers_ext_name; 214154ac1ac8SMatthew Maurer return 0; 214254ac1ac8SMatthew Maurer } 214354ac1ac8SMatthew Maurer 214454ac1ac8SMatthew Maurer /** 2145f3f56121SMatthew Maurer * elf_validity_cache_index() - Resolve, validate, cache section indices 2146f3f56121SMatthew Maurer * @info: Load info to read from and update. 2147f3f56121SMatthew Maurer * &load_info->sechdrs and &load_info->secstrings must be populated. 2148f3f56121SMatthew Maurer * @flags: Load flags, relevant to suppress version loading, see 2149f3f56121SMatthew Maurer * uapi/linux/module.h 2150f3f56121SMatthew Maurer * 2151f3f56121SMatthew Maurer * Populates &load_info->index, validating as it goes. 2152f3f56121SMatthew Maurer * See child functions for per-field validation: 2153f3f56121SMatthew Maurer * 2154f3f56121SMatthew Maurer * * elf_validity_cache_index_info() 2155f3f56121SMatthew Maurer * * elf_validity_cache_index_mod() 2156f3f56121SMatthew Maurer * * elf_validity_cache_index_sym() 2157f3f56121SMatthew Maurer * * elf_validity_cache_index_str() 215854ac1ac8SMatthew Maurer * * elf_validity_cache_index_versions() 2159f3f56121SMatthew Maurer * 2160f3f56121SMatthew Maurer * If CONFIG_SMP is enabled, load the percpu section by name with no 2161f3f56121SMatthew Maurer * validation. 2162f3f56121SMatthew Maurer * 2163f3f56121SMatthew Maurer * Return: 0 on success, negative error code if an index failed validation. 
2164f3f56121SMatthew Maurer */ 2165f3f56121SMatthew Maurer static int elf_validity_cache_index(struct load_info *info, int flags) 2166f3f56121SMatthew Maurer { 2167f3f56121SMatthew Maurer int err; 2168f3f56121SMatthew Maurer 2169f3f56121SMatthew Maurer err = elf_validity_cache_index_info(info); 2170f3f56121SMatthew Maurer if (err < 0) 2171f3f56121SMatthew Maurer return err; 2172f3f56121SMatthew Maurer err = elf_validity_cache_index_mod(info); 2173f3f56121SMatthew Maurer if (err < 0) 2174f3f56121SMatthew Maurer return err; 2175f3f56121SMatthew Maurer err = elf_validity_cache_index_sym(info); 2176f3f56121SMatthew Maurer if (err < 0) 2177f3f56121SMatthew Maurer return err; 2178f3f56121SMatthew Maurer err = elf_validity_cache_index_str(info); 2179f3f56121SMatthew Maurer if (err < 0) 2180f3f56121SMatthew Maurer return err; 218154ac1ac8SMatthew Maurer err = elf_validity_cache_index_versions(info, flags); 218254ac1ac8SMatthew Maurer if (err < 0) 218354ac1ac8SMatthew Maurer return err; 2184f3f56121SMatthew Maurer 2185f3f56121SMatthew Maurer info->index.pcpu = find_pcpusec(info); 2186f3f56121SMatthew Maurer 2187f3f56121SMatthew Maurer return 0; 2188f3f56121SMatthew Maurer } 2189f3f56121SMatthew Maurer 2190837031e0SMatthew Maurer /** 2191d979e3dfSMatthew Maurer * elf_validity_cache_strtab() - Validate and cache symbol string table 2192837031e0SMatthew Maurer * @info: Load info to read from and update. 2193837031e0SMatthew Maurer * Must have &load_info->sechdrs and &load_info->secstrings populated. 2194837031e0SMatthew Maurer * Must have &load_info->index populated. 2195837031e0SMatthew Maurer * 2196d979e3dfSMatthew Maurer * Checks: 2197d979e3dfSMatthew Maurer * 2198d979e3dfSMatthew Maurer * * The string table is not empty. 2199d979e3dfSMatthew Maurer * * The string table starts and ends with NUL (required by ELF spec). 2200d979e3dfSMatthew Maurer * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the 2201d979e3dfSMatthew Maurer * string table. 2202d979e3dfSMatthew Maurer * 2203d979e3dfSMatthew Maurer * And caches the pointer as &load_info->strtab in @info. 2204d979e3dfSMatthew Maurer * 2205837031e0SMatthew Maurer * Return: 0 on success, negative error code if a check failed. 
2206837031e0SMatthew Maurer */ 2207837031e0SMatthew Maurer static int elf_validity_cache_strtab(struct load_info *info) 2208837031e0SMatthew Maurer { 2209837031e0SMatthew Maurer Elf_Shdr *str_shdr = &info->sechdrs[info->index.str]; 2210d979e3dfSMatthew Maurer Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym]; 2211837031e0SMatthew Maurer char *strtab = (char *)info->hdr + str_shdr->sh_offset; 2212d979e3dfSMatthew Maurer Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset; 2213d979e3dfSMatthew Maurer int i; 2214d979e3dfSMatthew Maurer 2215d979e3dfSMatthew Maurer if (str_shdr->sh_size == 0) { 2216d979e3dfSMatthew Maurer pr_err("empty symbol string table\n"); 2217d979e3dfSMatthew Maurer return -ENOEXEC; 2218d979e3dfSMatthew Maurer } 2219d979e3dfSMatthew Maurer if (strtab[0] != '\0') { 2220d979e3dfSMatthew Maurer pr_err("symbol string table missing leading NUL\n"); 2221d979e3dfSMatthew Maurer return -ENOEXEC; 2222d979e3dfSMatthew Maurer } 2223d979e3dfSMatthew Maurer if (strtab[str_shdr->sh_size - 1] != '\0') { 2224d979e3dfSMatthew Maurer pr_err("symbol string table isn't NUL terminated\n"); 2225d979e3dfSMatthew Maurer return -ENOEXEC; 2226d979e3dfSMatthew Maurer } 2227d979e3dfSMatthew Maurer 2228d979e3dfSMatthew Maurer /* 2229d979e3dfSMatthew Maurer * Now that we know strtab is correctly structured, check symbol 2230d979e3dfSMatthew Maurer * starts are inbounds before they're used later. 2231d979e3dfSMatthew Maurer */ 2232d979e3dfSMatthew Maurer for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) { 2233d979e3dfSMatthew Maurer if (syms[i].st_name >= str_shdr->sh_size) { 2234d979e3dfSMatthew Maurer pr_err("symbol name out of bounds in string table"); 2235d979e3dfSMatthew Maurer return -ENOEXEC; 2236d979e3dfSMatthew Maurer } 2237d979e3dfSMatthew Maurer } 2238837031e0SMatthew Maurer 2239837031e0SMatthew Maurer info->strtab = strtab; 2240837031e0SMatthew Maurer return 0; 2241837031e0SMatthew Maurer } 2242837031e0SMatthew Maurer 2243cfc1d277SAaron Tomlin /* 22443d40bb90SLuis Chamberlain * Check userspace passed ELF module against our expectations, and cache 22453d40bb90SLuis Chamberlain * useful variables for further processing as we go. 2246cfc1d277SAaron Tomlin * 22473d40bb90SLuis Chamberlain * This does basic validity checks against section offsets and sizes, the 2248cfc1d277SAaron Tomlin * section name string table, and the indices used for it (sh_name). 22493d40bb90SLuis Chamberlain * 22503d40bb90SLuis Chamberlain * As a last step, since we're already checking the ELF sections we cache 22513d40bb90SLuis Chamberlain * useful variables which will be used later for our convenience: 22523d40bb90SLuis Chamberlain * 22533d40bb90SLuis Chamberlain * o pointers to section headers 22543d40bb90SLuis Chamberlain * o cache the modinfo symbol section 22553d40bb90SLuis Chamberlain * o cache the string symbol section 22563d40bb90SLuis Chamberlain * o cache the module section 22573d40bb90SLuis Chamberlain * 22583d40bb90SLuis Chamberlain * As a last step we set info->mod to the temporary copy of the module in 22593d40bb90SLuis Chamberlain * info->hdr. The final one will be allocated in move_module(). Any 22603d40bb90SLuis Chamberlain * modifications we make to our copy of the module will be carried over 22613d40bb90SLuis Chamberlain * to the final minted module. 
2262cfc1d277SAaron Tomlin */ 22633d40bb90SLuis Chamberlain static int elf_validity_cache_copy(struct load_info *info, int flags) 2264cfc1d277SAaron Tomlin { 2265cfc1d277SAaron Tomlin int err; 2266cfc1d277SAaron Tomlin 2267c92aab81SMatthew Maurer err = elf_validity_cache_sechdrs(info); 226890f8f312SMatthew Maurer if (err < 0) 2269cfc1d277SAaron Tomlin return err; 22703c5700aeSMatthew Maurer err = elf_validity_cache_secstrings(info); 22713c5700aeSMatthew Maurer if (err < 0) 2272cfc1d277SAaron Tomlin return err; 2273f3f56121SMatthew Maurer err = elf_validity_cache_index(info, flags); 22740a939533SMatthew Maurer if (err < 0) 22750a939533SMatthew Maurer return err; 2276837031e0SMatthew Maurer err = elf_validity_cache_strtab(info); 2277837031e0SMatthew Maurer if (err < 0) 2278837031e0SMatthew Maurer return err; 227946752820SLuis Chamberlain 228046752820SLuis Chamberlain /* This is temporary: point mod into copy of data. */ 22810be41a93SMatthew Maurer info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 228246752820SLuis Chamberlain 22831bb49db9SLuis Chamberlain /* 22841bb49db9SLuis Chamberlain * If we didn't load the .modinfo 'name' field earlier, fall back to 22851bb49db9SLuis Chamberlain * on-disk struct mod 'name' field. 22861bb49db9SLuis Chamberlain */ 22871bb49db9SLuis Chamberlain if (!info->name) 22881bb49db9SLuis Chamberlain info->name = info->mod->name; 22891bb49db9SLuis Chamberlain 2290cfc1d277SAaron Tomlin return 0; 2291cfc1d277SAaron Tomlin } 2292cfc1d277SAaron Tomlin 2293cfc1d277SAaron Tomlin #define COPY_CHUNK_SIZE (16*PAGE_SIZE) 2294cfc1d277SAaron Tomlin 2295cfc1d277SAaron Tomlin static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 2296cfc1d277SAaron Tomlin { 2297cfc1d277SAaron Tomlin do { 2298cfc1d277SAaron Tomlin unsigned long n = min(len, COPY_CHUNK_SIZE); 2299cfc1d277SAaron Tomlin 2300cfc1d277SAaron Tomlin if (copy_from_user(dst, usrc, n) != 0) 2301cfc1d277SAaron Tomlin return -EFAULT; 2302cfc1d277SAaron Tomlin cond_resched(); 2303cfc1d277SAaron Tomlin dst += n; 2304cfc1d277SAaron Tomlin usrc += n; 2305cfc1d277SAaron Tomlin len -= n; 2306cfc1d277SAaron Tomlin } while (len); 2307cfc1d277SAaron Tomlin return 0; 2308cfc1d277SAaron Tomlin } 2309cfc1d277SAaron Tomlin 2310cfc1d277SAaron Tomlin static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 2311cfc1d277SAaron Tomlin { 23121be9473eSAaron Tomlin if (!get_modinfo(info, "livepatch")) 23131be9473eSAaron Tomlin /* Nothing more to do */ 23141be9473eSAaron Tomlin return 0; 23151be9473eSAaron Tomlin 2316ed52cabeSLuis Chamberlain if (set_livepatch_module(mod)) 2317cfc1d277SAaron Tomlin return 0; 23181be9473eSAaron Tomlin 2319cfc1d277SAaron Tomlin pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 2320cfc1d277SAaron Tomlin mod->name); 2321cfc1d277SAaron Tomlin return -ENOEXEC; 2322cfc1d277SAaron Tomlin } 2323cfc1d277SAaron Tomlin 2324cfc1d277SAaron Tomlin static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 2325cfc1d277SAaron Tomlin { 2326cfc1d277SAaron Tomlin if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 2327cfc1d277SAaron Tomlin return; 2328cfc1d277SAaron Tomlin 2329cfc1d277SAaron Tomlin pr_warn("%s: loading module not compiled with retpoline compiler.\n", 2330cfc1d277SAaron Tomlin mod->name); 2331cfc1d277SAaron Tomlin } 2332cfc1d277SAaron Tomlin 2333cfc1d277SAaron Tomlin /* Sets info->hdr and info->len. 
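 * This is the init_module(2) path, where the whole module image is copied
 * in from userspace; finit_module(2) instead pulls the file in through
 * kernel_read_file().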
*/ 2334cfc1d277SAaron Tomlin static int copy_module_from_user(const void __user *umod, unsigned long len, 2335cfc1d277SAaron Tomlin struct load_info *info) 2336cfc1d277SAaron Tomlin { 2337cfc1d277SAaron Tomlin int err; 2338cfc1d277SAaron Tomlin 2339cfc1d277SAaron Tomlin info->len = len; 2340cfc1d277SAaron Tomlin if (info->len < sizeof(*(info->hdr))) 2341cfc1d277SAaron Tomlin return -ENOEXEC; 2342cfc1d277SAaron Tomlin 2343cfc1d277SAaron Tomlin err = security_kernel_load_data(LOADING_MODULE, true); 2344cfc1d277SAaron Tomlin if (err) 2345cfc1d277SAaron Tomlin return err; 2346cfc1d277SAaron Tomlin 2347cfc1d277SAaron Tomlin /* Suck in entire file: we'll want most of it. */ 2348cfc1d277SAaron Tomlin info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 2349cfc1d277SAaron Tomlin if (!info->hdr) 2350cfc1d277SAaron Tomlin return -ENOMEM; 2351cfc1d277SAaron Tomlin 2352cfc1d277SAaron Tomlin if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 2353cfc1d277SAaron Tomlin err = -EFAULT; 2354cfc1d277SAaron Tomlin goto out; 2355cfc1d277SAaron Tomlin } 2356cfc1d277SAaron Tomlin 2357cfc1d277SAaron Tomlin err = security_kernel_post_load_data((char *)info->hdr, info->len, 2358cfc1d277SAaron Tomlin LOADING_MODULE, "init_module"); 2359cfc1d277SAaron Tomlin out: 2360cfc1d277SAaron Tomlin if (err) 2361cfc1d277SAaron Tomlin vfree(info->hdr); 2362cfc1d277SAaron Tomlin 2363cfc1d277SAaron Tomlin return err; 2364cfc1d277SAaron Tomlin } 2365cfc1d277SAaron Tomlin 2366cfc1d277SAaron Tomlin static void free_copy(struct load_info *info, int flags) 2367cfc1d277SAaron Tomlin { 2368cfc1d277SAaron Tomlin if (flags & MODULE_INIT_COMPRESSED_FILE) 2369cfc1d277SAaron Tomlin module_decompress_cleanup(info); 2370cfc1d277SAaron Tomlin else 2371cfc1d277SAaron Tomlin vfree(info->hdr); 2372cfc1d277SAaron Tomlin } 2373cfc1d277SAaron Tomlin 2374cfc1d277SAaron Tomlin static int rewrite_section_headers(struct load_info *info, int flags) 2375cfc1d277SAaron Tomlin { 2376cfc1d277SAaron Tomlin unsigned int i; 2377cfc1d277SAaron Tomlin 2378cfc1d277SAaron Tomlin /* This should always be true, but let's be sure. */ 2379cfc1d277SAaron Tomlin info->sechdrs[0].sh_addr = 0; 2380cfc1d277SAaron Tomlin 2381cfc1d277SAaron Tomlin for (i = 1; i < info->hdr->e_shnum; i++) { 2382cfc1d277SAaron Tomlin Elf_Shdr *shdr = &info->sechdrs[i]; 2383cfc1d277SAaron Tomlin 2384cfc1d277SAaron Tomlin /* 2385cfc1d277SAaron Tomlin * Mark all sections sh_addr with their address in the 2386cfc1d277SAaron Tomlin * temporary image. 2387cfc1d277SAaron Tomlin */ 2388cfc1d277SAaron Tomlin shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; 2389cfc1d277SAaron Tomlin 2390cfc1d277SAaron Tomlin } 2391cfc1d277SAaron Tomlin 2392cfc1d277SAaron Tomlin /* Track but don't keep modinfo and version sections. 
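 * Clearing SHF_ALLOC on them means layout_sections() gives them no room
 * in the final module image and move_module() skips copying them, so they
 * remain reachable only through the temporary copy of the ELF.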
*/ 2393cfc1d277SAaron Tomlin info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 239454ac1ac8SMatthew Maurer info->sechdrs[info->index.vers_ext_crc].sh_flags &= 239554ac1ac8SMatthew Maurer ~(unsigned long)SHF_ALLOC; 239654ac1ac8SMatthew Maurer info->sechdrs[info->index.vers_ext_name].sh_flags &= 239754ac1ac8SMatthew Maurer ~(unsigned long)SHF_ALLOC; 2398cfc1d277SAaron Tomlin info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 2399cfc1d277SAaron Tomlin 2400cfc1d277SAaron Tomlin return 0; 2401cfc1d277SAaron Tomlin } 2402cfc1d277SAaron Tomlin 2403c8e0bd57SUwe Kleine-König static const char *const module_license_offenders[] = { 2404c8e0bd57SUwe Kleine-König /* driverloader was caught wrongly pretending to be under GPL */ 2405c8e0bd57SUwe Kleine-König "driverloader", 2406c8e0bd57SUwe Kleine-König 2407c8e0bd57SUwe Kleine-König /* lve claims to be GPL but upstream won't provide source */ 2408c8e0bd57SUwe Kleine-König "lve", 2409c8e0bd57SUwe Kleine-König }; 2410c8e0bd57SUwe Kleine-König 2411cfc1d277SAaron Tomlin /* 2412437c1f9cSLuis Chamberlain * These calls taint the kernel depending on certain module circumstances. */ 2413437c1f9cSLuis Chamberlain static void module_augment_kernel_taints(struct module *mod, struct load_info *info) 2414437c1f9cSLuis Chamberlain { 241572f08b3cSLuis Chamberlain int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); 2416c8e0bd57SUwe Kleine-König size_t i; 241772f08b3cSLuis Chamberlain 2418437c1f9cSLuis Chamberlain if (!get_modinfo(info, "intree")) { 2419437c1f9cSLuis Chamberlain if (!test_taint(TAINT_OOT_MODULE)) 2420437c1f9cSLuis Chamberlain pr_warn("%s: loading out-of-tree module taints kernel.\n", 2421437c1f9cSLuis Chamberlain mod->name); 2422437c1f9cSLuis Chamberlain add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 2423437c1f9cSLuis Chamberlain } 2424437c1f9cSLuis Chamberlain 2425437c1f9cSLuis Chamberlain check_modinfo_retpoline(mod, info); 2426437c1f9cSLuis Chamberlain 2427437c1f9cSLuis Chamberlain if (get_modinfo(info, "staging")) { 2428437c1f9cSLuis Chamberlain add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 2429437c1f9cSLuis Chamberlain pr_warn("%s: module is from the staging directory, the quality " 2430437c1f9cSLuis Chamberlain "is unknown, you have been warned.\n", mod->name); 2431437c1f9cSLuis Chamberlain } 2432437c1f9cSLuis Chamberlain 2433437c1f9cSLuis Chamberlain if (is_livepatch_module(mod)) { 2434437c1f9cSLuis Chamberlain add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); 2435437c1f9cSLuis Chamberlain pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", 2436437c1f9cSLuis Chamberlain mod->name); 2437437c1f9cSLuis Chamberlain } 2438437c1f9cSLuis Chamberlain 2439437c1f9cSLuis Chamberlain module_license_taint_check(mod, get_modinfo(info, "license")); 2440437c1f9cSLuis Chamberlain 2441437c1f9cSLuis Chamberlain if (get_modinfo(info, "test")) { 2442437c1f9cSLuis Chamberlain if (!test_taint(TAINT_TEST)) 2443437c1f9cSLuis Chamberlain pr_warn("%s: loading test module taints kernel.\n", 2444437c1f9cSLuis Chamberlain mod->name); 2445437c1f9cSLuis Chamberlain add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); 2446437c1f9cSLuis Chamberlain } 2447c3bbf62eSLuis Chamberlain #ifdef CONFIG_MODULE_SIG 2448c3bbf62eSLuis Chamberlain mod->sig_ok = info->sig_ok; 2449c3bbf62eSLuis Chamberlain if (!mod->sig_ok) { 2450c3bbf62eSLuis Chamberlain pr_notice_once("%s: module verification failed: signature " 2451c3bbf62eSLuis Chamberlain "and/or required key missing - tainting " 2452c3bbf62eSLuis Chamberlain
"kernel\n", mod->name); 2453c3bbf62eSLuis Chamberlain add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); 2454c3bbf62eSLuis Chamberlain } 2455c3bbf62eSLuis Chamberlain #endif 245672f08b3cSLuis Chamberlain 245772f08b3cSLuis Chamberlain /* 245872f08b3cSLuis Chamberlain * ndiswrapper is under GPL by itself, but loads proprietary modules. 245972f08b3cSLuis Chamberlain * Don't use add_taint_module(), as it would prevent ndiswrapper from 246072f08b3cSLuis Chamberlain * using GPL-only symbols it needs. 246172f08b3cSLuis Chamberlain */ 246272f08b3cSLuis Chamberlain if (strcmp(mod->name, "ndiswrapper") == 0) 246372f08b3cSLuis Chamberlain add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 246472f08b3cSLuis Chamberlain 2465c8e0bd57SUwe Kleine-König for (i = 0; i < ARRAY_SIZE(module_license_offenders); ++i) { 2466c8e0bd57SUwe Kleine-König if (strcmp(mod->name, module_license_offenders[i]) == 0) 246772f08b3cSLuis Chamberlain add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 246872f08b3cSLuis Chamberlain LOCKDEP_NOW_UNRELIABLE); 2469c8e0bd57SUwe Kleine-König } 247072f08b3cSLuis Chamberlain 247172f08b3cSLuis Chamberlain if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 247272f08b3cSLuis Chamberlain pr_warn("%s: module license taints kernel.\n", mod->name); 247372f08b3cSLuis Chamberlain 2474437c1f9cSLuis Chamberlain } 2475437c1f9cSLuis Chamberlain 2476cfc1d277SAaron Tomlin static int check_modinfo(struct module *mod, struct load_info *info, int flags) 2477cfc1d277SAaron Tomlin { 2478cfc1d277SAaron Tomlin const char *modmagic = get_modinfo(info, "vermagic"); 2479cfc1d277SAaron Tomlin int err; 2480cfc1d277SAaron Tomlin 2481cfc1d277SAaron Tomlin if (flags & MODULE_INIT_IGNORE_VERMAGIC) 2482cfc1d277SAaron Tomlin modmagic = NULL; 2483cfc1d277SAaron Tomlin 2484cfc1d277SAaron Tomlin /* This is allowed: modprobe --force will invalidate it. 
*/ 2485cfc1d277SAaron Tomlin if (!modmagic) { 2486cfc1d277SAaron Tomlin err = try_to_force_load(mod, "bad vermagic"); 2487cfc1d277SAaron Tomlin if (err) 2488cfc1d277SAaron Tomlin return err; 2489cfc1d277SAaron Tomlin } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 2490cfc1d277SAaron Tomlin pr_err("%s: version magic '%s' should be '%s'\n", 2491cfc1d277SAaron Tomlin info->name, modmagic, vermagic); 2492cfc1d277SAaron Tomlin return -ENOEXEC; 2493cfc1d277SAaron Tomlin } 2494cfc1d277SAaron Tomlin 2495cfc1d277SAaron Tomlin err = check_modinfo_livepatch(mod, info); 2496cfc1d277SAaron Tomlin if (err) 2497cfc1d277SAaron Tomlin return err; 2498cfc1d277SAaron Tomlin 2499cfc1d277SAaron Tomlin return 0; 2500cfc1d277SAaron Tomlin } 2501cfc1d277SAaron Tomlin 2502cfc1d277SAaron Tomlin static int find_module_sections(struct module *mod, struct load_info *info) 2503cfc1d277SAaron Tomlin { 2504cfc1d277SAaron Tomlin mod->kp = section_objs(info, "__param", 2505cfc1d277SAaron Tomlin sizeof(*mod->kp), &mod->num_kp); 2506cfc1d277SAaron Tomlin mod->syms = section_objs(info, "__ksymtab", 2507cfc1d277SAaron Tomlin sizeof(*mod->syms), &mod->num_syms); 2508cfc1d277SAaron Tomlin mod->crcs = section_addr(info, "__kcrctab"); 2509cfc1d277SAaron Tomlin mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 2510cfc1d277SAaron Tomlin sizeof(*mod->gpl_syms), 2511cfc1d277SAaron Tomlin &mod->num_gpl_syms); 2512cfc1d277SAaron Tomlin mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 2513cfc1d277SAaron Tomlin 2514cfc1d277SAaron Tomlin #ifdef CONFIG_CONSTRUCTORS 2515cfc1d277SAaron Tomlin mod->ctors = section_objs(info, ".ctors", 2516cfc1d277SAaron Tomlin sizeof(*mod->ctors), &mod->num_ctors); 2517cfc1d277SAaron Tomlin if (!mod->ctors) 2518cfc1d277SAaron Tomlin mod->ctors = section_objs(info, ".init_array", 2519cfc1d277SAaron Tomlin sizeof(*mod->ctors), &mod->num_ctors); 2520cfc1d277SAaron Tomlin else if (find_sec(info, ".init_array")) { 2521cfc1d277SAaron Tomlin /* 2522cfc1d277SAaron Tomlin * This shouldn't happen with same compiler and binutils 2523cfc1d277SAaron Tomlin * building all parts of the module. 
2524cfc1d277SAaron Tomlin */ 2525cfc1d277SAaron Tomlin pr_warn("%s: has both .ctors and .init_array.\n", 2526cfc1d277SAaron Tomlin mod->name); 2527cfc1d277SAaron Tomlin return -EINVAL; 2528cfc1d277SAaron Tomlin } 2529cfc1d277SAaron Tomlin #endif 2530cfc1d277SAaron Tomlin 2531cfc1d277SAaron Tomlin mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 2532cfc1d277SAaron Tomlin &mod->noinstr_text_size); 2533cfc1d277SAaron Tomlin 2534cfc1d277SAaron Tomlin #ifdef CONFIG_TRACEPOINTS 2535cfc1d277SAaron Tomlin mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 2536cfc1d277SAaron Tomlin sizeof(*mod->tracepoints_ptrs), 2537cfc1d277SAaron Tomlin &mod->num_tracepoints); 2538cfc1d277SAaron Tomlin #endif 2539cfc1d277SAaron Tomlin #ifdef CONFIG_TREE_SRCU 2540cfc1d277SAaron Tomlin mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 2541cfc1d277SAaron Tomlin sizeof(*mod->srcu_struct_ptrs), 2542cfc1d277SAaron Tomlin &mod->num_srcu_structs); 2543cfc1d277SAaron Tomlin #endif 2544cfc1d277SAaron Tomlin #ifdef CONFIG_BPF_EVENTS 2545cfc1d277SAaron Tomlin mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 2546cfc1d277SAaron Tomlin sizeof(*mod->bpf_raw_events), 2547cfc1d277SAaron Tomlin &mod->num_bpf_raw_events); 2548cfc1d277SAaron Tomlin #endif 2549cfc1d277SAaron Tomlin #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2550cfc1d277SAaron Tomlin mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 2551d4e48e3dSAlan Maguire mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, 2552d4e48e3dSAlan Maguire &mod->btf_base_data_size); 2553cfc1d277SAaron Tomlin #endif 2554cfc1d277SAaron Tomlin #ifdef CONFIG_JUMP_LABEL 2555cfc1d277SAaron Tomlin mod->jump_entries = section_objs(info, "__jump_table", 2556cfc1d277SAaron Tomlin sizeof(*mod->jump_entries), 2557cfc1d277SAaron Tomlin &mod->num_jump_entries); 2558cfc1d277SAaron Tomlin #endif 2559cfc1d277SAaron Tomlin #ifdef CONFIG_EVENT_TRACING 2560cfc1d277SAaron Tomlin mod->trace_events = section_objs(info, "_ftrace_events", 2561cfc1d277SAaron Tomlin sizeof(*mod->trace_events), 2562cfc1d277SAaron Tomlin &mod->num_trace_events); 2563cfc1d277SAaron Tomlin mod->trace_evals = section_objs(info, "_ftrace_eval_map", 2564cfc1d277SAaron Tomlin sizeof(*mod->trace_evals), 2565cfc1d277SAaron Tomlin &mod->num_trace_evals); 2566cfc1d277SAaron Tomlin #endif 2567cfc1d277SAaron Tomlin #ifdef CONFIG_TRACING 2568cfc1d277SAaron Tomlin mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2569cfc1d277SAaron Tomlin sizeof(*mod->trace_bprintk_fmt_start), 2570cfc1d277SAaron Tomlin &mod->num_trace_bprintk_fmt); 2571cfc1d277SAaron Tomlin #endif 2572cfc1d277SAaron Tomlin #ifdef CONFIG_FTRACE_MCOUNT_RECORD 2573cfc1d277SAaron Tomlin /* sechdrs[0].sh_size is always zero */ 2574cfc1d277SAaron Tomlin mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 2575cfc1d277SAaron Tomlin sizeof(*mod->ftrace_callsites), 2576cfc1d277SAaron Tomlin &mod->num_ftrace_callsites); 2577cfc1d277SAaron Tomlin #endif 2578cfc1d277SAaron Tomlin #ifdef CONFIG_FUNCTION_ERROR_INJECTION 2579cfc1d277SAaron Tomlin mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 2580cfc1d277SAaron Tomlin sizeof(*mod->ei_funcs), 2581cfc1d277SAaron Tomlin &mod->num_ei_funcs); 2582cfc1d277SAaron Tomlin #endif 2583cfc1d277SAaron Tomlin #ifdef CONFIG_KPROBES 2584cfc1d277SAaron Tomlin mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 2585cfc1d277SAaron Tomlin &mod->kprobes_text_size); 2586cfc1d277SAaron Tomlin mod->kprobe_blacklist = 
section_objs(info, "_kprobe_blacklist", 2587cfc1d277SAaron Tomlin sizeof(unsigned long), 2588cfc1d277SAaron Tomlin &mod->num_kprobe_blacklist); 2589cfc1d277SAaron Tomlin #endif 2590cfc1d277SAaron Tomlin #ifdef CONFIG_PRINTK_INDEX 2591cfc1d277SAaron Tomlin mod->printk_index_start = section_objs(info, ".printk_index", 2592cfc1d277SAaron Tomlin sizeof(*mod->printk_index_start), 2593cfc1d277SAaron Tomlin &mod->printk_index_size); 2594cfc1d277SAaron Tomlin #endif 2595cfc1d277SAaron Tomlin #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 2596cfc1d277SAaron Tomlin mod->static_call_sites = section_objs(info, ".static_call_sites", 2597cfc1d277SAaron Tomlin sizeof(*mod->static_call_sites), 2598cfc1d277SAaron Tomlin &mod->num_static_call_sites); 2599cfc1d277SAaron Tomlin #endif 260041a55567SDavid Gow #if IS_ENABLED(CONFIG_KUNIT) 26013d6e4462SJeremy Kerr mod->kunit_suites = section_objs(info, ".kunit_test_suites", 26023d6e4462SJeremy Kerr sizeof(*mod->kunit_suites), 26033d6e4462SJeremy Kerr &mod->num_kunit_suites); 2604d81f0d7bSRae Moar mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites", 2605d81f0d7bSRae Moar sizeof(*mod->kunit_init_suites), 2606d81f0d7bSRae Moar &mod->num_kunit_init_suites); 26073d6e4462SJeremy Kerr #endif 26083d6e4462SJeremy Kerr 2609cfc1d277SAaron Tomlin mod->extable = section_objs(info, "__ex_table", 2610cfc1d277SAaron Tomlin sizeof(*mod->extable), &mod->num_exentries); 2611cfc1d277SAaron Tomlin 2612cfc1d277SAaron Tomlin if (section_addr(info, "__obsparm")) 2613cfc1d277SAaron Tomlin pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 2614cfc1d277SAaron Tomlin 26157deabd67SJason Baron #ifdef CONFIG_DYNAMIC_DEBUG_CORE 26167deabd67SJason Baron mod->dyndbg_info.descs = section_objs(info, "__dyndbg", 26177deabd67SJason Baron sizeof(*mod->dyndbg_info.descs), 26187deabd67SJason Baron &mod->dyndbg_info.num_descs); 26197deabd67SJason Baron mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", 26207deabd67SJason Baron sizeof(*mod->dyndbg_info.classes), 26217deabd67SJason Baron &mod->dyndbg_info.num_classes); 26227deabd67SJason Baron #endif 2623cfc1d277SAaron Tomlin 2624cfc1d277SAaron Tomlin return 0; 2625cfc1d277SAaron Tomlin } 2626cfc1d277SAaron Tomlin 2627cfc1d277SAaron Tomlin static int move_module(struct module *mod, struct load_info *info) 2628cfc1d277SAaron Tomlin { 2629cfc1d277SAaron Tomlin int i; 2630c7ee8aebSLuis Chamberlain enum mod_mem_type t = 0; 2631c7ee8aebSLuis Chamberlain int ret = -ENOMEM; 26320db6f8d7SSuren Baghdasaryan bool codetag_section_found = false; 2633cfc1d277SAaron Tomlin 2634ac3b4328SSong Liu for_each_mod_mem_type(type) { 2635ac3b4328SSong Liu if (!mod->mem[type].size) { 2636ac3b4328SSong Liu mod->mem[type].base = NULL; 2637ac3b4328SSong Liu continue; 2638ac3b4328SSong Liu } 2639bc6b94d3SMike Rapoport (IBM) 2640bc6b94d3SMike Rapoport (IBM) ret = module_memory_alloc(mod, type); 2641bc6b94d3SMike Rapoport (IBM) if (ret) { 2642ac3b4328SSong Liu t = type; 26430db6f8d7SSuren Baghdasaryan goto out_err; 2644cfc1d277SAaron Tomlin } 264501dc0386SChristophe Leroy } 264601dc0386SChristophe Leroy 2647cfc1d277SAaron Tomlin /* Transfer each section which specifies SHF_ALLOC */ 26486ed81802SJim Cromie pr_debug("Final section addresses for %s:\n", mod->name); 2649cfc1d277SAaron Tomlin for (i = 0; i < info->hdr->e_shnum; i++) { 2650cfc1d277SAaron Tomlin void *dest; 2651cfc1d277SAaron Tomlin Elf_Shdr *shdr = &info->sechdrs[i]; 26520db6f8d7SSuren Baghdasaryan const char *sname; 2653cfc1d277SAaron Tomlin 2654cfc1d277SAaron Tomlin if (!(shdr->sh_flags & 
SHF_ALLOC)) 2655cfc1d277SAaron Tomlin continue; 2656cfc1d277SAaron Tomlin 26570db6f8d7SSuren Baghdasaryan sname = info->secstrings + shdr->sh_name; 26580db6f8d7SSuren Baghdasaryan /* 26590db6f8d7SSuren Baghdasaryan * Load codetag sections separately as they might still be used 26600db6f8d7SSuren Baghdasaryan * after module unload. 26610db6f8d7SSuren Baghdasaryan */ 26620db6f8d7SSuren Baghdasaryan if (codetag_needs_module_section(mod, sname, shdr->sh_size)) { 26630db6f8d7SSuren Baghdasaryan dest = codetag_alloc_module_section(mod, sname, shdr->sh_size, 26640db6f8d7SSuren Baghdasaryan arch_mod_section_prepend(mod, i), shdr->sh_addralign); 26650db6f8d7SSuren Baghdasaryan if (WARN_ON(!dest)) { 26660db6f8d7SSuren Baghdasaryan ret = -EINVAL; 26670db6f8d7SSuren Baghdasaryan goto out_err; 26680db6f8d7SSuren Baghdasaryan } 26690db6f8d7SSuren Baghdasaryan if (IS_ERR(dest)) { 26700db6f8d7SSuren Baghdasaryan ret = PTR_ERR(dest); 26710db6f8d7SSuren Baghdasaryan goto out_err; 26720db6f8d7SSuren Baghdasaryan } 26730db6f8d7SSuren Baghdasaryan codetag_section_found = true; 26740db6f8d7SSuren Baghdasaryan } else { 26750db6f8d7SSuren Baghdasaryan enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 26760db6f8d7SSuren Baghdasaryan unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 26770db6f8d7SSuren Baghdasaryan 2678*c287c072SMike Rapoport (Microsoft) dest = mod->mem[type].base + offset; 26790db6f8d7SSuren Baghdasaryan } 2680cfc1d277SAaron Tomlin 2681c7ee8aebSLuis Chamberlain if (shdr->sh_type != SHT_NOBITS) { 2682c7ee8aebSLuis Chamberlain /* 2683c7ee8aebSLuis Chamberlain * Our ELF checker already validated this, but let's 2684c7ee8aebSLuis Chamberlain * be pedantic and make the goal clearer. We actually 2685c7ee8aebSLuis Chamberlain * end up copying over all modifications made to the 2686c7ee8aebSLuis Chamberlain * userspace copy of the entire struct module. 2687c7ee8aebSLuis Chamberlain */ 2688c7ee8aebSLuis Chamberlain if (i == info->index.mod && 2689c7ee8aebSLuis Chamberlain (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { 2690c7ee8aebSLuis Chamberlain ret = -ENOEXEC; 26910db6f8d7SSuren Baghdasaryan goto out_err; 2692c7ee8aebSLuis Chamberlain } 2693cfc1d277SAaron Tomlin memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2694c7ee8aebSLuis Chamberlain } 2695c7ee8aebSLuis Chamberlain /* 2696c7ee8aebSLuis Chamberlain * Update the userspace copy's ELF section address to point to 2697c7ee8aebSLuis Chamberlain * our newly allocated memory as a pure convenience so that 2698c7ee8aebSLuis Chamberlain * users of info can keep taking advantage and using the newly 2699c7ee8aebSLuis Chamberlain * minted official memory area. 
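 * From here on, section_addr()/section_objs() lookups done later (e.g. in
 * find_module_sections()) resolve straight into the final module memory
 * rather than into the temporary vmalloc'd copy.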
2700c7ee8aebSLuis Chamberlain */ 2701*c287c072SMike Rapoport (Microsoft) shdr->sh_addr = (unsigned long)dest; 270266a2301eSJim Cromie pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, 270366a2301eSJim Cromie (long)shdr->sh_size, info->secstrings + shdr->sh_name); 2704cfc1d277SAaron Tomlin } 2705cfc1d277SAaron Tomlin 2706cfc1d277SAaron Tomlin return 0; 27070db6f8d7SSuren Baghdasaryan out_err: 2708*c287c072SMike Rapoport (Microsoft) module_memory_restore_rox(mod); 2709ac3b4328SSong Liu for (t--; t >= 0; t--) 27100db6f8d7SSuren Baghdasaryan module_memory_free(mod, t); 27110db6f8d7SSuren Baghdasaryan if (codetag_section_found) 27120db6f8d7SSuren Baghdasaryan codetag_free_module_sections(mod); 27130db6f8d7SSuren Baghdasaryan 2714c7ee8aebSLuis Chamberlain return ret; 2715cfc1d277SAaron Tomlin } 2716cfc1d277SAaron Tomlin 2717419e1a20SLuis Chamberlain static int check_export_symbol_versions(struct module *mod) 2718cfc1d277SAaron Tomlin { 2719cfc1d277SAaron Tomlin #ifdef CONFIG_MODVERSIONS 2720cfc1d277SAaron Tomlin if ((mod->num_syms && !mod->crcs) || 2721cfc1d277SAaron Tomlin (mod->num_gpl_syms && !mod->gpl_crcs)) { 2722cfc1d277SAaron Tomlin return try_to_force_load(mod, 2723cfc1d277SAaron Tomlin "no versions for exported symbols"); 2724cfc1d277SAaron Tomlin } 2725cfc1d277SAaron Tomlin #endif 2726cfc1d277SAaron Tomlin return 0; 2727cfc1d277SAaron Tomlin } 2728cfc1d277SAaron Tomlin 2729cfc1d277SAaron Tomlin static void flush_module_icache(const struct module *mod) 2730cfc1d277SAaron Tomlin { 2731cfc1d277SAaron Tomlin /* 2732cfc1d277SAaron Tomlin * Flush the instruction cache, since we've played with text. 2733cfc1d277SAaron Tomlin * Do it before processing of module parameters, so the module 2734cfc1d277SAaron Tomlin * can provide parameter accessor functions of its own. 
2735cfc1d277SAaron Tomlin */ 2736ac3b4328SSong Liu for_each_mod_mem_type(type) { 2737ac3b4328SSong Liu const struct module_memory *mod_mem = &mod->mem[type]; 2738ac3b4328SSong Liu 2739ac3b4328SSong Liu if (mod_mem->size) { 2740ac3b4328SSong Liu flush_icache_range((unsigned long)mod_mem->base, 2741ac3b4328SSong Liu (unsigned long)mod_mem->base + mod_mem->size); 2742ac3b4328SSong Liu } 2743ac3b4328SSong Liu } 2744cfc1d277SAaron Tomlin } 2745cfc1d277SAaron Tomlin 2746f9231a99SNicholas Piggin bool __weak module_elf_check_arch(Elf_Ehdr *hdr) 2747f9231a99SNicholas Piggin { 2748f9231a99SNicholas Piggin return true; 2749f9231a99SNicholas Piggin } 2750f9231a99SNicholas Piggin 2751cfc1d277SAaron Tomlin int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 2752cfc1d277SAaron Tomlin Elf_Shdr *sechdrs, 2753cfc1d277SAaron Tomlin char *secstrings, 2754cfc1d277SAaron Tomlin struct module *mod) 2755cfc1d277SAaron Tomlin { 2756cfc1d277SAaron Tomlin return 0; 2757cfc1d277SAaron Tomlin } 2758cfc1d277SAaron Tomlin 2759cfc1d277SAaron Tomlin /* module_blacklist is a comma-separated list of module names */ 2760cfc1d277SAaron Tomlin static char *module_blacklist; 2761cfc1d277SAaron Tomlin static bool blacklisted(const char *module_name) 2762cfc1d277SAaron Tomlin { 2763cfc1d277SAaron Tomlin const char *p; 2764cfc1d277SAaron Tomlin size_t len; 2765cfc1d277SAaron Tomlin 2766cfc1d277SAaron Tomlin if (!module_blacklist) 2767cfc1d277SAaron Tomlin return false; 2768cfc1d277SAaron Tomlin 2769cfc1d277SAaron Tomlin for (p = module_blacklist; *p; p += len) { 2770cfc1d277SAaron Tomlin len = strcspn(p, ","); 2771cfc1d277SAaron Tomlin if (strlen(module_name) == len && !memcmp(module_name, p, len)) 2772cfc1d277SAaron Tomlin return true; 2773cfc1d277SAaron Tomlin if (p[len] == ',') 2774cfc1d277SAaron Tomlin len++; 2775cfc1d277SAaron Tomlin } 2776cfc1d277SAaron Tomlin return false; 2777cfc1d277SAaron Tomlin } 2778cfc1d277SAaron Tomlin core_param(module_blacklist, module_blacklist, charp, 0400); 2779cfc1d277SAaron Tomlin 2780cfc1d277SAaron Tomlin static struct module *layout_and_allocate(struct load_info *info, int flags) 2781cfc1d277SAaron Tomlin { 2782cfc1d277SAaron Tomlin struct module *mod; 2783cfc1d277SAaron Tomlin unsigned int ndx; 2784cfc1d277SAaron Tomlin int err; 2785cfc1d277SAaron Tomlin 2786cfc1d277SAaron Tomlin /* Allow arches to frob section contents and sizes. */ 2787cfc1d277SAaron Tomlin err = module_frob_arch_sections(info->hdr, info->sechdrs, 2788cfc1d277SAaron Tomlin info->secstrings, info->mod); 2789cfc1d277SAaron Tomlin if (err < 0) 2790cfc1d277SAaron Tomlin return ERR_PTR(err); 2791cfc1d277SAaron Tomlin 2792cfc1d277SAaron Tomlin err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 2793cfc1d277SAaron Tomlin info->secstrings, info->mod); 2794cfc1d277SAaron Tomlin if (err < 0) 2795cfc1d277SAaron Tomlin return ERR_PTR(err); 2796cfc1d277SAaron Tomlin 2797cfc1d277SAaron Tomlin /* We will do a special allocation for per-cpu sections later. */ 2798cfc1d277SAaron Tomlin info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 2799cfc1d277SAaron Tomlin 2800cfc1d277SAaron Tomlin /* 2801cfc1d277SAaron Tomlin * Mark ro_after_init section with SHF_RO_AFTER_INIT so that 2802cfc1d277SAaron Tomlin * layout_sections() can put it in the right place. 2803cfc1d277SAaron Tomlin * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 
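 *
 * Modules typically end up with such a section via the __ro_after_init
 * attribute, e.g. (a generic sketch, not code from this file):
 *
 *	static unsigned long foo_table[16] __ro_after_init;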
2804cfc1d277SAaron Tomlin */ 2805cfc1d277SAaron Tomlin ndx = find_sec(info, ".data..ro_after_init"); 2806cfc1d277SAaron Tomlin if (ndx) 2807cfc1d277SAaron Tomlin info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2808cfc1d277SAaron Tomlin /* 2809cfc1d277SAaron Tomlin * Mark the __jump_table section as ro_after_init as well: these data 2810cfc1d277SAaron Tomlin * structures are never modified, with the exception of entries that 2811cfc1d277SAaron Tomlin * refer to code in the __init section, which are annotated as such 2812cfc1d277SAaron Tomlin * at module load time. 2813cfc1d277SAaron Tomlin */ 2814cfc1d277SAaron Tomlin ndx = find_sec(info, "__jump_table"); 2815cfc1d277SAaron Tomlin if (ndx) 2816cfc1d277SAaron Tomlin info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2817cfc1d277SAaron Tomlin 2818cfc1d277SAaron Tomlin /* 2819cfc1d277SAaron Tomlin * Determine total sizes, and put offsets in sh_entsize. For now 2820cfc1d277SAaron Tomlin * this is done generically; there doesn't appear to be any 2821cfc1d277SAaron Tomlin * special cases for the architectures. 2822cfc1d277SAaron Tomlin */ 2823cfc1d277SAaron Tomlin layout_sections(info->mod, info); 2824cfc1d277SAaron Tomlin layout_symtab(info->mod, info); 2825cfc1d277SAaron Tomlin 2826cfc1d277SAaron Tomlin /* Allocate and move to the final place */ 2827cfc1d277SAaron Tomlin err = move_module(info->mod, info); 2828cfc1d277SAaron Tomlin if (err) 2829cfc1d277SAaron Tomlin return ERR_PTR(err); 2830cfc1d277SAaron Tomlin 2831cfc1d277SAaron Tomlin /* Module has been copied to its final place now: return it. */ 2832cfc1d277SAaron Tomlin mod = (void *)info->sechdrs[info->index.mod].sh_addr; 2833cfc1d277SAaron Tomlin kmemleak_load_module(mod, info); 28340db6f8d7SSuren Baghdasaryan codetag_module_replaced(info->mod, mod); 28350db6f8d7SSuren Baghdasaryan 2836cfc1d277SAaron Tomlin return mod; 2837cfc1d277SAaron Tomlin } 2838cfc1d277SAaron Tomlin 2839cfc1d277SAaron Tomlin /* mod is no longer valid after this! */ 2840cfc1d277SAaron Tomlin static void module_deallocate(struct module *mod, struct load_info *info) 2841cfc1d277SAaron Tomlin { 2842cfc1d277SAaron Tomlin percpu_modfree(mod); 2843cfc1d277SAaron Tomlin module_arch_freeing_init(mod); 2844ac3b4328SSong Liu 28450db6f8d7SSuren Baghdasaryan free_mod_mem(mod); 2846cfc1d277SAaron Tomlin } 2847cfc1d277SAaron Tomlin 2848cfc1d277SAaron Tomlin int __weak module_finalize(const Elf_Ehdr *hdr, 2849cfc1d277SAaron Tomlin const Elf_Shdr *sechdrs, 2850cfc1d277SAaron Tomlin struct module *me) 2851cfc1d277SAaron Tomlin { 2852cfc1d277SAaron Tomlin return 0; 2853cfc1d277SAaron Tomlin } 2854cfc1d277SAaron Tomlin 2855cfc1d277SAaron Tomlin static int post_relocation(struct module *mod, const struct load_info *info) 2856cfc1d277SAaron Tomlin { 2857cfc1d277SAaron Tomlin /* Sort exception table now relocations are done. */ 2858cfc1d277SAaron Tomlin sort_extable(mod->extable, mod->extable + mod->num_exentries); 2859cfc1d277SAaron Tomlin 2860cfc1d277SAaron Tomlin /* Copy relocated percpu area over. */ 2861cfc1d277SAaron Tomlin percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, 2862cfc1d277SAaron Tomlin info->sechdrs[info->index.pcpu].sh_size); 2863cfc1d277SAaron Tomlin 2864cfc1d277SAaron Tomlin /* Setup kallsyms-specific fields. */ 2865cfc1d277SAaron Tomlin add_kallsyms(mod, info); 2866cfc1d277SAaron Tomlin 2867cfc1d277SAaron Tomlin /* Arch-specific module finalizing. 
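 * Architectures use this hook for things like applying alternative
 * instructions or other late fixups to the freshly relocated sections.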
*/ 2868*c287c072SMike Rapoport (Microsoft) return module_finalize(info->hdr, info->sechdrs, mod); 2869cfc1d277SAaron Tomlin } 2870cfc1d277SAaron Tomlin 2871cfc1d277SAaron Tomlin /* Call module constructors. */ 2872cfc1d277SAaron Tomlin static void do_mod_ctors(struct module *mod) 2873cfc1d277SAaron Tomlin { 2874cfc1d277SAaron Tomlin #ifdef CONFIG_CONSTRUCTORS 2875cfc1d277SAaron Tomlin unsigned long i; 2876cfc1d277SAaron Tomlin 2877cfc1d277SAaron Tomlin for (i = 0; i < mod->num_ctors; i++) 2878cfc1d277SAaron Tomlin mod->ctors[i](); 2879cfc1d277SAaron Tomlin #endif 2880cfc1d277SAaron Tomlin } 2881cfc1d277SAaron Tomlin 2882cfc1d277SAaron Tomlin /* For freeing module_init on success, in case kallsyms traversing */ 2883cfc1d277SAaron Tomlin struct mod_initfree { 2884cfc1d277SAaron Tomlin struct llist_node node; 2885ac3b4328SSong Liu void *init_text; 2886ac3b4328SSong Liu void *init_data; 2887ac3b4328SSong Liu void *init_rodata; 2888cfc1d277SAaron Tomlin }; 2889cfc1d277SAaron Tomlin 2890cfc1d277SAaron Tomlin static void do_free_init(struct work_struct *w) 2891cfc1d277SAaron Tomlin { 2892cfc1d277SAaron Tomlin struct llist_node *pos, *n, *list; 2893cfc1d277SAaron Tomlin struct mod_initfree *initfree; 2894cfc1d277SAaron Tomlin 2895cfc1d277SAaron Tomlin list = llist_del_all(&init_free_list); 2896cfc1d277SAaron Tomlin 2897cfc1d277SAaron Tomlin synchronize_rcu(); 2898cfc1d277SAaron Tomlin 2899cfc1d277SAaron Tomlin llist_for_each_safe(pos, n, list) { 2900cfc1d277SAaron Tomlin initfree = container_of(pos, struct mod_initfree, node); 290112af2b83SMike Rapoport (IBM) execmem_free(initfree->init_text); 290212af2b83SMike Rapoport (IBM) execmem_free(initfree->init_data); 290312af2b83SMike Rapoport (IBM) execmem_free(initfree->init_rodata); 2904cfc1d277SAaron Tomlin kfree(initfree); 2905cfc1d277SAaron Tomlin } 2906cfc1d277SAaron Tomlin } 2907cfc1d277SAaron Tomlin 29088f8cd6c0SChangbin Du void flush_module_init_free_work(void) 29098f8cd6c0SChangbin Du { 29108f8cd6c0SChangbin Du flush_work(&init_free_wq); 29118f8cd6c0SChangbin Du } 29128f8cd6c0SChangbin Du 2913ae39e9edSSaravana Kannan #undef MODULE_PARAM_PREFIX 2914ae39e9edSSaravana Kannan #define MODULE_PARAM_PREFIX "module." 2915ae39e9edSSaravana Kannan /* Default value for module->async_probe_requested */ 2916ae39e9edSSaravana Kannan static bool async_probe; 2917ae39e9edSSaravana Kannan module_param(async_probe, bool, 0644); 2918ae39e9edSSaravana Kannan 2919cfc1d277SAaron Tomlin /* 2920cfc1d277SAaron Tomlin * This is where the real work happens. 2921cfc1d277SAaron Tomlin * 2922cfc1d277SAaron Tomlin * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb 2923cfc1d277SAaron Tomlin * helper command 'lx-symbols'. 
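 *
 * i.e. so that a debugging session along the lines of:
 *
 *   (gdb) lx-symbols
 *
 * can hook module loading at a stable, non-inlined address.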
2924cfc1d277SAaron Tomlin */ 2925cfc1d277SAaron Tomlin static noinline int do_init_module(struct module *mod) 2926cfc1d277SAaron Tomlin { 2927cfc1d277SAaron Tomlin int ret = 0; 2928cfc1d277SAaron Tomlin struct mod_initfree *freeinit; 2929df3e764dSLuis Chamberlain #if defined(CONFIG_MODULE_STATS) 2930df3e764dSLuis Chamberlain unsigned int text_size = 0, total_size = 0; 2931df3e764dSLuis Chamberlain 2932df3e764dSLuis Chamberlain for_each_mod_mem_type(type) { 2933df3e764dSLuis Chamberlain const struct module_memory *mod_mem = &mod->mem[type]; 2934df3e764dSLuis Chamberlain if (mod_mem->size) { 2935df3e764dSLuis Chamberlain total_size += mod_mem->size; 2936df3e764dSLuis Chamberlain if (type == MOD_TEXT || type == MOD_INIT_TEXT) 2937df3e764dSLuis Chamberlain text_size += mod_mem->size; 2938df3e764dSLuis Chamberlain } 2939df3e764dSLuis Chamberlain } 2940df3e764dSLuis Chamberlain #endif 2941cfc1d277SAaron Tomlin 2942cfc1d277SAaron Tomlin freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 2943cfc1d277SAaron Tomlin if (!freeinit) { 2944cfc1d277SAaron Tomlin ret = -ENOMEM; 2945cfc1d277SAaron Tomlin goto fail; 2946cfc1d277SAaron Tomlin } 2947ac3b4328SSong Liu freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; 2948ac3b4328SSong Liu freeinit->init_data = mod->mem[MOD_INIT_DATA].base; 2949ac3b4328SSong Liu freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; 2950cfc1d277SAaron Tomlin 2951cfc1d277SAaron Tomlin do_mod_ctors(mod); 2952cfc1d277SAaron Tomlin /* Start the module */ 2953cfc1d277SAaron Tomlin if (mod->init != NULL) 2954cfc1d277SAaron Tomlin ret = do_one_initcall(mod->init); 2955cfc1d277SAaron Tomlin if (ret < 0) { 2956cfc1d277SAaron Tomlin goto fail_free_freeinit; 2957cfc1d277SAaron Tomlin } 2958cfc1d277SAaron Tomlin if (ret > 0) { 2959cfc1d277SAaron Tomlin pr_warn("%s: '%s'->init suspiciously returned %d, it should " 2960cfc1d277SAaron Tomlin "follow 0/-E convention\n" 2961cfc1d277SAaron Tomlin "%s: loading module anyway...\n", 2962cfc1d277SAaron Tomlin __func__, mod->name, ret, __func__); 2963cfc1d277SAaron Tomlin dump_stack(); 2964cfc1d277SAaron Tomlin } 2965cfc1d277SAaron Tomlin 2966cfc1d277SAaron Tomlin /* Now it's a first class citizen! */ 2967cfc1d277SAaron Tomlin mod->state = MODULE_STATE_LIVE; 2968cfc1d277SAaron Tomlin blocking_notifier_call_chain(&module_notify_list, 2969cfc1d277SAaron Tomlin MODULE_STATE_LIVE, mod); 2970cfc1d277SAaron Tomlin 2971cfc1d277SAaron Tomlin /* Delay uevent until module has finished its init routine */ 2972cfc1d277SAaron Tomlin kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 2973cfc1d277SAaron Tomlin 2974cfc1d277SAaron Tomlin /* 2975cfc1d277SAaron Tomlin * We need to finish all async code before the module init sequence 2976cfc1d277SAaron Tomlin * is done. This has potential to deadlock if synchronous module 2977cfc1d277SAaron Tomlin * loading is requested from async (which is not allowed!). 2978cfc1d277SAaron Tomlin * 2979cfc1d277SAaron Tomlin * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 2980cfc1d277SAaron Tomlin * request_module() from async workers") for more details. 2981cfc1d277SAaron Tomlin */ 2982cfc1d277SAaron Tomlin if (!mod->async_probe_requested) 2983cfc1d277SAaron Tomlin async_synchronize_full(); 2984cfc1d277SAaron Tomlin 2985ac3b4328SSong Liu ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, 2986ac3b4328SSong Liu mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); 2987cfc1d277SAaron Tomlin mutex_lock(&module_mutex); 2988cfc1d277SAaron Tomlin /* Drop initial reference. 
*/ 2989cfc1d277SAaron Tomlin module_put(mod); 2990cfc1d277SAaron Tomlin trim_init_extable(mod); 2991cfc1d277SAaron Tomlin #ifdef CONFIG_KALLSYMS 2992cfc1d277SAaron Tomlin /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 2993cfc1d277SAaron Tomlin rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 2994cfc1d277SAaron Tomlin #endif 2995097fd001SChristophe Leroy ret = module_enable_rodata_ro_after_init(mod); 2996d1909c02SChristophe Leroy if (ret) 2997110b1e07SChristophe Leroy pr_warn("%s: module_enable_rodata_ro_after_init() returned %d, " 2998110b1e07SChristophe Leroy "ro_after_init data might still be writable\n", 2999110b1e07SChristophe Leroy mod->name, ret); 3000110b1e07SChristophe Leroy 3001cfc1d277SAaron Tomlin mod_tree_remove_init(mod); 3002cfc1d277SAaron Tomlin module_arch_freeing_init(mod); 3003ac3b4328SSong Liu for_class_mod_mem_type(type, init) { 3004ac3b4328SSong Liu mod->mem[type].base = NULL; 3005ac3b4328SSong Liu mod->mem[type].size = 0; 3006ac3b4328SSong Liu } 3007df3e764dSLuis Chamberlain 3008cfc1d277SAaron Tomlin #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 3009d4e48e3dSAlan Maguire /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ 3010cfc1d277SAaron Tomlin mod->btf_data = NULL; 3011d4e48e3dSAlan Maguire mod->btf_base_data = NULL; 3012cfc1d277SAaron Tomlin #endif 3013cfc1d277SAaron Tomlin /* 3014cfc1d277SAaron Tomlin * We want to free module_init, but be aware that kallsyms may be 3015cfc1d277SAaron Tomlin * walking this with preempt disabled. In all the failure paths, we 3016cfc1d277SAaron Tomlin * call synchronize_rcu(), but we don't want to slow down the success 301712af2b83SMike Rapoport (IBM) * path. execmem_free() cannot be called in an interrupt, so do the 3018cfc1d277SAaron Tomlin * work and call synchronize_rcu() in a work queue. 3019cfc1d277SAaron Tomlin * 302012af2b83SMike Rapoport (IBM) * Note that execmem_alloc() on most architectures creates W+X page 3021cfc1d277SAaron Tomlin * mappings which won't be cleaned up until do_free_init() runs. Any 3022cfc1d277SAaron Tomlin * code such as mark_rodata_ro() which depends on those mappings to 30238f8cd6c0SChangbin Du * be cleaned up needs to sync with the queued work by invoking 30248f8cd6c0SChangbin Du * flush_module_init_free_work(). 3025cfc1d277SAaron Tomlin */ 3026cfc1d277SAaron Tomlin if (llist_add(&freeinit->node, &init_free_list)) 3027cfc1d277SAaron Tomlin schedule_work(&init_free_wq); 3028cfc1d277SAaron Tomlin 3029cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 3030cfc1d277SAaron Tomlin wake_up_all(&module_wq); 3031cfc1d277SAaron Tomlin 3032df3e764dSLuis Chamberlain mod_stat_add_long(text_size, &total_text_size); 3033df3e764dSLuis Chamberlain mod_stat_add_long(total_size, &total_mod_size); 3034df3e764dSLuis Chamberlain 3035df3e764dSLuis Chamberlain mod_stat_inc(&modcount); 3036df3e764dSLuis Chamberlain 3037cfc1d277SAaron Tomlin return 0; 3038cfc1d277SAaron Tomlin 3039cfc1d277SAaron Tomlin fail_free_freeinit: 3040cfc1d277SAaron Tomlin kfree(freeinit); 3041cfc1d277SAaron Tomlin fail: 3042cfc1d277SAaron Tomlin /* Try to protect us from buggy refcounters. 
*/ 3043cfc1d277SAaron Tomlin mod->state = MODULE_STATE_GOING; 3044cfc1d277SAaron Tomlin synchronize_rcu(); 3045cfc1d277SAaron Tomlin module_put(mod); 3046cfc1d277SAaron Tomlin blocking_notifier_call_chain(&module_notify_list, 3047cfc1d277SAaron Tomlin MODULE_STATE_GOING, mod); 3048cfc1d277SAaron Tomlin klp_module_going(mod); 3049cfc1d277SAaron Tomlin ftrace_release_mod(mod); 3050cfc1d277SAaron Tomlin free_module(mod); 3051cfc1d277SAaron Tomlin wake_up_all(&module_wq); 3052df3e764dSLuis Chamberlain 3053cfc1d277SAaron Tomlin return ret; 3054cfc1d277SAaron Tomlin } 3055cfc1d277SAaron Tomlin 3056cfc1d277SAaron Tomlin static int may_init_module(void) 3057cfc1d277SAaron Tomlin { 3058cfc1d277SAaron Tomlin if (!capable(CAP_SYS_MODULE) || modules_disabled) 3059cfc1d277SAaron Tomlin return -EPERM; 3060cfc1d277SAaron Tomlin 3061cfc1d277SAaron Tomlin return 0; 3062cfc1d277SAaron Tomlin } 3063cfc1d277SAaron Tomlin 3064f71afa6aSLuis Chamberlain /* Is this module of this name done loading? No locks held. */ 3065f71afa6aSLuis Chamberlain static bool finished_loading(const char *name) 3066cfc1d277SAaron Tomlin { 3067f71afa6aSLuis Chamberlain struct module *mod; 3068f71afa6aSLuis Chamberlain bool ret; 3069cfc1d277SAaron Tomlin 3070f71afa6aSLuis Chamberlain /* 3071f71afa6aSLuis Chamberlain * The module_mutex should not be a heavily contended lock; 3072f71afa6aSLuis Chamberlain * if we get the occasional sleep here, we'll go an extra iteration 3073f71afa6aSLuis Chamberlain * in the wait_event_interruptible(), which is harmless. 3074f71afa6aSLuis Chamberlain */ 3075f71afa6aSLuis Chamberlain sched_annotate_sleep(); 3076cfc1d277SAaron Tomlin mutex_lock(&module_mutex); 3077f71afa6aSLuis Chamberlain mod = find_module_all(name, strlen(name), true); 3078f71afa6aSLuis Chamberlain ret = !mod || mod->state == MODULE_STATE_LIVE 3079f71afa6aSLuis Chamberlain || mod->state == MODULE_STATE_GOING; 3080f71afa6aSLuis Chamberlain mutex_unlock(&module_mutex); 3081f71afa6aSLuis Chamberlain 3082f71afa6aSLuis Chamberlain return ret; 3083f71afa6aSLuis Chamberlain } 3084f71afa6aSLuis Chamberlain 3085f71afa6aSLuis Chamberlain /* Must be called with module_mutex held */ 3086df3e764dSLuis Chamberlain static int module_patient_check_exists(const char *name, 3087df3e764dSLuis Chamberlain enum fail_dup_mod_reason reason) 3088f71afa6aSLuis Chamberlain { 3089f71afa6aSLuis Chamberlain struct module *old; 3090f71afa6aSLuis Chamberlain int err = 0; 3091f71afa6aSLuis Chamberlain 3092f71afa6aSLuis Chamberlain old = find_module_all(name, strlen(name), true); 3093f71afa6aSLuis Chamberlain if (old == NULL) 3094f71afa6aSLuis Chamberlain return 0; 3095f71afa6aSLuis Chamberlain 3096f71afa6aSLuis Chamberlain if (old->state == MODULE_STATE_COMING || 3097f71afa6aSLuis Chamberlain old->state == MODULE_STATE_UNFORMED) { 3098cfc1d277SAaron Tomlin /* Wait in case it fails to load. */ 3099cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 3100cfc1d277SAaron Tomlin err = wait_event_interruptible(module_wq, 3101f71afa6aSLuis Chamberlain finished_loading(name)); 3102f71afa6aSLuis Chamberlain mutex_lock(&module_mutex); 3103cfc1d277SAaron Tomlin if (err) 3104f71afa6aSLuis Chamberlain return err; 31050254127aSPetr Pavlu 31060254127aSPetr Pavlu /* The module might have gone in the meantime. 
*/ 3107f71afa6aSLuis Chamberlain old = find_module_all(name, strlen(name), true); 3108cfc1d277SAaron Tomlin } 31090254127aSPetr Pavlu 3110df3e764dSLuis Chamberlain if (try_add_failed_module(name, reason)) 3111df3e764dSLuis Chamberlain pr_warn("Could not add fail-tracking for module: %s\n", name); 3112df3e764dSLuis Chamberlain 31130254127aSPetr Pavlu /* 31140254127aSPetr Pavlu * We are here only when the same module was being loaded. Do 31150254127aSPetr Pavlu * not try to load it again right now. It prevents long delays 31160254127aSPetr Pavlu * caused by serialized module load failures. It might happen 31170254127aSPetr Pavlu * when more devices of the same type trigger load of 31180254127aSPetr Pavlu * a particular module. 31190254127aSPetr Pavlu */ 31200254127aSPetr Pavlu if (old && old->state == MODULE_STATE_LIVE) 3121f71afa6aSLuis Chamberlain return -EEXIST; 3122f71afa6aSLuis Chamberlain return -EBUSY; 3123cfc1d277SAaron Tomlin } 3124f71afa6aSLuis Chamberlain 3125f71afa6aSLuis Chamberlain /* 3126f71afa6aSLuis Chamberlain * We try to place it in the list now to make sure it's unique before 3127f71afa6aSLuis Chamberlain * we dedicate too many resources. In particular, temporary percpu 3128f71afa6aSLuis Chamberlain * memory exhaustion. 3129f71afa6aSLuis Chamberlain */ 3130f71afa6aSLuis Chamberlain static int add_unformed_module(struct module *mod) 3131f71afa6aSLuis Chamberlain { 3132f71afa6aSLuis Chamberlain int err; 3133f71afa6aSLuis Chamberlain 3134f71afa6aSLuis Chamberlain mod->state = MODULE_STATE_UNFORMED; 3135f71afa6aSLuis Chamberlain 3136f71afa6aSLuis Chamberlain mutex_lock(&module_mutex); 3137df3e764dSLuis Chamberlain err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); 3138f71afa6aSLuis Chamberlain if (err) 3139f71afa6aSLuis Chamberlain goto out; 3140f71afa6aSLuis Chamberlain 3141cfc1d277SAaron Tomlin mod_update_bounds(mod); 3142cfc1d277SAaron Tomlin list_add_rcu(&mod->list, &modules); 3143cfc1d277SAaron Tomlin mod_tree_insert(mod); 3144cfc1d277SAaron Tomlin err = 0; 3145cfc1d277SAaron Tomlin 3146cfc1d277SAaron Tomlin out: 3147cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 3148cfc1d277SAaron Tomlin return err; 3149cfc1d277SAaron Tomlin } 3150cfc1d277SAaron Tomlin 3151cfc1d277SAaron Tomlin static int complete_formation(struct module *mod, struct load_info *info) 3152cfc1d277SAaron Tomlin { 3153cfc1d277SAaron Tomlin int err; 3154cfc1d277SAaron Tomlin 3155cfc1d277SAaron Tomlin mutex_lock(&module_mutex); 3156cfc1d277SAaron Tomlin 3157cfc1d277SAaron Tomlin /* Find duplicate symbols (must be called under lock). */ 3158cfc1d277SAaron Tomlin err = verify_exported_symbols(mod); 3159cfc1d277SAaron Tomlin if (err < 0) 3160cfc1d277SAaron Tomlin goto out; 3161cfc1d277SAaron Tomlin 316289245600SSami Tolvanen /* These rely on module_mutex for list integrity. 
*/ 3163cfc1d277SAaron Tomlin module_bug_finalize(info->hdr, info->sechdrs, mod); 316489245600SSami Tolvanen module_cfi_finalize(info->hdr, info->sechdrs, mod); 3165cfc1d277SAaron Tomlin 3166097fd001SChristophe Leroy err = module_enable_rodata_ro(mod); 3167d1909c02SChristophe Leroy if (err) 3168d1909c02SChristophe Leroy goto out_strict_rwx; 3169d1909c02SChristophe Leroy err = module_enable_data_nx(mod); 3170d1909c02SChristophe Leroy if (err) 3171d1909c02SChristophe Leroy goto out_strict_rwx; 3172d1909c02SChristophe Leroy err = module_enable_text_rox(mod); 3173d1909c02SChristophe Leroy if (err) 3174d1909c02SChristophe Leroy goto out_strict_rwx; 3175cfc1d277SAaron Tomlin 3176cfc1d277SAaron Tomlin /* 3177cfc1d277SAaron Tomlin * Mark state as coming so strong_try_module_get() ignores us, 3178cfc1d277SAaron Tomlin * but kallsyms etc. can see us. 3179cfc1d277SAaron Tomlin */ 3180cfc1d277SAaron Tomlin mod->state = MODULE_STATE_COMING; 3181cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 3182cfc1d277SAaron Tomlin 3183cfc1d277SAaron Tomlin return 0; 3184cfc1d277SAaron Tomlin 3185d1909c02SChristophe Leroy out_strict_rwx: 3186d1909c02SChristophe Leroy module_bug_cleanup(mod); 3187cfc1d277SAaron Tomlin out: 3188cfc1d277SAaron Tomlin mutex_unlock(&module_mutex); 3189cfc1d277SAaron Tomlin return err; 3190cfc1d277SAaron Tomlin } 3191cfc1d277SAaron Tomlin 3192cfc1d277SAaron Tomlin static int prepare_coming_module(struct module *mod) 3193cfc1d277SAaron Tomlin { 3194cfc1d277SAaron Tomlin int err; 3195cfc1d277SAaron Tomlin 3196cfc1d277SAaron Tomlin ftrace_module_enable(mod); 3197cfc1d277SAaron Tomlin err = klp_module_coming(mod); 3198cfc1d277SAaron Tomlin if (err) 3199cfc1d277SAaron Tomlin return err; 3200cfc1d277SAaron Tomlin 3201cfc1d277SAaron Tomlin err = blocking_notifier_call_chain_robust(&module_notify_list, 3202cfc1d277SAaron Tomlin MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 3203cfc1d277SAaron Tomlin err = notifier_to_errno(err); 3204cfc1d277SAaron Tomlin if (err) 3205cfc1d277SAaron Tomlin klp_module_going(mod); 3206cfc1d277SAaron Tomlin 3207cfc1d277SAaron Tomlin return err; 3208cfc1d277SAaron Tomlin } 3209cfc1d277SAaron Tomlin 3210cfc1d277SAaron Tomlin static int unknown_module_param_cb(char *param, char *val, const char *modname, 3211cfc1d277SAaron Tomlin void *arg) 3212cfc1d277SAaron Tomlin { 3213cfc1d277SAaron Tomlin struct module *mod = arg; 3214cfc1d277SAaron Tomlin int ret; 3215cfc1d277SAaron Tomlin 3216cfc1d277SAaron Tomlin if (strcmp(param, "async_probe") == 0) { 3217fbed4feaSChristophe JAILLET if (kstrtobool(val, &mod->async_probe_requested)) 3218cfc1d277SAaron Tomlin mod->async_probe_requested = true; 3219cfc1d277SAaron Tomlin return 0; 3220cfc1d277SAaron Tomlin } 3221cfc1d277SAaron Tomlin 3222cfc1d277SAaron Tomlin /* Check for magic 'dyndbg' arg */ 3223cfc1d277SAaron Tomlin ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3224cfc1d277SAaron Tomlin if (ret != 0) 3225cfc1d277SAaron Tomlin pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3226cfc1d277SAaron Tomlin return 0; 3227cfc1d277SAaron Tomlin } 3228cfc1d277SAaron Tomlin 322985e6f61cSLuis Chamberlain /* Module within temporary copy, this doesn't do any allocation */ 323085e6f61cSLuis Chamberlain static int early_mod_check(struct load_info *info, int flags) 323185e6f61cSLuis Chamberlain { 323285e6f61cSLuis Chamberlain int err; 323385e6f61cSLuis Chamberlain 323485e6f61cSLuis Chamberlain /* 323585e6f61cSLuis Chamberlain * Now that we know we have the correct module name, check 323685e6f61cSLuis Chamberlain * if 
it's blacklisted. 323785e6f61cSLuis Chamberlain */ 323885e6f61cSLuis Chamberlain if (blacklisted(info->name)) { 323985e6f61cSLuis Chamberlain pr_err("Module %s is blacklisted\n", info->name); 324085e6f61cSLuis Chamberlain return -EPERM; 324185e6f61cSLuis Chamberlain } 324285e6f61cSLuis Chamberlain 324385e6f61cSLuis Chamberlain err = rewrite_section_headers(info, flags); 324485e6f61cSLuis Chamberlain if (err) 324585e6f61cSLuis Chamberlain return err; 324685e6f61cSLuis Chamberlain 324785e6f61cSLuis Chamberlain /* Check module struct version now, before we try to use module. */ 324885e6f61cSLuis Chamberlain if (!check_modstruct_version(info, info->mod)) 324985e6f61cSLuis Chamberlain return -ENOEXEC; 325085e6f61cSLuis Chamberlain 325102da2cbaSLuis Chamberlain err = check_modinfo(info->mod, info, flags); 325202da2cbaSLuis Chamberlain if (err) 325302da2cbaSLuis Chamberlain return err; 325402da2cbaSLuis Chamberlain 3255064f4536SLuis Chamberlain mutex_lock(&module_mutex); 3256064f4536SLuis Chamberlain err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); 3257064f4536SLuis Chamberlain mutex_unlock(&module_mutex); 3258064f4536SLuis Chamberlain 3259064f4536SLuis Chamberlain return err; 326085e6f61cSLuis Chamberlain } 326185e6f61cSLuis Chamberlain 3262cfc1d277SAaron Tomlin /* 3263cfc1d277SAaron Tomlin * Allocate and load the module: note that size of section 0 is always 3264cfc1d277SAaron Tomlin * zero, and we rely on this for optional sections. 3265cfc1d277SAaron Tomlin */ 3266cfc1d277SAaron Tomlin static int load_module(struct load_info *info, const char __user *uargs, 3267cfc1d277SAaron Tomlin int flags) 3268cfc1d277SAaron Tomlin { 3269cfc1d277SAaron Tomlin struct module *mod; 3270df3e764dSLuis Chamberlain bool module_allocated = false; 3271cfc1d277SAaron Tomlin long err = 0; 3272cfc1d277SAaron Tomlin char *after_dashes; 3273cfc1d277SAaron Tomlin 3274cfc1d277SAaron Tomlin /* 3275cfc1d277SAaron Tomlin * Do the signature check (if any) first. All that 3276cfc1d277SAaron Tomlin * the signature check needs is info->len, it does 3277cfc1d277SAaron Tomlin * not need any of the section info. That can be 3278cfc1d277SAaron Tomlin * set up later. This will minimize the chances 3279cfc1d277SAaron Tomlin * of a corrupt module causing problems before 3280cfc1d277SAaron Tomlin * we even get to the signature check. 3281cfc1d277SAaron Tomlin * 3282cfc1d277SAaron Tomlin * The check will also adjust info->len by stripping 3283cfc1d277SAaron Tomlin * off the sig length at the end of the module, making 3284cfc1d277SAaron Tomlin * checks against info->len more correct. 3285cfc1d277SAaron Tomlin */ 3286cfc1d277SAaron Tomlin err = module_sig_check(info, flags); 3287cfc1d277SAaron Tomlin if (err) 3288cfc1d277SAaron Tomlin goto free_copy; 3289cfc1d277SAaron Tomlin 3290cfc1d277SAaron Tomlin /* 3291cfc1d277SAaron Tomlin * Do basic sanity checks against the ELF header and 32923d40bb90SLuis Chamberlain * sections. Cache useful sections and set the 32933d40bb90SLuis Chamberlain * info->mod to the userspace passed struct module. 3294cfc1d277SAaron Tomlin */ 32953d40bb90SLuis Chamberlain err = elf_validity_cache_copy(info, flags); 3296cfc1d277SAaron Tomlin if (err) 3297cfc1d277SAaron Tomlin goto free_copy; 3298cfc1d277SAaron Tomlin 329985e6f61cSLuis Chamberlain err = early_mod_check(info, flags); 3300cfc1d277SAaron Tomlin if (err) 3301cfc1d277SAaron Tomlin goto free_copy; 3302cfc1d277SAaron Tomlin 3303cfc1d277SAaron Tomlin /* Figure out module layout, and allocate all the memory. 
*/ 3304cfc1d277SAaron Tomlin mod = layout_and_allocate(info, flags); 3305cfc1d277SAaron Tomlin if (IS_ERR(mod)) { 3306cfc1d277SAaron Tomlin err = PTR_ERR(mod); 3307cfc1d277SAaron Tomlin goto free_copy; 3308cfc1d277SAaron Tomlin } 3309cfc1d277SAaron Tomlin 3310df3e764dSLuis Chamberlain module_allocated = true; 3311df3e764dSLuis Chamberlain 3312cfc1d277SAaron Tomlin audit_log_kern_module(mod->name); 3313cfc1d277SAaron Tomlin 3314cfc1d277SAaron Tomlin /* Reserve our place in the list. */ 3315cfc1d277SAaron Tomlin err = add_unformed_module(mod); 3316cfc1d277SAaron Tomlin if (err) 3317cfc1d277SAaron Tomlin goto free_module; 3318cfc1d277SAaron Tomlin 3319a12b9451SLuis Chamberlain /* 3320a12b9451SLuis Chamberlain * We are tainting your kernel if your module gets into 3321a12b9451SLuis Chamberlain * the modules linked list somehow. 3322a12b9451SLuis Chamberlain */ 3323a12b9451SLuis Chamberlain module_augment_kernel_taints(mod, info); 3324cfc1d277SAaron Tomlin 3325cfc1d277SAaron Tomlin /* To avoid stressing percpu allocator, do this once we're unique. */ 3326cfc1d277SAaron Tomlin err = percpu_modalloc(mod, info); 3327cfc1d277SAaron Tomlin if (err) 3328cfc1d277SAaron Tomlin goto unlink_mod; 3329cfc1d277SAaron Tomlin 3330cfc1d277SAaron Tomlin /* Now module is in final location, initialize linked lists, etc. */ 3331cfc1d277SAaron Tomlin err = module_unload_init(mod); 3332cfc1d277SAaron Tomlin if (err) 3333cfc1d277SAaron Tomlin goto unlink_mod; 3334cfc1d277SAaron Tomlin 3335cfc1d277SAaron Tomlin init_param_lock(mod); 3336cfc1d277SAaron Tomlin 3337cfc1d277SAaron Tomlin /* 3338cfc1d277SAaron Tomlin * Now we've got everything in the final locations, we can 3339cfc1d277SAaron Tomlin * find optional sections. 3340cfc1d277SAaron Tomlin */ 3341cfc1d277SAaron Tomlin err = find_module_sections(mod, info); 3342cfc1d277SAaron Tomlin if (err) 3343cfc1d277SAaron Tomlin goto free_unload; 3344cfc1d277SAaron Tomlin 3345419e1a20SLuis Chamberlain err = check_export_symbol_versions(mod); 3346cfc1d277SAaron Tomlin if (err) 3347cfc1d277SAaron Tomlin goto free_unload; 3348cfc1d277SAaron Tomlin 3349cfc1d277SAaron Tomlin /* Set up MODINFO_ATTR fields */ 3350cfc1d277SAaron Tomlin setup_modinfo(mod, info); 3351cfc1d277SAaron Tomlin 3352cfc1d277SAaron Tomlin /* Fix up syms, so that st_value is a pointer to location. 
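 * i.e. resolve each undefined symbol against the kernel's (and other
 * modules') exported symbol tables so that the relocation pass that
 * follows can work with absolute addresses.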
3352cfc1d277SAaron Tomlin 	/* Fix up syms, so that st_value is a pointer to location. */
3353cfc1d277SAaron Tomlin 	err = simplify_symbols(mod, info);
3354cfc1d277SAaron Tomlin 	if (err < 0)
3355cfc1d277SAaron Tomlin 		goto free_modinfo;
3356cfc1d277SAaron Tomlin 
3357cfc1d277SAaron Tomlin 	err = apply_relocations(mod, info);
3358cfc1d277SAaron Tomlin 	if (err < 0)
3359cfc1d277SAaron Tomlin 		goto free_modinfo;
3360cfc1d277SAaron Tomlin 
3361cfc1d277SAaron Tomlin 	err = post_relocation(mod, info);
3362cfc1d277SAaron Tomlin 	if (err < 0)
3363cfc1d277SAaron Tomlin 		goto free_modinfo;
3364cfc1d277SAaron Tomlin 
3365cfc1d277SAaron Tomlin 	flush_module_icache(mod);
3366cfc1d277SAaron Tomlin 
3367cfc1d277SAaron Tomlin 	/* Now copy in args */
3368cfc1d277SAaron Tomlin 	mod->args = strndup_user(uargs, ~0UL >> 1);
3369cfc1d277SAaron Tomlin 	if (IS_ERR(mod->args)) {
3370cfc1d277SAaron Tomlin 		err = PTR_ERR(mod->args);
3371cfc1d277SAaron Tomlin 		goto free_arch_cleanup;
3372cfc1d277SAaron Tomlin 	}
3373cfc1d277SAaron Tomlin 
3374cfc1d277SAaron Tomlin 	init_build_id(mod, info);
3375cfc1d277SAaron Tomlin 
3376cfc1d277SAaron Tomlin 	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3377cfc1d277SAaron Tomlin 	ftrace_module_init(mod);
3378cfc1d277SAaron Tomlin 
3379cfc1d277SAaron Tomlin 	/* Finally it's fully formed, ready to start executing. */
3380cfc1d277SAaron Tomlin 	err = complete_formation(mod, info);
3381cfc1d277SAaron Tomlin 	if (err)
3382cfc1d277SAaron Tomlin 		goto ddebug_cleanup;
3383cfc1d277SAaron Tomlin 
3384cfc1d277SAaron Tomlin 	err = prepare_coming_module(mod);
3385cfc1d277SAaron Tomlin 	if (err)
3386cfc1d277SAaron Tomlin 		goto bug_cleanup;
3387cfc1d277SAaron Tomlin 
3388ae39e9edSSaravana Kannan 	mod->async_probe_requested = async_probe;
3389ae39e9edSSaravana Kannan 
3390cfc1d277SAaron Tomlin 	/* Module is ready to execute: parsing args may do that. */
3391cfc1d277SAaron Tomlin 	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3392cfc1d277SAaron Tomlin 				  -32768, 32767, mod,
3393cfc1d277SAaron Tomlin 				  unknown_module_param_cb);
3394cfc1d277SAaron Tomlin 	if (IS_ERR(after_dashes)) {
3395cfc1d277SAaron Tomlin 		err = PTR_ERR(after_dashes);
3396cfc1d277SAaron Tomlin 		goto coming_cleanup;
3397cfc1d277SAaron Tomlin 	} else if (after_dashes) {
3398cfc1d277SAaron Tomlin 		pr_warn("%s: parameters '%s' after `--' ignored\n",
3399cfc1d277SAaron Tomlin 			mod->name, after_dashes);
3400cfc1d277SAaron Tomlin 	}
3401cfc1d277SAaron Tomlin 
3402cfc1d277SAaron Tomlin 	/* Link in to sysfs. */
3403cfc1d277SAaron Tomlin 	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3404cfc1d277SAaron Tomlin 	if (err < 0)
3405cfc1d277SAaron Tomlin 		goto coming_cleanup;
3406cfc1d277SAaron Tomlin 
3407cfc1d277SAaron Tomlin 	if (is_livepatch_module(mod)) {
3408cfc1d277SAaron Tomlin 		err = copy_module_elf(mod, info);
3409cfc1d277SAaron Tomlin 		if (err < 0)
3410cfc1d277SAaron Tomlin 			goto sysfs_cleanup;
3411cfc1d277SAaron Tomlin 	}
3412cfc1d277SAaron Tomlin 
3413cfc1d277SAaron Tomlin 	/* Get rid of temporary copy. */
3414cfc1d277SAaron Tomlin 	free_copy(info, flags);
3415cfc1d277SAaron Tomlin 
3416a4735739SSuren Baghdasaryan 	codetag_load_module(mod);
3417a4735739SSuren Baghdasaryan 
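	/*
	 * Illustrative note (editor's addition, not in the original source):
	 * the module_load tracepoint fired just below can be observed from
	 * userspace via tracefs, e.g.:
	 *
	 *	# echo 1 > /sys/kernel/tracing/events/module/module_load/enable
	 *	# cat /sys/kernel/tracing/trace_pipe
	 *
	 * The exact tracefs mount point may differ on a given system; this
	 * is only a sketch.
	 */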
3418cfc1d277SAaron Tomlin 	/* Done! */
3419cfc1d277SAaron Tomlin 	trace_module_load(mod);
3420cfc1d277SAaron Tomlin 
3421cfc1d277SAaron Tomlin 	return do_init_module(mod);
3422cfc1d277SAaron Tomlin 
3423cfc1d277SAaron Tomlin  sysfs_cleanup:
3424cfc1d277SAaron Tomlin 	mod_sysfs_teardown(mod);
3425cfc1d277SAaron Tomlin  coming_cleanup:
3426cfc1d277SAaron Tomlin 	mod->state = MODULE_STATE_GOING;
3427cfc1d277SAaron Tomlin 	destroy_params(mod->kp, mod->num_kp);
3428cfc1d277SAaron Tomlin 	blocking_notifier_call_chain(&module_notify_list,
3429cfc1d277SAaron Tomlin 				     MODULE_STATE_GOING, mod);
3430cfc1d277SAaron Tomlin 	klp_module_going(mod);
3431cfc1d277SAaron Tomlin  bug_cleanup:
3432cfc1d277SAaron Tomlin 	mod->state = MODULE_STATE_GOING;
3433cfc1d277SAaron Tomlin 	/* module_bug_cleanup needs module_mutex protection */
3434cfc1d277SAaron Tomlin 	mutex_lock(&module_mutex);
3435cfc1d277SAaron Tomlin 	module_bug_cleanup(mod);
3436cfc1d277SAaron Tomlin 	mutex_unlock(&module_mutex);
3437cfc1d277SAaron Tomlin 
3438cfc1d277SAaron Tomlin  ddebug_cleanup:
3439cfc1d277SAaron Tomlin 	ftrace_release_mod(mod);
3440cfc1d277SAaron Tomlin 	synchronize_rcu();
3441cfc1d277SAaron Tomlin 	kfree(mod->args);
3442cfc1d277SAaron Tomlin  free_arch_cleanup:
3443cfc1d277SAaron Tomlin 	module_arch_cleanup(mod);
3444cfc1d277SAaron Tomlin  free_modinfo:
3445cfc1d277SAaron Tomlin 	free_modinfo(mod);
3446cfc1d277SAaron Tomlin  free_unload:
3447cfc1d277SAaron Tomlin 	module_unload_free(mod);
3448cfc1d277SAaron Tomlin  unlink_mod:
3449cfc1d277SAaron Tomlin 	mutex_lock(&module_mutex);
3450cfc1d277SAaron Tomlin 	/* Unlink carefully: kallsyms could be walking list. */
3451cfc1d277SAaron Tomlin 	list_del_rcu(&mod->list);
3452cfc1d277SAaron Tomlin 	mod_tree_remove(mod);
3453cfc1d277SAaron Tomlin 	wake_up_all(&module_wq);
3454cfc1d277SAaron Tomlin 	/* Wait for RCU-sched synchronization before releasing mod->list. */
3455cfc1d277SAaron Tomlin 	synchronize_rcu();
3456cfc1d277SAaron Tomlin 	mutex_unlock(&module_mutex);
3457cfc1d277SAaron Tomlin  free_module:
3458df3e764dSLuis Chamberlain 	mod_stat_bump_invalid(info, flags);
3459cfc1d277SAaron Tomlin 	/* Free lock-classes; relies on the preceding sync_rcu() */
3460ac3b4328SSong Liu 	for_class_mod_mem_type(type, core_data) {
3461ac3b4328SSong Liu 		lockdep_free_key_range(mod->mem[type].base,
3462ac3b4328SSong Liu 				       mod->mem[type].size);
3463ac3b4328SSong Liu 	}
3464cfc1d277SAaron Tomlin 
3465*c287c072SMike Rapoport (Microsoft) 	module_memory_restore_rox(mod);
3466cfc1d277SAaron Tomlin 	module_deallocate(mod, info);
3467cfc1d277SAaron Tomlin  free_copy:
3468df3e764dSLuis Chamberlain 	/*
3469df3e764dSLuis Chamberlain 	 * The info->len is always set. We distinguish between
3470df3e764dSLuis Chamberlain 	 * failures once the proper module was allocated and
3471df3e764dSLuis Chamberlain 	 * before that.
3472df3e764dSLuis Chamberlain 	 */
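	/*
	 * Illustrative note (editor's addition, not in the original source):
	 * the mod_stat_* counters bumped on these failure paths are only
	 * compiled in with CONFIG_MODULE_STATS=y, in which case they are
	 * expected to be exposed through debugfs under the "modules"
	 * directory created at the bottom of this file, e.g.:
	 *
	 *	# cat /sys/kernel/debug/modules/stats
	 *
	 * The exact file layout is defined elsewhere (kernel/module/stats.c).
	 */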
3473df3e764dSLuis Chamberlain 	if (!module_allocated)
3474df3e764dSLuis Chamberlain 		mod_stat_bump_becoming(info, flags);
3475cfc1d277SAaron Tomlin 	free_copy(info, flags);
3476cfc1d277SAaron Tomlin 	return err;
3477cfc1d277SAaron Tomlin }
3478cfc1d277SAaron Tomlin 
3479cfc1d277SAaron Tomlin SYSCALL_DEFINE3(init_module, void __user *, umod,
3480cfc1d277SAaron Tomlin 		unsigned long, len, const char __user *, uargs)
3481cfc1d277SAaron Tomlin {
3482cfc1d277SAaron Tomlin 	int err;
3483cfc1d277SAaron Tomlin 	struct load_info info = { };
3484cfc1d277SAaron Tomlin 
3485cfc1d277SAaron Tomlin 	err = may_init_module();
3486cfc1d277SAaron Tomlin 	if (err)
3487cfc1d277SAaron Tomlin 		return err;
3488cfc1d277SAaron Tomlin 
3489cfc1d277SAaron Tomlin 	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3490cfc1d277SAaron Tomlin 		 umod, len, uargs);
3491cfc1d277SAaron Tomlin 
3492cfc1d277SAaron Tomlin 	err = copy_module_from_user(umod, len, &info);
3493df3e764dSLuis Chamberlain 	if (err) {
3494df3e764dSLuis Chamberlain 		mod_stat_inc(&failed_kreads);
3495df3e764dSLuis Chamberlain 		mod_stat_add_long(len, &invalid_kread_bytes);
3496cfc1d277SAaron Tomlin 		return err;
3497df3e764dSLuis Chamberlain 	}
3498cfc1d277SAaron Tomlin 
3499cfc1d277SAaron Tomlin 	return load_module(&info, uargs, 0);
3500cfc1d277SAaron Tomlin }
3501cfc1d277SAaron Tomlin 
35029b9879fcSLinus Torvalds struct idempotent {
35039b9879fcSLinus Torvalds 	const void *cookie;
35049b9879fcSLinus Torvalds 	struct hlist_node entry;
35059b9879fcSLinus Torvalds 	struct completion complete;
35069b9879fcSLinus Torvalds 	int ret;
35079b9879fcSLinus Torvalds };
35089b9879fcSLinus Torvalds 
35099b9879fcSLinus Torvalds #define IDEM_HASH_BITS 8
35109b9879fcSLinus Torvalds static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
35119b9879fcSLinus Torvalds static DEFINE_SPINLOCK(idem_lock);
35129b9879fcSLinus Torvalds 
35139b9879fcSLinus Torvalds static bool idempotent(struct idempotent *u, const void *cookie)
35149b9879fcSLinus Torvalds {
35159b9879fcSLinus Torvalds 	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
35169b9879fcSLinus Torvalds 	struct hlist_head *head = idem_hash + hash;
35179b9879fcSLinus Torvalds 	struct idempotent *existing;
35189b9879fcSLinus Torvalds 	bool first;
35199b9879fcSLinus Torvalds 
35202124d84dSLinus Torvalds 	u->ret = -EINTR;
35219b9879fcSLinus Torvalds 	u->cookie = cookie;
35229b9879fcSLinus Torvalds 	init_completion(&u->complete);
35239b9879fcSLinus Torvalds 
35249b9879fcSLinus Torvalds 	spin_lock(&idem_lock);
35259b9879fcSLinus Torvalds 	first = true;
35269b9879fcSLinus Torvalds 	hlist_for_each_entry(existing, head, entry) {
35279b9879fcSLinus Torvalds 		if (existing->cookie != cookie)
35289b9879fcSLinus Torvalds 			continue;
35299b9879fcSLinus Torvalds 		first = false;
35309b9879fcSLinus Torvalds 		break;
35319b9879fcSLinus Torvalds 	}
35329b9879fcSLinus Torvalds 	hlist_add_head(&u->entry, idem_hash + hash);
35339b9879fcSLinus Torvalds 	spin_unlock(&idem_lock);
35349b9879fcSLinus Torvalds 
35359b9879fcSLinus Torvalds 	return !first;
35369b9879fcSLinus Torvalds }
35379b9879fcSLinus Torvalds 
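/*
 * Illustrative sketch (editor's addition, not in the original source) of how
 * the idempotent helpers above and below are meant to be combined by a
 * caller; this mirrors idempotent_init_module() further down and uses a
 * made-up do_work() callback purely for illustration:
 *
 *	struct idempotent idem;
 *
 *	if (!idempotent(&idem, cookie))			// first caller does the work
 *		return idempotent_complete(&idem, do_work());
 *	return idempotent_wait_for_completion(&idem);	// everyone else waits
 */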
35389b9879fcSLinus Torvalds /*
35399b9879fcSLinus Torvalds  * We were the first one with 'cookie' on the list, and we ended
35409b9879fcSLinus Torvalds  * up completing the operation. We now need to walk the list,
35419b9879fcSLinus Torvalds  * remove everybody - which includes ourselves - fill in the return
35429b9879fcSLinus Torvalds  * value, and then complete the operation.
35439b9879fcSLinus Torvalds  */
3544f1962207SLinus Torvalds static int idempotent_complete(struct idempotent *u, int ret)
35459b9879fcSLinus Torvalds {
35469b9879fcSLinus Torvalds 	const void *cookie = u->cookie;
35479b9879fcSLinus Torvalds 	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
35489b9879fcSLinus Torvalds 	struct hlist_head *head = idem_hash + hash;
35499b9879fcSLinus Torvalds 	struct hlist_node *next;
35509b9879fcSLinus Torvalds 	struct idempotent *pos;
35519b9879fcSLinus Torvalds 
35529b9879fcSLinus Torvalds 	spin_lock(&idem_lock);
35539b9879fcSLinus Torvalds 	hlist_for_each_entry_safe(pos, next, head, entry) {
35549b9879fcSLinus Torvalds 		if (pos->cookie != cookie)
35559b9879fcSLinus Torvalds 			continue;
35562124d84dSLinus Torvalds 		hlist_del_init(&pos->entry);
35579b9879fcSLinus Torvalds 		pos->ret = ret;
35589b9879fcSLinus Torvalds 		complete(&pos->complete);
35599b9879fcSLinus Torvalds 	}
35609b9879fcSLinus Torvalds 	spin_unlock(&idem_lock);
3561f1962207SLinus Torvalds 	return ret;
35629b9879fcSLinus Torvalds }
35639b9879fcSLinus Torvalds 
35642124d84dSLinus Torvalds /*
35652124d84dSLinus Torvalds  * Wait for the idempotent worker.
35662124d84dSLinus Torvalds  *
35672124d84dSLinus Torvalds  * If we get interrupted, we need to remove ourselves from the
35682124d84dSLinus Torvalds  * idempotent list, and the completion may still come in.
35692124d84dSLinus Torvalds  *
35702124d84dSLinus Torvalds  * The 'idem_lock' protects against the race, and 'idem.ret' was
35712124d84dSLinus Torvalds  * initialized to -EINTR and is thus always the right return
35722124d84dSLinus Torvalds  * value even if the idempotent work then completes between
35732124d84dSLinus Torvalds  * the wait_for_completion and the cleanup.
35742124d84dSLinus Torvalds  */
35752124d84dSLinus Torvalds static int idempotent_wait_for_completion(struct idempotent *u)
35762124d84dSLinus Torvalds {
35772124d84dSLinus Torvalds 	if (wait_for_completion_interruptible(&u->complete)) {
35782124d84dSLinus Torvalds 		spin_lock(&idem_lock);
35792124d84dSLinus Torvalds 		if (!hlist_unhashed(&u->entry))
35802124d84dSLinus Torvalds 			hlist_del(&u->entry);
35812124d84dSLinus Torvalds 		spin_unlock(&idem_lock);
35822124d84dSLinus Torvalds 	}
35832124d84dSLinus Torvalds 	return u->ret;
35842124d84dSLinus Torvalds }
35852124d84dSLinus Torvalds 
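/*
 * Illustrative sketch (editor's addition, not in the original source): from
 * userspace, the decompression path handled below is reached by passing
 * MODULE_INIT_COMPRESSED_FILE to finit_module(2) with an fd of a compressed
 * .ko (requires CONFIG_MODULE_DECOMPRESS=y). A minimal caller might look
 * like:
 *
 *	int fd = open("example.ko.xz", O_RDONLY | O_CLOEXEC);
 *	if (syscall(__NR_finit_module, fd, "", MODULE_INIT_COMPRESSED_FILE))
 *		perror("finit_module");
 *
 * "example.ko.xz" is a placeholder; error handling is omitted for brevity.
 */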
3586054a7300SLinus Torvalds static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
3587cfc1d277SAaron Tomlin {
3588cfc1d277SAaron Tomlin 	struct load_info info = { };
3589cfc1d277SAaron Tomlin 	void *buf = NULL;
3590f1962207SLinus Torvalds 	int len;
35919b9879fcSLinus Torvalds 
3592054a7300SLinus Torvalds 	len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE);
3593df3e764dSLuis Chamberlain 	if (len < 0) {
3594df3e764dSLuis Chamberlain 		mod_stat_inc(&failed_kreads);
3595cfc1d277SAaron Tomlin 		return len;
3596df3e764dSLuis Chamberlain 	}
3597cfc1d277SAaron Tomlin 
3598cfc1d277SAaron Tomlin 	if (flags & MODULE_INIT_COMPRESSED_FILE) {
3599054a7300SLinus Torvalds 		int err = module_decompress(&info, buf, len);
3600cfc1d277SAaron Tomlin 		vfree(buf); /* compressed data is no longer needed */
3601df3e764dSLuis Chamberlain 		if (err) {
3602df3e764dSLuis Chamberlain 			mod_stat_inc(&failed_decompress);
3603df3e764dSLuis Chamberlain 			mod_stat_add_long(len, &invalid_decompress_bytes);
3604cfc1d277SAaron Tomlin 			return err;
3605df3e764dSLuis Chamberlain 		}
3606cfc1d277SAaron Tomlin 	} else {
3607cfc1d277SAaron Tomlin 		info.hdr = buf;
3608cfc1d277SAaron Tomlin 		info.len = len;
3609cfc1d277SAaron Tomlin 	}
3610cfc1d277SAaron Tomlin 
3611f1962207SLinus Torvalds 	return load_module(&info, uargs, flags);
3612f1962207SLinus Torvalds }
3613f1962207SLinus Torvalds 
3614f1962207SLinus Torvalds static int idempotent_init_module(struct file *f, const char __user * uargs, int flags)
3615f1962207SLinus Torvalds {
3616f1962207SLinus Torvalds 	struct idempotent idem;
3617f1962207SLinus Torvalds 
361805e55564SAl Viro 	if (!(f->f_mode & FMODE_READ))
3619f1962207SLinus Torvalds 		return -EBADF;
3620f1962207SLinus Torvalds 
3621cb5b81bcSLinus Torvalds 	/* Are we the winners of the race and get to do this? */
3622cb5b81bcSLinus Torvalds 	if (!idempotent(&idem, file_inode(f))) {
3623cb5b81bcSLinus Torvalds 		int ret = init_module_from_file(f, uargs, flags);
3624cb5b81bcSLinus Torvalds 		return idempotent_complete(&idem, ret);
3625f1962207SLinus Torvalds 	}
3626f1962207SLinus Torvalds 
3627cb5b81bcSLinus Torvalds 	/*
3628cb5b81bcSLinus Torvalds 	 * Somebody else won the race and is loading the module.
3629cb5b81bcSLinus Torvalds 	 */
36302124d84dSLinus Torvalds 	return idempotent_wait_for_completion(&idem);
3631cfc1d277SAaron Tomlin }
3632cfc1d277SAaron Tomlin 
3633054a7300SLinus Torvalds SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3634054a7300SLinus Torvalds {
36358152f820SAl Viro 	int err = may_init_module();
3636054a7300SLinus Torvalds 	if (err)
3637054a7300SLinus Torvalds 		return err;
3638054a7300SLinus Torvalds 
3639054a7300SLinus Torvalds 	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3640054a7300SLinus Torvalds 
3641054a7300SLinus Torvalds 	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3642054a7300SLinus Torvalds 		      |MODULE_INIT_IGNORE_VERMAGIC
3643054a7300SLinus Torvalds 		      |MODULE_INIT_COMPRESSED_FILE))
3644054a7300SLinus Torvalds 		return -EINVAL;
3645054a7300SLinus Torvalds 
36468152f820SAl Viro 	CLASS(fd, f)(fd);
364705e55564SAl Viro 	if (fd_empty(f))
364805e55564SAl Viro 		return -EBADF;
36498152f820SAl Viro 	return idempotent_init_module(fd_file(f), uargs, flags);
3650054a7300SLinus Torvalds }
3651054a7300SLinus Torvalds 
3652cfc1d277SAaron Tomlin /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
365317dd25c2SAaron Tomlin char *module_flags(struct module *mod, char *buf, bool show_state)
3654cfc1d277SAaron Tomlin {
3655cfc1d277SAaron Tomlin 	int bx = 0;
3656cfc1d277SAaron Tomlin 
3657cfc1d277SAaron Tomlin 	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
365817dd25c2SAaron Tomlin 	if (!mod->taints && !show_state)
365917dd25c2SAaron Tomlin 		goto out;
3660cfc1d277SAaron Tomlin 	if (mod->taints ||
3661cfc1d277SAaron Tomlin 	    mod->state == MODULE_STATE_GOING ||
3662cfc1d277SAaron Tomlin 	    mod->state == MODULE_STATE_COMING) {
3663cfc1d277SAaron Tomlin 		buf[bx++] = '(';
3664c14e522bSAaron Tomlin 		bx += module_flags_taint(mod->taints, buf + bx);
3665cfc1d277SAaron Tomlin 		/* Show a - for module-is-being-unloaded */
366617dd25c2SAaron Tomlin 		if (mod->state == MODULE_STATE_GOING && show_state)
3667cfc1d277SAaron Tomlin 			buf[bx++] = '-';
3668cfc1d277SAaron Tomlin 		/* Show a + for module-is-being-loaded */
366917dd25c2SAaron Tomlin 		if (mod->state == MODULE_STATE_COMING && show_state)
3670cfc1d277SAaron Tomlin 			buf[bx++] = '+';
3671cfc1d277SAaron Tomlin 		buf[bx++] = ')';
3672cfc1d277SAaron Tomlin 	}
367317dd25c2SAaron Tomlin out:
3674cfc1d277SAaron Tomlin 	buf[bx] = '\0';
3675cfc1d277SAaron Tomlin 
3676cfc1d277SAaron Tomlin 	return buf;
3677cfc1d277SAaron Tomlin }
3678cfc1d277SAaron Tomlin 
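/*
 * Illustrative note (editor's addition, not in the original source): the
 * string built by module_flags() above is what shows up after the "Live"
 * state in /proc/modules and in oops listings, e.g. a line such as
 *
 *	examplemod 16384 0 - Live 0xffffffffc0000000 (OE+)
 *
 * would indicate an out-of-tree (O), unsigned (E) module that is still
 * COMING (+). The module name and numbers here are made up.
 */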
3679cfc1d277SAaron Tomlin /* Given an address, look for it in the module exception tables. */
3680cfc1d277SAaron Tomlin const struct exception_table_entry *search_module_extables(unsigned long addr)
3681cfc1d277SAaron Tomlin {
3682cfc1d277SAaron Tomlin 	const struct exception_table_entry *e = NULL;
3683cfc1d277SAaron Tomlin 	struct module *mod;
3684cfc1d277SAaron Tomlin 
3685cfc1d277SAaron Tomlin 	preempt_disable();
3686cfc1d277SAaron Tomlin 	mod = __module_address(addr);
3687cfc1d277SAaron Tomlin 	if (!mod)
3688cfc1d277SAaron Tomlin 		goto out;
3689cfc1d277SAaron Tomlin 
3690cfc1d277SAaron Tomlin 	if (!mod->num_exentries)
3691cfc1d277SAaron Tomlin 		goto out;
3692cfc1d277SAaron Tomlin 
3693cfc1d277SAaron Tomlin 	e = search_extable(mod->extable,
3694cfc1d277SAaron Tomlin 			   mod->num_exentries,
3695cfc1d277SAaron Tomlin 			   addr);
3696cfc1d277SAaron Tomlin out:
3697cfc1d277SAaron Tomlin 	preempt_enable();
3698cfc1d277SAaron Tomlin 
3699cfc1d277SAaron Tomlin 	/*
3700cfc1d277SAaron Tomlin 	 * Now, if we found one, we are running inside it, hence we
3701cfc1d277SAaron Tomlin 	 * cannot unload the module, hence no refcnt needed.
3702cfc1d277SAaron Tomlin 	 */
3703cfc1d277SAaron Tomlin 	return e;
3704cfc1d277SAaron Tomlin }
3705cfc1d277SAaron Tomlin 
3706cfc1d277SAaron Tomlin /**
3707cfc1d277SAaron Tomlin  * is_module_address() - is this address inside a module?
3708cfc1d277SAaron Tomlin  * @addr: the address to check.
3709cfc1d277SAaron Tomlin  *
3710cfc1d277SAaron Tomlin  * See is_module_text_address() if you simply want to see if the address
3711cfc1d277SAaron Tomlin  * is code (not data).
3712cfc1d277SAaron Tomlin  */
3713cfc1d277SAaron Tomlin bool is_module_address(unsigned long addr)
3714cfc1d277SAaron Tomlin {
3715cfc1d277SAaron Tomlin 	bool ret;
3716cfc1d277SAaron Tomlin 
3717cfc1d277SAaron Tomlin 	preempt_disable();
3718cfc1d277SAaron Tomlin 	ret = __module_address(addr) != NULL;
3719cfc1d277SAaron Tomlin 	preempt_enable();
3720cfc1d277SAaron Tomlin 
3721cfc1d277SAaron Tomlin 	return ret;
3722cfc1d277SAaron Tomlin }
3723cfc1d277SAaron Tomlin 
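/*
 * Illustrative sketch (editor's addition, not in the original source) of a
 * typical is_module_address() use, e.g. in a sanity check that wants to know
 * whether a pointer refers to module space rather than core kernel ranges:
 *
 *	if (is_module_address((unsigned long)ptr))
 *		pr_debug("%px lies within a loaded module\n", ptr);
 *
 * "ptr" is an arbitrary kernel pointer used only for illustration.
 */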
3724cfc1d277SAaron Tomlin /**
3725cfc1d277SAaron Tomlin  * __module_address() - get the module which contains an address.
3726cfc1d277SAaron Tomlin  * @addr: the address.
3727cfc1d277SAaron Tomlin  *
3728cfc1d277SAaron Tomlin  * Must be called with preempt disabled or module mutex held so that
3729cfc1d277SAaron Tomlin  * the module doesn't get freed during this.
3730cfc1d277SAaron Tomlin  */
3731cfc1d277SAaron Tomlin struct module *__module_address(unsigned long addr)
3732cfc1d277SAaron Tomlin {
3733cfc1d277SAaron Tomlin 	struct module *mod;
3734cfc1d277SAaron Tomlin 
373501dc0386SChristophe Leroy 	if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
3736ac3b4328SSong Liu 		goto lookup;
3737ac3b4328SSong Liu 
373801dc0386SChristophe Leroy #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
3739ac3b4328SSong Liu 	if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
3740ac3b4328SSong Liu 		goto lookup;
374101dc0386SChristophe Leroy #endif
3742ac3b4328SSong Liu 
3743cfc1d277SAaron Tomlin 	return NULL;
3744cfc1d277SAaron Tomlin 
3745ac3b4328SSong Liu lookup:
3746cfc1d277SAaron Tomlin 	module_assert_mutex_or_preempt();
3747cfc1d277SAaron Tomlin 
3748ac3b4328SSong Liu 	mod = mod_find(addr, &mod_tree);
3749cfc1d277SAaron Tomlin 	if (mod) {
3750cfc1d277SAaron Tomlin 		BUG_ON(!within_module(addr, mod));
3751cfc1d277SAaron Tomlin 		if (mod->state == MODULE_STATE_UNFORMED)
3752cfc1d277SAaron Tomlin 			mod = NULL;
3753cfc1d277SAaron Tomlin 	}
3754cfc1d277SAaron Tomlin 	return mod;
3755cfc1d277SAaron Tomlin }
3756cfc1d277SAaron Tomlin 
3757cfc1d277SAaron Tomlin /**
3758cfc1d277SAaron Tomlin  * is_module_text_address() - is this address inside module code?
3759cfc1d277SAaron Tomlin  * @addr: the address to check.
3760cfc1d277SAaron Tomlin  *
3761cfc1d277SAaron Tomlin  * See is_module_address() if you simply want to see if the address is
3762cfc1d277SAaron Tomlin  * anywhere in a module. See kernel_text_address() for testing if an
3763cfc1d277SAaron Tomlin  * address corresponds to kernel or module code.
3764cfc1d277SAaron Tomlin  */
3765cfc1d277SAaron Tomlin bool is_module_text_address(unsigned long addr)
3766cfc1d277SAaron Tomlin {
3767cfc1d277SAaron Tomlin 	bool ret;
3768cfc1d277SAaron Tomlin 
3769cfc1d277SAaron Tomlin 	preempt_disable();
3770cfc1d277SAaron Tomlin 	ret = __module_text_address(addr) != NULL;
3771cfc1d277SAaron Tomlin 	preempt_enable();
3772cfc1d277SAaron Tomlin 
3773cfc1d277SAaron Tomlin 	return ret;
3774cfc1d277SAaron Tomlin }
3775cfc1d277SAaron Tomlin 
3776cfc1d277SAaron Tomlin /**
3777cfc1d277SAaron Tomlin  * __module_text_address() - get the module whose code contains an address.
3778cfc1d277SAaron Tomlin  * @addr: the address.
3779cfc1d277SAaron Tomlin  *
3780cfc1d277SAaron Tomlin  * Must be called with preempt disabled or module mutex held so that
3781cfc1d277SAaron Tomlin  * the module doesn't get freed during this.
3782cfc1d277SAaron Tomlin  */
3783cfc1d277SAaron Tomlin struct module *__module_text_address(unsigned long addr)
3784cfc1d277SAaron Tomlin {
3785cfc1d277SAaron Tomlin 	struct module *mod = __module_address(addr);
3786cfc1d277SAaron Tomlin 	if (mod) {
3787cfc1d277SAaron Tomlin 		/* Make sure it's within the text section. */
3788ac3b4328SSong Liu 		if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
3789ac3b4328SSong Liu 		    !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
3790cfc1d277SAaron Tomlin 			mod = NULL;
3791cfc1d277SAaron Tomlin 	}
3792cfc1d277SAaron Tomlin 	return mod;
3793cfc1d277SAaron Tomlin }
3794cfc1d277SAaron Tomlin 
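/*
 * Illustrative sketch (editor's addition, not in the original source) of the
 * calling convention documented above: __module_address() and
 * __module_text_address() must be guarded so the module cannot be freed
 * underneath the caller, e.g.:
 *
 *	preempt_disable();
 *	mod = __module_text_address(addr);
 *	if (mod)
 *		pr_debug("%lx is code in %s\n", addr, mod->name);
 *	preempt_enable();
 *
 * "addr" and "mod" are local variables assumed for this sketch.
 */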
3795cfc1d277SAaron Tomlin /* Don't grab lock, we're oopsing. */
3796cfc1d277SAaron Tomlin void print_modules(void)
3797cfc1d277SAaron Tomlin {
3798cfc1d277SAaron Tomlin 	struct module *mod;
3799cfc1d277SAaron Tomlin 	char buf[MODULE_FLAGS_BUF_SIZE];
3800cfc1d277SAaron Tomlin 
3801cfc1d277SAaron Tomlin 	printk(KERN_DEFAULT "Modules linked in:");
3802cfc1d277SAaron Tomlin 	/* Most callers should already have preempt disabled, but make sure */
3803cfc1d277SAaron Tomlin 	preempt_disable();
3804cfc1d277SAaron Tomlin 	list_for_each_entry_rcu(mod, &modules, list) {
3805cfc1d277SAaron Tomlin 		if (mod->state == MODULE_STATE_UNFORMED)
3806cfc1d277SAaron Tomlin 			continue;
380717dd25c2SAaron Tomlin 		pr_cont(" %s%s", mod->name, module_flags(mod, buf, true));
3808cfc1d277SAaron Tomlin 	}
380999bd9956SAaron Tomlin 
381099bd9956SAaron Tomlin 	print_unloaded_tainted_modules();
3811cfc1d277SAaron Tomlin 	preempt_enable();
38126f1dae1dSAaron Tomlin 	if (last_unloaded_module.name[0])
38136f1dae1dSAaron Tomlin 		pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
38146f1dae1dSAaron Tomlin 			last_unloaded_module.taints);
3815cfc1d277SAaron Tomlin 	pr_cont("\n");
3816cfc1d277SAaron Tomlin }
3817df3e764dSLuis Chamberlain 
3818df3e764dSLuis Chamberlain #ifdef CONFIG_MODULE_DEBUGFS
3819df3e764dSLuis Chamberlain struct dentry *mod_debugfs_root;
3820df3e764dSLuis Chamberlain 
3821df3e764dSLuis Chamberlain static int module_debugfs_init(void)
3822df3e764dSLuis Chamberlain {
3823df3e764dSLuis Chamberlain 	mod_debugfs_root = debugfs_create_dir("modules", NULL);
3824df3e764dSLuis Chamberlain 	return 0;
3825df3e764dSLuis Chamberlain }
3826df3e764dSLuis Chamberlain module_init(module_debugfs_init);
3827df3e764dSLuis Chamberlain #endif
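/*
 * Illustrative note (editor's addition, not in the original source): during
 * an oops, print_modules() above produces the familiar one-line summary,
 * roughly:
 *
 *	Modules linked in: examplemod(OE+) xfs libcrc32c [last unloaded: othermod]
 *
 * with per-module flag strings from module_flags() and, when available, the
 * name and taints of the most recently unloaded module. The module names
 * shown here are placeholders.
 */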