Diffstat (limited to 'init')
-rw-r--r-- | init/Kconfig            | 178
-rw-r--r-- | init/do_mounts_initrd.c |  22
-rw-r--r-- | init/initramfs.c        |  76
-rw-r--r-- | init/main.c             |  13
4 files changed, 106 insertions, 183 deletions
diff --git a/init/Kconfig b/init/Kconfig
index ddcbefe535e9..7efe7c56893f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -77,6 +77,11 @@ config CC_HAS_ASM_GOTO_OUTPUT
 	depends on CC_HAS_ASM_GOTO
 	def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
 
+config CC_HAS_ASM_GOTO_TIED_OUTPUT
+	depends on CC_HAS_ASM_GOTO_OUTPUT
+	# Detect buggy gcc and clang, fixed in gcc-11 clang-14.
+	def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+
 config TOOLS_SUPPORT_RELR
 	def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
 
@@ -352,23 +357,6 @@ config DEFAULT_HOSTNAME
 	  but you may wish to use a different default here to make a minimal
 	  system more usable with less configuration.
 
-#
-# For some reason microblaze and nios2 hard code SWAP=n. Hopefully we can
-# add proper SWAP support to them, in which case this can be remove.
-#
-config ARCH_NO_SWAP
-	bool
-
-config SWAP
-	bool "Support for paging of anonymous memory (swap)"
-	depends on MMU && BLOCK && !ARCH_NO_SWAP
-	default y
-	help
-	  This option allows you to choose whether you want to have support
-	  for so called swap devices or swap files in your kernel that are
-	  used to provide more virtual memory than the actual RAM present
-	  in your computer. If unsure say Y.
-
 config SYSVIPC
 	bool "System V IPC"
 	help
@@ -435,8 +423,8 @@ config CROSS_MEMORY_ATTACH
 	  See the man page for more details.
 
 config USELIB
-	bool "uselib syscall"
-	def_bool ALPHA || M68K || SPARC || X86_32 || IA32_EMULATION
+	bool "uselib syscall (for libc5 and earlier)"
+	default ALPHA || M68K || SPARC
 	help
 	  This option enables the uselib syscall, a system call used in the
 	  dynamic linker from libc5 and earlier. glibc does not use this
@@ -715,8 +703,7 @@ config IKHEADERS
 
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
-	range 12 25 if !H8300
-	range 12 19 if H8300
+	range 12 25
 	default 17
 	depends on PRINTK
 	help
@@ -1361,6 +1348,16 @@ config BOOT_CONFIG
 
 	  If unsure, say Y.
 
+config INITRAMFS_PRESERVE_MTIME
+	bool "Preserve cpio archive mtimes in initramfs"
+	default y
+	help
+	  Each entry in an initramfs cpio archive carries an mtime value. When
+	  enabled, extracted cpio items take this mtime, with directory mtime
+	  setting deferred until after creation of any child entries.
+
+	  If unsure, say Y.
+
 choice
 	prompt "Compiler optimization level"
 	default CC_OPTIMIZE_FOR_PERFORMANCE
@@ -1667,16 +1664,6 @@ config ADVISE_SYSCALLS
 	  applications use these syscalls, you can disable this option to save
 	  space.
 
-config HAVE_ARCH_USERFAULTFD_WP
-	bool
-	help
-	  Arch has userfaultfd write protection support
-
-config HAVE_ARCH_USERFAULTFD_MINOR
-	bool
-	help
-	  Arch has userfaultfd minor fault support
-
 config MEMBARRIER
 	bool "Enable membarrier() system call" if EXPERT
 	default y
@@ -1741,13 +1728,6 @@ config KALLSYMS_BASE_RELATIVE
 
 # syscall, maps, verifier
 
-config USERFAULTFD
-	bool "Enable userfaultfd() system call"
-	depends on MMU
-	help
-	  Enable the userfaultfd() system call that allows to intercept and
-	  handle page faults in userland.
-
 config ARCH_HAS_MEMBARRIER_CALLBACKS
 	bool
 
@@ -1875,6 +1855,7 @@ config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EXPERT
 	depends on SLUB && SYSFS
+	select STACKDEPOT if STACKTRACE_SUPPORT
 	help
 	  SLUB has extensive debug support features. Disabling these can
 	  result in significant savings in code size. This also disables
@@ -1893,112 +1874,6 @@ config COMPAT_BRK
 
 	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
 
-choice
-	prompt "Choose SLAB allocator"
-	default SLUB
-	help
-	  This option allows to select a slab allocator.
-
-config SLAB
-	bool "SLAB"
-	depends on !PREEMPT_RT
-	select HAVE_HARDENED_USERCOPY_ALLOCATOR
-	help
-	  The regular slab allocator that is established and known to work
-	  well in all environments. It organizes cache hot objects in
-	  per cpu and per node queues.
-
-config SLUB
-	bool "SLUB (Unqueued Allocator)"
-	select HAVE_HARDENED_USERCOPY_ALLOCATOR
-	help
-	  SLUB is a slab allocator that minimizes cache line usage
-	  instead of managing queues of cached objects (SLAB approach).
-	  Per cpu caching is realized using slabs of objects instead
-	  of queues of objects. SLUB can use memory efficiently
-	  and has enhanced diagnostics. SLUB is the default choice for
-	  a slab allocator.
-
-config SLOB
-	depends on EXPERT
-	bool "SLOB (Simple Allocator)"
-	depends on !PREEMPT_RT
-	help
-	  SLOB replaces the stock allocator with a drastically simpler
-	  allocator. SLOB is generally more space efficient but
-	  does not perform as well on large systems.
-
-endchoice
-
-config SLAB_MERGE_DEFAULT
-	bool "Allow slab caches to be merged"
-	default y
-	depends on SLAB || SLUB
-	help
-	  For reduced kernel memory fragmentation, slab caches can be
-	  merged when they share the same size and other characteristics.
-	  This carries a risk of kernel heap overflows being able to
-	  overwrite objects from merged caches (and more easily control
-	  cache layout), which makes such heap attacks easier to exploit
-	  by attackers. By keeping caches unmerged, these kinds of exploits
-	  can usually only damage objects in the same cache. To disable
-	  merging at runtime, "slab_nomerge" can be passed on the kernel
-	  command line.
-
-config SLAB_FREELIST_RANDOM
-	bool "Randomize slab freelist"
-	depends on SLAB || SLUB
-	help
-	  Randomizes the freelist order used on creating new pages. This
-	  security feature reduces the predictability of the kernel slab
-	  allocator against heap overflows.
-
-config SLAB_FREELIST_HARDENED
-	bool "Harden slab freelist metadata"
-	depends on SLAB || SLUB
-	help
-	  Many kernel heap attacks try to target slab cache metadata and
-	  other infrastructure. This options makes minor performance
-	  sacrifices to harden the kernel slab allocator against common
-	  freelist exploit methods. Some slab implementations have more
-	  sanity-checking than others. This option is most effective with
-	  CONFIG_SLUB.
-
-config SHUFFLE_PAGE_ALLOCATOR
-	bool "Page allocator randomization"
-	default SLAB_FREELIST_RANDOM && ACPI_NUMA
-	help
-	  Randomization of the page allocator improves the average
-	  utilization of a direct-mapped memory-side-cache. See section
-	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
-	  6.2a specification for an example of how a platform advertises
-	  the presence of a memory-side-cache. There are also incidental
-	  security benefits as it reduces the predictability of page
-	  allocations to compliment SLAB_FREELIST_RANDOM, but the
-	  default granularity of shuffling on the "MAX_ORDER - 1" i.e,
-	  10th order of pages is selected based on cache utilization
-	  benefits on x86.
-
-	  While the randomization improves cache utilization it may
-	  negatively impact workloads on platforms without a cache. For
-	  this reason, by default, the randomization is enabled only
-	  after runtime detection of a direct-mapped memory-side-cache.
-	  Otherwise, the randomization may be force enabled with the
-	  'page_alloc.shuffle' kernel command line parameter.
-
-	  Say Y if unsure.
-
-config SLUB_CPU_PARTIAL
-	default y
-	depends on SLUB && SMP
-	bool "SLUB per cpu partial cache"
-	help
-	  Per cpu partial caches accelerate objects allocation and freeing
-	  that is local to a processor at the price of more indeterminism
-	  in the latency of the free. On overflow these caches will be cleared
-	  which requires the taking of locks that may cause latency spikes.
-	  Typically one would choose no for a realtime system.
-
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
 	depends on EXPERT && !MMU
@@ -2118,6 +1993,17 @@ config MODULE_FORCE_UNLOAD
 	  rmmod).  This is mainly for kernel developers and desperate users.
 	  If unsure, say N.
 
+config MODULE_UNLOAD_TAINT_TRACKING
+	bool "Tainted module unload tracking"
+	depends on MODULE_UNLOAD
+	default n
+	help
+	  This option allows you to maintain a record of each unloaded
+	  module that tainted the kernel. In addition to displaying a
+	  list of linked (or loaded) modules e.g. on detection of a bad
+	  page (see bad_page()), the aforementioned details are also
+	  shown. If unsure, say N.
+
 config MODVERSIONS
 	bool "Module versioning support"
 	help
@@ -2136,10 +2022,6 @@ config ASM_MODVERSIONS
 	  assembly. This can be enabled only when the target architecture
 	  supports it.
 
-config MODULE_REL_CRCS
-	bool
-	depends on MODVERSIONS
-
 config MODULE_SRCVERSION_ALL
 	bool "Source checksum for all modules"
 	help
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 533d81ed74d4..327962ea354c 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -14,12 +14,32 @@
 unsigned long initrd_start, initrd_end;
 int initrd_below_start_ok;
-unsigned int real_root_dev;	/* do_proc_dointvec cannot handle kdev_t */
+static unsigned int real_root_dev;	/* do_proc_dointvec cannot handle kdev_t */
 static int __initdata mount_initrd = 1;
 
 phys_addr_t phys_initrd_start __initdata;
 unsigned long phys_initrd_size __initdata;
 
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_do_mounts_initrd_table[] = {
+	{
+		.procname       = "real-root-dev",
+		.data           = &real_root_dev,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{ }
+};
+
+static __init int kernel_do_mounts_initrd_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_do_mounts_initrd_table);
+	return 0;
+}
+late_initcall(kernel_do_mounts_initrd_sysctls_init);
+#endif /* CONFIG_SYSCTL */
+
 static int __init no_initrd(char *str)
 {
 	mount_initrd = 0;
diff --git a/init/initramfs.c b/init/initramfs.c
index 2f3d96dc3db6..dc84cf756cea 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -17,8 +17,11 @@
 #include <linux/init_syscalls.h>
 #include <linux/umh.h>
 
-static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
-		loff_t *pos)
+static __initdata bool csum_present;
+static __initdata u32 io_csum;
+
+static ssize_t __init xwrite(struct file *file, const unsigned char *p,
+			     size_t count, loff_t *pos)
 {
 	ssize_t out = 0;
 
@@ -33,6 +36,13 @@ static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
 		} else if (rv == 0)
 			break;
 
+		if (csum_present) {
+			ssize_t i;
+
+			for (i = 0; i < rv; i++)
+				io_csum += p[i];
+		}
+
 		p += rv;
 		out += rv;
 		count -= rv;
@@ -116,31 +126,36 @@ static void __init free_hash(void)
 	}
 }
 
-static long __init do_utime(char *filename, time64_t mtime)
+#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
+static void __init do_utime(char *filename, time64_t mtime)
 {
-	struct timespec64 t[2];
+	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
+	init_utimes(filename, t);
+}
 
-	t[0].tv_sec = mtime;
-	t[0].tv_nsec = 0;
-	t[1].tv_sec = mtime;
-	t[1].tv_nsec = 0;
-	return init_utimes(filename, t);
+static void __init do_utime_path(const struct path *path, time64_t mtime)
+{
+	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
+	vfs_utimes(path, t);
 }
 
 static __initdata LIST_HEAD(dir_list);
 struct dir_entry {
 	struct list_head list;
-	char *name;
 	time64_t mtime;
+	char name[];
 };
 
 static void __init dir_add(const char *name, time64_t mtime)
 {
-	struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
+	size_t nlen = strlen(name) + 1;
+	struct dir_entry *de;
+
+	de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
 	if (!de)
 		panic_show_mem("can't allocate dir_entry buffer");
 	INIT_LIST_HEAD(&de->list);
-	de->name = kstrdup(name, GFP_KERNEL);
+	strscpy(de->name, name, nlen);
 	de->mtime = mtime;
 	list_add(&de->list, &dir_list);
 }
@@ -151,10 +166,15 @@ static void __init dir_utime(void)
 	list_for_each_entry_safe(de, tmp, &dir_list, list) {
 		list_del(&de->list);
 		do_utime(de->name, de->mtime);
-		kfree(de->name);
 		kfree(de);
 	}
 }
+#else
+static void __init do_utime(char *filename, time64_t mtime) {}
+static void __init do_utime_path(const struct path *path, time64_t mtime) {}
+static void __init dir_add(const char *name, time64_t mtime) {}
+static void __init dir_utime(void) {}
+#endif
 
 static __initdata time64_t mtime;
 
@@ -166,15 +186,16 @@ static __initdata unsigned long body_len, name_len;
 static __initdata uid_t uid;
 static __initdata gid_t gid;
 static __initdata unsigned rdev;
+static __initdata u32 hdr_csum;
 
 static void __init parse_header(char *s)
 {
-	unsigned long parsed[12];
+	unsigned long parsed[13];
 	char buf[9];
 	int i;
 
 	buf[8] = '\0';
-	for (i = 0, s += 6; i < 12; i++, s += 8) {
+	for (i = 0, s += 6; i < 13; i++, s += 8) {
 		memcpy(buf, s, 8);
 		parsed[i] = simple_strtoul(buf, NULL, 16);
 	}
@@ -189,6 +210,7 @@ static void __init parse_header(char *s)
 	minor = parsed[8];
 	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
 	name_len = parsed[11];
+	hdr_csum = parsed[12];
 }
 
 /* FSM */
@@ -257,12 +279,15 @@ static int __init do_collect(void)
 
 static int __init do_header(void)
 {
-	if (memcmp(collected, "070707", 6)==0) {
-		error("incorrect cpio method used: use -H newc option");
-		return 1;
-	}
-	if (memcmp(collected, "070701", 6)) {
-		error("no cpio magic");
+	if (!memcmp(collected, "070701", 6)) {
+		csum_present = false;
+	} else if (!memcmp(collected, "070702", 6)) {
+		csum_present = true;
+	} else {
+		if (memcmp(collected, "070707", 6) == 0)
+			error("incorrect cpio method used: use -H newc option");
+		else
+			error("no cpio magic");
 		return 1;
 	}
 	parse_header(collected);
@@ -353,6 +378,7 @@ static int __init do_name(void)
 		if (IS_ERR(wfile))
 			return 0;
 		wfile_pos = 0;
+		io_csum = 0;
 
 		vfs_fchown(wfile, uid, gid);
 		vfs_fchmod(wfile, mode);
@@ -380,15 +406,13 @@ static int __init do_name(void)
 static int __init do_copy(void)
 {
 	if (byte_count >= body_len) {
-		struct timespec64 t[2] = { };
 		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
 			error("write error");
 
-		t[0].tv_sec = mtime;
-		t[1].tv_sec = mtime;
-		vfs_utimes(&wfile->f_path, t);
-
+		do_utime_path(&wfile->f_path, mtime);
 		fput(wfile);
+		if (csum_present && io_csum != hdr_csum)
+			error("bad data checksum");
 		eat(body_len);
 		state = SkipIt;
 		return 0;
diff --git a/init/main.c b/init/main.c
index 98182c3c2c4b..f057c49f1d9d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1035,21 +1035,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	softirq_init();
 	timekeeping_init();
 	kfence_init();
+	time_init();
 
 	/*
 	 * For best initial stack canary entropy, prepare it after:
 	 * - setup_arch() for any UEFI RNG entropy and boot cmdline access
-	 * - timekeeping_init() for ktime entropy used in rand_initialize()
-	 * - rand_initialize() to get any arch-specific entropy like RDRAND
-	 * - add_latent_entropy() to get any latent entropy
-	 * - adding command line entropy
+	 * - timekeeping_init() for ktime entropy used in random_init()
+	 * - time_init() for making random_get_entropy() work on some platforms
+	 * - random_init() to initialize the RNG from from early entropy sources
 	 */
-	rand_initialize();
-	add_latent_entropy();
-	add_device_randomness(command_line, strlen(command_line));
+	random_init(command_line);
 	boot_init_stack_canary();
 
-	time_init();
 	perf_event_init();
 	profile_init();
 	call_function_init();
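
For context on the initramfs.c hunks above: the newly accepted "070702" magic is the newc cpio variant with per-entry checksums (what GNU cpio emits with -H crc), and its c_chksum field is a plain 32-bit sum of the file's data bytes rather than a true CRC. The standalone userspace sketch below only illustrates that arithmetic, mirroring the io_csum accumulation the patch adds to xwrite(); the program and its file argument are illustrative and not part of the patch.

/*
 * Illustration only (not kernel code): compute the 32-bit byte sum that a
 * "070702" cpio header stores in c_chksum, i.e. the value the kernel now
 * accumulates in io_csum and compares against hdr_csum in do_copy().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cpio_crc_sum(FILE *f)
{
	uint32_t sum = 0;
	int c;

	while ((c = fgetc(f)) != EOF)
		sum += (unsigned char)c;	/* matches io_csum += p[i] */
	return sum;
}

int main(int argc, char **argv)
{
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	printf("c_chksum = %08x\n", (unsigned)cpio_crc_sum(f));
	fclose(f);
	return 0;
}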
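
Likewise, the probe behind the new CC_HAS_ASM_GOTO_TIED_OUTPUT symbol can be run by hand to check a toolchain outside of Kconfig. The sketch below is just the one-line test from the def_bool above, reformatted for readability and saved under a hypothetical file name; per the Kconfig comment, gcc before 11 and clang before 14 are expected to reject the tied "+m" output, which is exactly what the symbol detects (e.g. gcc -c tied_output_test.c).

/*
 * tied_output_test.c -- hypothetical file name; the same probe that the
 * CC_HAS_ASM_GOTO_TIED_OUTPUT def_bool pipes into the compiler. A toolchain
 * with the asm-goto tied-output bug fails to compile this.
 */
int foo(int *x)
{
	asm goto (".long (%l[bar]) - .\n"
		  : "+m"(*x)	/* tied (read/write) memory output */
		  :
		  :
		  : bar);
	return *x;
bar:
	return 0;
}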