Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/boot/Makefile              |  3
-rw-r--r--   arch/x86/boot/compressed/Makefile   |  1
-rw-r--r--   arch/x86/kernel/setup_percpu.c      | 16
-rw-r--r--   arch/x86/kernel/signal.c            |  2
-rw-r--r--   arch/x86/mm/highmem_32.c            |  1
-rw-r--r--   arch/x86/mm/iomap_32.c              |  2
6 files changed, 6 insertions, 19 deletions
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index fb737ce5888d..6633b6e7505a 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,6 +57,7 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
 # How to compile the 16-bit code.  Note we always compile for -march=i386,
 # that way we can complain to the user if the CPU is insufficient.
 KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+		   -DDISABLE_BRANCH_PROFILING \
 		   -Wall -Wstrict-prototypes \
 		   -march=i386 -mregparm=3 \
 		   -include $(srctree)/$(src)/code16gcc.h \
@@ -66,7 +67,7 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
 		   $(call cc-option, -fno-unit-at-a-time)) \
 		   $(call cc-option, -fno-stack-protector) \
 		   $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option,-m32)
+KBUILD_CFLAGS	+= $(call cc-option, -m32)
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 
 $(obj)/bzImage: asflags-y := $(SVGA_MODE)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3ca4c194b8e5..65551c9f8571 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -8,6 +8,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma h
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_64) := -mcmodel=small
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 400331b50a53..3a97a4cf1872 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -153,7 +153,6 @@ static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
 static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
 	static struct vm_struct vm;
-	pg_data_t *last;
 	size_t ptrs_size, dyn_size;
 	unsigned int cpu;
 	ssize_t ret;
@@ -162,22 +161,9 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	 * If large page isn't supported, there's no benefit in doing
 	 * this.  Also, on non-NUMA, embedding is better.
 	 */
-	if (!cpu_has_pse || pcpu_need_numa())
+	if (!cpu_has_pse || !pcpu_need_numa())
 		return -EINVAL;
 
-	last = NULL;
-	for_each_possible_cpu(cpu) {
-		int node = early_cpu_to_node(cpu);
-
-		if (node_online(node) && NODE_DATA(node) &&
-		    last && last != NODE_DATA(node))
-			goto proceed;
-
-		last = NODE_DATA(node);
-	}
-	return -EINVAL;
-
-proceed:
 	/*
 	 * Currently supports only single page.  Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index dfcc74ab0ab6..14425166b8e3 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -221,7 +221,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 	if (!onsigstack) {
 		/* This is the X/Open sanctioned signal stack switching.  */
 		if (ka->sa.sa_flags & SA_ONSTACK) {
-			if (sas_ss_flags(sp) == 0)
+			if (current->sas_ss_size)
 				sp = current->sas_ss_sp + current->sas_ss_size;
 		} else {
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5bc5d1688c1c..8126e8d1a2a4 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -40,7 +40,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
 	debug_kmap_atomic(type);
 
-	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index bff0c9032f8c..e331f77348a7 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -39,6 +39,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
 	pagefault_disable();
 
+	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,7 +73,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	debug_kmap_atomic(type);
 	/*
 	 * Force other mappings to Oops if they'll try to access this pte
 	 * without first remap it.  Keeping stale mappings around is a bad idea