From f4117ac9e237b74afdf5e001d5ea26a4d15e9847 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 18:07:14 +0000 Subject: ARM: P2V: separate PHYS_OFFSET from platform definitions This uncouple PHYS_OFFSET from the platform definitions, thereby facilitating run-time computation of the physical memory offset. Acked-by: Nicolas Pitre Acked-by: Viresh Kumar Acked-by: H Hartley Sweeten Acked-by: Magnus Damm Acked-by: Tony Lindgren Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Wan ZongShun Acked-by: Kukjin Kim Acked-by: Eric Miao Acked-by: Jiandong Zheng Signed-off-by: Russell King --- arch/arm/kernel/tcm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 26685c2f7a49..f5cf660eefcc 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -15,7 +15,7 @@ #include /* memcpy */ #include #include -#include +#include #include "tcm.h" static struct gen_pool *tcm_pool; -- cgit From b75c178afaa975896e894bb2b6951dc4cd43c977 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:03:16 +0000 Subject: ARM: P2V: avoid initializers and assembly using PHYS_OFFSET As PHYS_OFFSET will be becoming a variable, we can't have it used in initializers nor assembly code. Replace those in generic code with a run-time initialization. Replace those in platform code using the individual platform specific PLAT_PHYS_OFFSET. Acked-by: Nicolas Pitre Acked-by: Tony Lindgren Acked-by: Kukjin Kim Acked-by: David Brown Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 78678b07901c..056bf1878f8a 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -740,7 +740,7 @@ static struct init_tags { { tag_size(tag_core), ATAG_CORE }, { 1, PAGE_SIZE, 0xff }, { tag_size(tag_mem32), ATAG_MEM }, - { MEM_SIZE, PHYS_OFFSET }, + { MEM_SIZE }, { 0, ATAG_NONE } }; @@ -839,6 +839,8 @@ void __init setup_arch(char **cmdline_p) struct machine_desc *mdesc; char *from = default_command_line; + init_tags.mem.start = PHYS_OFFSET; + unwind_init(); setup_processor(); -- cgit From 72a20e22f49e2dad3180c23980a9df1c63faab0a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:04:00 +0000 Subject: ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL head.S makes use of PHYS_OFFSET. When it becomes a variable, the assembler won't understand this. Compute PHYS_OFFSET by the following method. This code is linked at its virtual address, but run at before the MMU is enabled, so at his physical address. 1: .long . .long PAGE_OFFSET adr r0, 1b @ r0 = physical ',' ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET sub r1, r0, r1 @ r1 = physical-virtual add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual @ := PHYS_OFFSET. Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't use this method for XIP kernels as the code doesn't execute in RAM. 
Tested-by: Tony Lindgren Reviewed-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/head.S | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 8a154b940fef..03a588b6e15c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -26,14 +26,6 @@ #include #endif -#if (PHYS_OFFSET & 0x001fffff) -#error "PHYS_OFFSET must be at an even 2MiB boundary!" -#endif - -#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) -#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET) - - /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must @@ -41,6 +33,7 @@ * the least significant 16 bits to be 0x8000, but we could probably * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000. */ +#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif @@ -48,8 +41,8 @@ .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000 - .macro pgtbl, rd - ldr \rd, =(KERNEL_RAM_PADDR - 0x4000) + .macro pgtbl, rd, phys + add \rd, \phys, #TEXT_OFFSET - 0x4000 .endm #ifdef CONFIG_XIP_KERNEL @@ -88,9 +81,18 @@ ENTRY(stext) THUMB( it eq ) @ force fixup-able long branch encoding beq __error_p @ yes, error 'p' +#ifndef CONFIG_XIP_KERNEL + adr r3, 2f + ldmia r3, {r4, r8} + sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) + add r8, r8, r4 @ PHYS_OFFSET +#else + ldr r8, =PLAT_PHYS_OFFSET +#endif + /* * r1 = machine no, r2 = atags, - * r9 = cpuid, r10 = procinfo + * r8 = phys_offset, r9 = cpuid, r10 = procinfo */ bl __vet_atags #ifdef CONFIG_SMP_ON_UP @@ -114,21 +116,24 @@ ENTRY(stext) 1: b __enable_mmu ENDPROC(stext) .ltorg +#ifndef CONFIG_XIP_KERNEL +2: .long . + .long PAGE_OFFSET +#endif /* * Setup the initial page tables. We only setup the barest * amount which are required to get the kernel running, which * generally means mapping in the kernel code. * - * r9 = cpuid - * r10 = procinfo + * r8 = phys_offset, r9 = cpuid, r10 = procinfo * * Returns: * r0, r3, r5-r7 corrupted * r4 = physical page table address */ __create_page_tables: - pgtbl r4 @ page table address + pgtbl r4, r8 @ page table address /* * Clear the 16K level 1 swapper page table @@ -184,10 +189,8 @@ __create_page_tables: /* * Map some ram to cover our .data and .bss areas. */ - orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000) - .if (KERNEL_RAM_PADDR & 0x00f00000) - orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000) - .endif + add r3, r8, #TEXT_OFFSET + orr r3, r3, r7 add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18 str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]! ldr r6, =(_end - 1) @@ -203,10 +206,7 @@ __create_page_tables: * Then map first 1MB of ram in case it contains our boot params. */ add r0, r4, #PAGE_OFFSET >> 18 - orr r6, r7, #(PHYS_OFFSET & 0xff000000) - .if (PHYS_OFFSET & 0x00f00000) - orr r6, r6, #(PHYS_OFFSET & 0x00f00000) - .endif + orr r6, r7, r8 str r6, [r0] #ifdef CONFIG_DEBUG_LL -- cgit From dc21af99fadcfa0ae65b52fd0895f85824f0c288 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:09:43 +0000 Subject: ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching This idea came from Nicolas, Eric Miao produced an initial version, which was then rewritten into this. Patch the physical to virtual translations at runtime. 
As we modify the code, this makes it incompatible with XIP kernels, but allows us to achieve this with minimal loss of performance. As many translations are of the form: physical = virtual + (PHYS_OFFSET - PAGE_OFFSET) virtual = physical - (PHYS_OFFSET - PAGE_OFFSET) we generate an 'add' instruction for __virt_to_phys(), and a 'sub' instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET - PAGE_OFFSET) by comparing the address prior to MMU initialization with where it should be once the MMU has been initialized, and place this constant into the above add/sub instructions. Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for the C-mode PHYS_OFFSET variable definition to use. At present, we are unable to support Realview with Sparsemem enabled as this uses a complex mapping function, and MSM as this requires a constant which will not fit in our math instruction. Add a module version magic string for this feature to prevent incompatible modules being loaded. Tested-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/armksyms.c | 4 +++ arch/arm/kernel/head.S | 68 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/module.c | 23 ++++++++++++++- arch/arm/kernel/vmlinux.lds.S | 4 +++ 4 files changed, 98 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e5387678..9615423c37dd 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -170,3 +170,7 @@ EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif + +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +EXPORT_SYMBOL(__pv_phys_offset); +#endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 03a588b6e15c..1db8ead2e331 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -97,6 +97,9 @@ ENTRY(stext) bl __vet_atags #ifdef CONFIG_SMP_ON_UP bl __fixup_smp +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + bl __fixup_pv_table #endif bl __create_page_tables @@ -438,4 +441,69 @@ smp_on_up: #endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +/* __fixup_pv_table - patch the stub instructions with the delta between + * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and + * can be expressed by an immediate shifter operand. The stub instruction + * has a form of '(add|sub) rd, rn, #imm'. + */ + __HEAD +__fixup_pv_table: + adr r0, 1f + ldmia r0, {r3-r5, r7} + sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET + add r4, r4, r3 @ adjust table start address + add r5, r5, r3 @ adjust table end address + str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset + mov r6, r3, lsr #24 @ constant for add/sub instructions + teq r3, r6, lsl #24 @ must be 16MiB aligned + bne __error + str r6, [r7, #4] @ save to __pv_offset + b __fixup_a_pv_table +ENDPROC(__fixup_pv_table) + + .align +1: .long . 
+ .long __pv_table_begin + .long __pv_table_end +2: .long __pv_phys_offset + + .text +__fixup_a_pv_table: + b 3f +2: ldr ip, [r7, r3] + bic ip, ip, #0x000000ff + orr ip, ip, r6 + str ip, [r7, r3] +3: cmp r4, r5 + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 2b + mov pc, lr +ENDPROC(__fixup_a_pv_table) + +ENTRY(fixup_pv_table) + stmfd sp!, {r4 - r7, lr} + ldr r2, 2f @ get address of __pv_phys_offset + mov r3, #0 @ no offset + mov r4, r0 @ r0 = table start + add r5, r0, r1 @ r1 = table size + ldr r6, [r2, #4] @ get __pv_offset + bl __fixup_a_pv_table + ldmfd sp!, {r4 - r7, pc} +ENDPROC(fixup_pv_table) + + .align +2: .long __pv_phys_offset + + .data + .globl __pv_phys_offset + .type __pv_phys_offset, %object +__pv_phys_offset: + .long 0 + .size __pv_phys_offset, . - __pv_phys_offset +__pv_offset: + .long 0 +#endif + #include "head-common.S" diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b478..c5679f6d9f64 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -268,12 +268,28 @@ struct mod_unwind_map { const Elf_Shdr *txt_sec; }; +static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, + const Elf_Shdr *sechdrs, const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + + return NULL; +} + +extern void fixup_pv_table(const void *, unsigned long); + int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + const Elf_Shdr *s = NULL; #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; struct mod_unwind_map maps[ARM_SEC_MAX]; int i; @@ -314,6 +330,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, maps[i].unw_sec->sh_size, maps[i].txt_sec->sh_addr, maps[i].txt_sec->sh_size); +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + s = find_mod_section(hdr, sechdrs, ".pv_table"); + if (s) + fixup_pv_table((void *)s->sh_addr, s->sh_size); #endif return 0; } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f2031..45b5651777ee 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -57,6 +57,10 @@ SECTIONS __smpalt_end = .; #endif + __pv_table_begin = .; + *(.pv_table) + __pv_table_end = .; + INIT_SETUP(16) INIT_CALLS -- cgit From cada3c0841e1deaec4c0f92654610b028dc683ff Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:39:29 +0000 Subject: ARM: P2V: extend to 16-bit translation offsets MSM's memory is aligned to 2MB, which is more than we can do with our existing method as we're limited to the upper 8 bits. Extend this by using two instructions to 16 bits, automatically selected when MSM is enabled. Acked-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/head.S | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 1db8ead2e331..a94dd99d54c3 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -456,8 +456,13 @@ __fixup_pv_table: add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address str r8, [r7, r3]! 
@ save computed PHYS_OFFSET to __pv_phys_offset +#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned +#else + mov r6, r3, lsr #16 @ constant for add/sub instructions + teq r3, r6, lsl #16 @ must be 64kiB aligned +#endif bne __error str r6, [r7, #4] @ save to __pv_offset b __fixup_a_pv_table @@ -471,10 +476,18 @@ ENDPROC(__fixup_pv_table) .text __fixup_a_pv_table: +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + and r0, r6, #255 @ offset bits 23-16 + mov r6, r6, lsr #8 @ offset bits 31-24 +#else + mov r0, #0 @ just in case... +#endif b 3f 2: ldr ip, [r7, r3] bic ip, ip, #0x000000ff - orr ip, ip, r6 + tst ip, #0x400 @ rotate shift tells us LS or MS byte + orrne ip, ip, r6 @ mask in offset bits 31-24 + orreq ip, ip, r0 @ mask in offset bits 23-16 str ip, [r7, r3] 3: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot -- cgit From 4d901c4271951d110afb13ee9aa73d27a6c8e53d Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 2 Feb 2011 16:33:17 +0100 Subject: ARM: 6648/1: map ATAGs when not in first 1MB of RAM If ATAGs or DTB pointer is not within first 1MB of RAM, then the boot params will not be mapped early enough, so map the 1MB region that r2 points to. Only map the first 1MB when r2 is 0. Some assembly improvements from Nicolas Pitre. Acked-by: Tony Lindgren Acked-by: Nicolas Pitre Signed-off-by: Rob Herring Signed-off-by: Russell King --- arch/arm/kernel/head.S | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index a94dd99d54c3..591a2ead8cef 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -206,11 +206,17 @@ __create_page_tables: #endif /* - * Then map first 1MB of ram in case it contains our boot params. + * Then map boot params address in r2 or + * the first 1MB of ram if boot params address is not specified. */ - add r0, r4, #PAGE_OFFSET >> 18 - orr r6, r7, r8 - str r6, [r0] + mov r0, r2, lsr #20 + movs r0, r0, lsl #20 + moveq r0, r8 + sub r3, r0, r8 + add r3, r3, #PAGE_OFFSET + add r3, r4, r3, lsr #18 + orr r6, r7, r0 + str r6, [r3] #ifdef CONFIG_DEBUG_LL #ifndef CONFIG_DEBUG_ICEDCC -- cgit From dce72dd08c976c9e5e1367bf994b306b15ae87fe Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 21 Feb 2011 07:00:32 +0100 Subject: ARM: 6749/1: fold lookup_machine_type() into setup_machine() Since commit 6fc31d54 there is no callers for lookup_machine_type() other than setup_machine(). And if the former fails it won't return, therefore the error path in the later is dead code. Let's clean things up by merging them together. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 48 +++++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 31 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 056bf1878f8a..6ce80106316e 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -325,28 +325,6 @@ static void __init early_print(const char *str, ...) 
printk("%s", buf); } -static struct machine_desc * __init lookup_machine_type(unsigned int type) -{ - extern struct machine_desc __arch_info_begin[], __arch_info_end[]; - struct machine_desc *p; - - for (p = __arch_info_begin; p < __arch_info_end; p++) - if (type == p->nr) - return p; - - early_print("\n" - "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n" - "Available machine support:\n\nID (hex)\tNAME\n", type); - - for (p = __arch_info_begin; p < __arch_info_end; p++) - early_print("%08x\t%s\n", p->nr, p->name); - - early_print("\nPlease check your kernel config and/or bootloader.\n"); - - while (true) - /* can't use cpu_relax() here as it may require MMU setup */; -} - static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -463,21 +441,29 @@ void cpu_init(void) static struct machine_desc * __init setup_machine(unsigned int nr) { - struct machine_desc *list; + extern struct machine_desc __arch_info_begin[], __arch_info_end[]; + struct machine_desc *p; /* * locate machine in the list of supported machines. */ - list = lookup_machine_type(nr); - if (!list) { - printk("Machine configuration botched (nr %d), unable " - "to continue.\n", nr); - while (1); - } + for (p = __arch_info_begin; p < __arch_info_end; p++) + if (nr == p->nr) { + printk("Machine: %s\n", p->name); + return p; + } - printk("Machine: %s\n", list->name); + early_print("\n" + "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n" + "Available machine support:\n\nID (hex)\tNAME\n", nr); + + for (p = __arch_info_begin; p < __arch_info_end; p++) + early_print("%08x\t%s\n", p->nr, p->name); - return list; + early_print("\nPlease check your kernel config and/or bootloader.\n"); + + while (true) + /* can't use cpu_relax() here as it may require MMU setup */; } static int __init arm_add_memory(unsigned long start, unsigned long size) -- cgit From 3572bea8cbc57f0bef1e0f4580c01717df7026d8 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 21 Feb 2011 06:57:33 +0100 Subject: ARM: 6748/1: ignore mdesc->boot_params if out of range The initial MMU table created in head.S contains a 1 MB mapping at the start of memory to let the early kernel boot code access the boot params specified by mdesc->boot_params. When using CONFIG_ARM_PATCH_PHYS_VIRT it is possible for the kernel to have a different idea of where the start of memory is at run time, making the compile-time determined mdesc->boot_params pointing to a memory area which is not mapped. Any access to the boot params in that case will fault and silently hang the kernel at that point. It is therefore a better idea to simply ignore mdesc->boot_params in that case and give the kernel a chance to print some diagnostic on the console later. If the bootloader provides a valid pointer in r2 to the kernel then this is used instead of mdesc->boot_params, and an explicit mapping is already created in the initial MMU table for it. It is therefore a good idea to use that facility when using a relocated kernel. 
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6ce80106316e..f43f041a0977 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -839,8 +839,25 @@ void __init setup_arch(char **cmdline_p) if (__atags_pointer) tags = phys_to_virt(__atags_pointer); - else if (mdesc->boot_params) - tags = phys_to_virt(mdesc->boot_params); + else if (mdesc->boot_params) { +#ifdef CONFIG_MMU + /* + * We still are executing with a minimal MMU mapping created + * with the presumption that the machine default for this + * is located in the first MB of RAM. Anything else will + * fault and silently hang the kernel at this point. + */ + if (mdesc->boot_params < PHYS_OFFSET || + mdesc->boot_params >= PHYS_OFFSET + SZ_1M) { + printk(KERN_WARNING + "Default boot params at physical 0x%08lx out of reach\n", + mdesc->boot_params); + } else +#endif + { + tags = phys_to_virt(mdesc->boot_params); + } + } #if defined(CONFIG_DEPRECATED_PARAM_STRUCT) /* -- cgit From b511d75d6150892e67c8ebfa9dc8eb37ebd02aa3 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 21 Feb 2011 06:53:35 +0100 Subject: ARM: 6747/1: P2V: Thumb2 support Adding Thumb2 support to the runtime patching of the virt_to_phys and phys_to_virt opcodes. Tested both the 8-bit and the 16-bit fixups, using different placements in memory to exercize all code paths. Signed-off-by: Nicolas Pitre Reviewed-by: Dave Martin Signed-off-by: Russell King --- arch/arm/kernel/head.S | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 591a2ead8cef..6a87261e1b1e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -461,7 +461,8 @@ __fixup_pv_table: sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address - str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset + add r7, r7, r3 @ adjust __pv_phys_offset address + str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset #ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned @@ -469,6 +470,7 @@ __fixup_pv_table: mov r6, r3, lsr #16 @ constant for add/sub instructions teq r3, r6, lsl #16 @ must be 64kiB aligned #endif +THUMB( it ne @ cross section branch ) bne __error str r6, [r7, #4] @ save to __pv_offset b __fixup_a_pv_table @@ -482,6 +484,50 @@ ENDPROC(__fixup_pv_table) .text __fixup_a_pv_table: +#ifdef CONFIG_THUMB2_KERNEL +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + lsls r0, r6, #24 + lsr r6, #8 + beq 1f + clz r7, r0 + lsr r0, #24 + lsl r0, r7 + bic r0, 0x0080 + lsrs r7, #1 + orrcs r0, #0x0080 + orr r0, r0, r7, lsl #12 +#endif +1: lsls r6, #24 + beq 4f + clz r7, r6 + lsr r6, #24 + lsl r6, r7 + bic r6, #0x0080 + lsrs r7, #1 + orrcs r6, #0x0080 + orr r6, r6, r7, lsl #12 + orr r6, #0x4000 + b 4f +2: @ at this point the C flag is always clear + add r7, r3 +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + ldrh ip, [r7] + tst ip, 0x0400 @ the i bit tells us LS or MS byte + beq 3f + cmp r0, #0 @ set C flag, and ... 
+ biceq ip, 0x0400 @ immediate zero value has a special encoding + streqh ip, [r7] @ that requires the i bit cleared +#endif +3: ldrh ip, [r7, #2] + and ip, 0x8f00 + orrcc ip, r6 @ mask in offset bits 31-24 + orrcs ip, r0 @ mask in offset bits 23-16 + strh ip, [r7, #2] +4: cmp r4, r5 + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 2b + bx lr +#else #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT and r0, r6, #255 @ offset bits 23-16 mov r6, r6, lsr #8 @ offset bits 31-24 @@ -499,6 +545,7 @@ __fixup_a_pv_table: ldrcc r7, [r4], #4 @ use branch for delay slot bcc 2b mov pc, lr +#endif ENDPROC(__fixup_a_pv_table) ENTRY(fixup_pv_table) -- cgit
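
For reference, the arithmetic that the patched add/sub stubs encode can be modelled outside the kernel. Below is a minimal, standalone C sketch of that relationship; it is not kernel code. The 0xC0000000 PAGE_OFFSET and the 0x20000000 physical RAM start are example values only, and the helper names merely mirror the kernel's __virt_to_phys()/__phys_to_virt() for readability. The sketch also checks the alignment constraints that __fixup_pv_table enforces: 16MiB for the single-instruction 8-bit immediate, 64KiB for the two-instruction 16-bit variant.

/*
 * Illustrative user-space model of the P2V arithmetic patched in by
 * __fixup_pv_table.  Not kernel code; constants are example values.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	0xC0000000UL	/* typical 3G/1G split; example only */

/* Models __pv_offset: PHYS_OFFSET - PAGE_OFFSET, filled in at "boot". */
static uint32_t pv_offset;

/* Models the patched 'add rd, rn, #imm' stub in __virt_to_phys(). */
static uint32_t virt_to_phys(uint32_t virt)
{
	return virt + pv_offset;
}

/* Models the patched 'sub rd, rn, #imm' stub in __phys_to_virt(). */
static uint32_t phys_to_virt(uint32_t phys)
{
	return phys - pv_offset;
}

int main(void)
{
	/* Hypothetical platform with RAM starting at 0x20000000. */
	uint32_t phys_offset = 0x20000000;

	pv_offset = phys_offset - PAGE_OFFSET;

	/*
	 * The 8-bit variant encodes pv_offset in bits 31-24 of a single
	 * rotated immediate, so the delta must be 16MiB aligned; the
	 * 16-bit variant uses two instructions and only needs 64KiB
	 * alignment (enough for MSM's 2MiB-aligned memory).
	 */
	if (pv_offset & 0x00ffffff)
		printf("delta needs the 16-bit patching variant\n");
	if (pv_offset & 0x0000ffff)
		printf("delta not 64KiB aligned: unsupported\n");

	printf("pv_offset            = 0x%08" PRIx32 "\n", pv_offset);
	printf("PHYS_OFFSET          = 0x%08" PRIx32 "\n", phys_offset);
	printf("virt_to_phys(stext)  = 0x%08" PRIx32 "\n",
	       virt_to_phys((uint32_t)PAGE_OFFSET + 0x8000));
	printf("phys_to_virt(atags)  = 0x%08" PRIx32 "\n",
	       phys_to_virt(phys_offset + 0x100));
	return 0;
}

The 16MiB restriction in the sketch follows from the ARM rotated-immediate encoding: a single add/sub instruction can only carry an 8-bit value rotated by an even amount, so the kernel places the whole delta in bits 31-24; splitting it across two instructions, as the 16-bit variant above alludes to, relaxes that to bits 31-16.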