author      Will Deacon <will@kernel.org>  2022-07-25 10:59:15 +0100
committer   Will Deacon <will@kernel.org>  2022-07-25 10:59:15 +0100
commit      f96d67a8af7a39f7ffaac464d8bccc4c720e52c2 (patch)
tree        daee5c1458a5d8ed1bdb58b54de19b232ebd44aa /arch/arm64/include/asm/assembler.h
parent      92867739e3439ecc9bfa0a106be515d93f14c735 (diff)
parent      1191b6256e50a07e7d8ce36eb970708e42a4be1a (diff)
download    linux-f96d67a8af7a39f7ffaac464d8bccc4c720e52c2.tar.gz
Merge branch 'for-next/boot' into for-next/core
* for-next/boot: (34 commits)
  arm64: fix KASAN_INLINE
  arm64: Add an override for ID_AA64SMFR0_EL1.FA64
  arm64: Add the arm64.nosve command line option
  arm64: Add the arm64.nosme command line option
  arm64: Expose a __check_override primitive for oddball features
  arm64: Allow the idreg override to deal with variable field width
  arm64: Factor out checking of a feature against the override into a macro
  arm64: Allow sticky E2H when entering EL1
  arm64: Save state of HCR_EL2.E2H before switch to EL1
  arm64: Rename the VHE switch to "finalise_el2"
  arm64: mm: fix booting with 52-bit address space
  arm64: head: remove __PHYS_OFFSET
  arm64: lds: use PROVIDE instead of conditional definitions
  arm64: setup: drop early FDT pointer helpers
  arm64: head: avoid relocating the kernel twice for KASLR
  arm64: kaslr: defer initialization to initcall where permitted
  arm64: head: record CPU boot mode after enabling the MMU
  arm64: head: populate kernel page tables with MMU and caches on
  arm64: head: factor out TTBR1 assignment into a macro
  arm64: idreg-override: use early FDT mapping in ID map
  ...
Diffstat (limited to 'arch/arm64/include/asm/assembler.h')
-rw-r--r--  arch/arm64/include/asm/assembler.h  31
1 file changed, 27 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index dc422fa437c2..5846145be523 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -360,6 +360,20 @@ alternative_cb_end
.endm

/*
+ * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of _end.
+ */
+ .macro idmap_get_t0sz, reg
+ adrp \reg, _end
+ orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
+ clz \reg, \reg
+ .endm
+
+/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
*
@@ -466,6 +480,18 @@ alternative_endif
.endm

/*
+ * load_ttbr1 - install @pgtbl as a TTBR1 page table
+ * pgtbl preserved
+ * tmp1/tmp2 clobbered, either may overlap with pgtbl
+ */
+ .macro load_ttbr1, pgtbl, tmp1, tmp2
+ phys_to_ttbr \tmp1, \pgtbl
+ offset_ttbr1 \tmp1, \tmp2
+ msr ttbr1_el1, \tmp1
+ isb
+ .endm
+
+/*
* To prevent the possibility of old and new partial table walks being visible
* in the tlb, switch the ttbr to a zero page when we invalidate the old
* records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
@@ -478,10 +504,7 @@ alternative_endif
isb
tlbi vmalle1
dsb nsh
- phys_to_ttbr \tmp, \page_table
- offset_ttbr1 \tmp, \tmp2
- msr ttbr1_el1, \tmp
- isb
+ load_ttbr1 \page_table, \tmp, \tmp2
.endm

/*
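
For readers checking the T0SZ arithmetic documented in the new idmap_get_t0sz macro, the following standalone C sketch mirrors the same computation. It is illustrative only and not part of the patch; the function name, the VA_BITS_MIN value of 48 and the sample addresses are assumptions for the example. The physical address of _end is OR-ed with a VA_BITS_MIN-wide mask so the ID map never covers fewer than VA_BITS_MIN address bits, and the leading-zero count of the result is exactly TCR_EL1.T0SZ (64 minus the number of bits that must be mapped).

/* Illustrative sketch only -- not from the patch. */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS_MIN 48ULL	/* assumed value for this example */

static unsigned int example_idmap_get_t0sz(uint64_t pa_of_end)
{
	/* Cover at least VA_BITS_MIN bits, more if _end lies higher. */
	uint64_t span = pa_of_end | ((1ULL << VA_BITS_MIN) - 1);

	/* T0SZ == 64 - #bits used == number of leading zeroes. */
	return (unsigned int)__builtin_clzll(span);
}

int main(void)
{
	/* _end at ~1 GiB fits in 48 bits, so T0SZ stays at its cap of 16. */
	printf("t0sz = %u\n", example_idmap_get_t0sz(0x40210000ULL));

	/* An _end above 2^48 gives T0SZ = 12, i.e. a 52-bit ID map. */
	printf("t0sz = %u\n", example_idmap_get_t0sz(1ULL << 51));
	return 0;
}

With gcc or clang, __builtin_clzll is well defined here because the OR with the mask guarantees the argument is non-zero.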