Diffstat (limited to 'arch/powerpc/kernel')
37 files changed, 1608 insertions, 786 deletions
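Note: the asm-offsets.c hunks below convert open-coded DEFINE(sym, offsetof(struct ..., ...)) entries to the shorter OFFSET() helper and introduce STACK_PT_REGS_OFFSET() for pt_regs members addressed relative to the stack frame. As a rough sketch of how these helpers fit together (the DEFINE()/OFFSET() definitions are assumed to come from include/linux/kbuild.h and are not part of this diff; STACK_PT_REGS_OFFSET() is copied verbatim from the hunk itself):

/* Sketch only -- DEFINE()/OFFSET() are assumed from include/linux/kbuild.h,
 * not part of this diff. STACK_FRAME_OVERHEAD and struct pt_regs come from
 * the usual powerpc headers.
 */
#include <linux/stddef.h>	/* offsetof() */

/* Emit "->SYM <value>" into the compiler's asm output; the build scans this
 * to generate asm-offsets.h, so assembly code can use SYM as a constant. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

/* Byte offset of member 'mem' inside 'struct str'. */
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* Offset of a pt_regs member relative to a kernel stack frame pointer. */
#define STACK_PT_REGS_OFFSET(sym, val) \
	DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))

/* Usage, as in the hunks below:
 *   OFFSET(THREAD, task_struct, thread);   -> THREAD
 *   STACK_PT_REGS_OFFSET(GPR0, gpr[0]);    -> GPR0
 */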
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index f4c2b52e58b3..811f441a125f 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -15,7 +15,7 @@ CFLAGS_btext.o += -fPIC endif CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) @@ -96,6 +96,7 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_BOOTX_TEXT) += btext.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 8d58c61908f7..cbc7c42cdb74 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -204,7 +204,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) int i, size; #ifdef __powerpc64__ - size = ppc64_caches.dline_size; + size = ppc64_caches.l1d.block_size; #else size = L1_CACHE_BYTES; #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 195a9fc8f81c..4367e7df51a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -72,205 +72,190 @@ #include <asm/fixmap.h> #endif +#define STACK_PT_REGS_OFFSET(sym, val) \ + DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val)) + int main(void) { - DEFINE(THREAD, offsetof(struct task_struct, thread)); - DEFINE(MM, offsetof(struct task_struct, mm)); - DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); + OFFSET(THREAD, task_struct, thread); + OFFSET(MM, task_struct, mm); + OFFSET(MMCONTEXTID, mm_struct, context.id); #ifdef CONFIG_PPC64 DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); - DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); + OFFSET(TASKTHREADPPR, task_struct, thread.ppr); #else - DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); + OFFSET(THREAD_INFO, task_struct, stack); DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); - DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); + OFFSET(KSP_LIMIT, thread_struct, ksp_limit); #endif /* CONFIG_PPC64 */ #ifdef CONFIG_LIVEPATCH - DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); + OFFSET(TI_livepatch_sp, thread_info, livepatch_sp); #endif - DEFINE(KSP, offsetof(struct thread_struct, ksp)); - DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); + OFFSET(KSP, thread_struct, ksp); + OFFSET(PT_REGS, thread_struct, regs); #ifdef CONFIG_BOOKE - DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); + OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]); #endif - DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); - DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area)); - DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); - DEFINE(THREAD_LOAD_FP, offsetof(struct thread_struct, load_fp)); + OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode); + OFFSET(THREAD_FPSTATE, thread_struct, fp_state); + OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area); + OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr); + OFFSET(THREAD_LOAD_FP, thread_struct, load_fp); #ifdef CONFIG_ALTIVEC - 
DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); - DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area)); - DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); - DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); - DEFINE(THREAD_LOAD_VEC, offsetof(struct thread_struct, load_vec)); + OFFSET(THREAD_VRSTATE, thread_struct, vr_state); + OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area); + OFFSET(THREAD_VRSAVE, thread_struct, vrsave); + OFFSET(THREAD_USED_VR, thread_struct, used_vr); + OFFSET(VRSTATE_VSCR, thread_vr_state, vscr); + OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX - DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); + OFFSET(THREAD_USED_VSR, thread_struct, used_vsr); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 - DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); + OFFSET(KSP_VSID, thread_struct, ksp_vsid); #else /* CONFIG_PPC64 */ - DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); + OFFSET(PGDIR, thread_struct, pgdir); #ifdef CONFIG_SPE - DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); - DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); - DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); - DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); + OFFSET(THREAD_EVR0, thread_struct, evr[0]); + OFFSET(THREAD_ACC, thread_struct, acc); + OFFSET(THREAD_SPEFSCR, thread_struct, spefscr); + OFFSET(THREAD_USED_SPE, thread_struct, used_spe); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0)); + OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0); #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER - DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); + OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu); #endif #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) - DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); + OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); - DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); - DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); - DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); - DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); - DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); - DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); - DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); - DEFINE(THREAD_CKVRSTATE, offsetof(struct thread_struct, - ckvr_state)); - DEFINE(THREAD_CKVRSAVE, offsetof(struct thread_struct, - ckvrsave)); - DEFINE(THREAD_CKFPSTATE, offsetof(struct thread_struct, - ckfp_state)); + OFFSET(PACATMSCRATCH, paca_struct, tm_scratch); + OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar); + OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr); + OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar); + OFFSET(THREAD_TM_TAR, thread_struct, tm_tar); + OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr); + OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr); + OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs); + OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state); + OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); + OFFSET(THREAD_CKFPSTATE, thread_struct, 
ckfp_state); /* Local pt_regs on stack for Transactional Memory funcs. */ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); + OFFSET(TI_PREEMPT, thread_info, preempt_count); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_CPU, thread_info, cpu); #ifdef CONFIG_PPC64 - DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); - DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); - DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); - DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); - DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); - DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); + OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size); + OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size); + OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page); + OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size); + OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size); + OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); - DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); - DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); - DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); - DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); - DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); - DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); - DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); - DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); - DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); - DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); - DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); - DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened)); + OFFSET(PACAPACAINDEX, paca_struct, paca_index); + OFFSET(PACAPROCSTART, paca_struct, cpu_start); + OFFSET(PACAKSAVE, paca_struct, kstack); + OFFSET(PACACURRENT, paca_struct, __current); + OFFSET(PACASAVEDMSR, paca_struct, saved_msr); + OFFSET(PACASTABRR, paca_struct, stab_rr); + OFFSET(PACAR1, paca_struct, saved_r1); + OFFSET(PACATOC, paca_struct, kernel_toc); + OFFSET(PACAKBASE, paca_struct, kernelbase); + OFFSET(PACAKMSR, paca_struct, kernel_msr); + OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); + OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); #ifdef CONFIG_PPC_BOOK3S - DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id)); + OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, - mm_ctx_low_slices_psize)); - DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, - mm_ctx_high_slices_psize)); + OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize); + OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize); DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); #endif /* CONFIG_PPC_MM_SLICES */ #endif #ifdef 
CONFIG_PPC_BOOK3E - DEFINE(PACAPGD, offsetof(struct paca_struct, pgd)); - DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd)); - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit)); - DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg)); - DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); - DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); - DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); - DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); - - DEFINE(TCD_ESEL_NEXT, - offsetof(struct tlb_core_data, esel_next)); - DEFINE(TCD_ESEL_MAX, - offsetof(struct tlb_core_data, esel_max)); - DEFINE(TCD_ESEL_FIRST, - offsetof(struct tlb_core_data, esel_first)); + OFFSET(PACAPGD, paca_struct, pgd); + OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXTLB, paca_struct, extlb); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXCRIT, paca_struct, excrit); + OFFSET(PACA_EXDBG, paca_struct, exdbg); + OFFSET(PACA_MC_STACK, paca_struct, mc_kstack); + OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack); + OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack); + OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr); + + OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next); + OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max); + OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); #endif /* CONFIG_PPC_BOOK3E */ #ifdef CONFIG_PPC_STD_MMU_64 - DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); - DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); - DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); + OFFSET(PACASLBCACHE, paca_struct, slb_cache); + OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr); + OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); + OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp); #else - DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp)); + OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp); #endif /* CONFIG_PPC_MM_SLICES */ - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); - DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); - DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); - DEFINE(SLBSHADOW_STACKVSID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); - DEFINE(SLBSHADOW_STACKESID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); - DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); - DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); - DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); - DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); - DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXSLB, paca_struct, exslb); + OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); + OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr); + OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid); + OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid); + OFFSET(SLBSHADOW_SAVEAREA, 
slb_shadow, save_area); + OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use); + OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx); + OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count); + OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx); #endif /* CONFIG_PPC_STD_MMU_64 */ - DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); + OFFSET(PACAEMERGSP, paca_struct, emergency_sp); #ifdef CONFIG_PPC_BOOK3S_64 - DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); - DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); -#endif - DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); - DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); - DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default)); - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct paca_struct, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct paca_struct, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct paca_struct, accounting.user_time)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct paca_struct, accounting.system_time)); - DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); - DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); - DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso)); + OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp); + OFFSET(PACA_IN_MCE, paca_struct, in_mce); +#endif + OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); + OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); + OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default); + OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime); + OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); + OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost); + OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso); #else /* CONFIG_PPC64 */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct thread_info, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct thread_info, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct thread_info, accounting.user_time)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct thread_info, accounting.system_time)); + OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime); #endif #endif /* CONFIG_PPC64 */ /* RTAS */ - DEFINE(RTASBASE, offsetof(struct rtas_t, base)); - DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); + OFFSET(RTASBASE, rtas_t, base); + OFFSET(RTASENTRY, rtas_t, entry); /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); @@ -280,38 +265,38 @@ int main(void) DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC64 */ - DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); - DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); - DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); - DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); - DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); - DEFINE(GPR5, 
STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); - DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); - DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); - DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); - DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); - DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); - DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); - DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); - DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); + STACK_PT_REGS_OFFSET(GPR0, gpr[0]); + STACK_PT_REGS_OFFSET(GPR1, gpr[1]); + STACK_PT_REGS_OFFSET(GPR2, gpr[2]); + STACK_PT_REGS_OFFSET(GPR3, gpr[3]); + STACK_PT_REGS_OFFSET(GPR4, gpr[4]); + STACK_PT_REGS_OFFSET(GPR5, gpr[5]); + STACK_PT_REGS_OFFSET(GPR6, gpr[6]); + STACK_PT_REGS_OFFSET(GPR7, gpr[7]); + STACK_PT_REGS_OFFSET(GPR8, gpr[8]); + STACK_PT_REGS_OFFSET(GPR9, gpr[9]); + STACK_PT_REGS_OFFSET(GPR10, gpr[10]); + STACK_PT_REGS_OFFSET(GPR11, gpr[11]); + STACK_PT_REGS_OFFSET(GPR12, gpr[12]); + STACK_PT_REGS_OFFSET(GPR13, gpr[13]); #ifndef CONFIG_PPC64 - DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); + STACK_PT_REGS_OFFSET(GPR14, gpr[14]); #endif /* CONFIG_PPC64 */ /* * Note: these symbols include _ because they overlap with special * register names */ - DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); - DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); - DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); - DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); - DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); - DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); - DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); - DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); - DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); - DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); + STACK_PT_REGS_OFFSET(_NIP, nip); + STACK_PT_REGS_OFFSET(_MSR, msr); + STACK_PT_REGS_OFFSET(_CTR, ctr); + STACK_PT_REGS_OFFSET(_LINK, link); + STACK_PT_REGS_OFFSET(_CCR, ccr); + STACK_PT_REGS_OFFSET(_XER, xer); + STACK_PT_REGS_OFFSET(_DAR, dar); + STACK_PT_REGS_OFFSET(_DSISR, dsisr); + STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3); + STACK_PT_REGS_OFFSET(RESULT, result); + STACK_PT_REGS_OFFSET(_TRAP, trap); #ifndef CONFIG_PPC64 /* * The PowerPC 400-class & Book-E processors have neither the DAR @@ -319,10 +304,10 @@ int main(void) * DEAR and ESR SPRs for such processors. For critical interrupts * we use them to hold SRR0 and SRR1. */ - DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); + STACK_PT_REGS_OFFSET(_DEAR, dar); + STACK_PT_REGS_OFFSET(_ESR, dsisr); #else /* CONFIG_PPC64 */ - DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); + STACK_PT_REGS_OFFSET(SOFTE, softe); /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); @@ -351,17 +336,17 @@ int main(void) #endif #ifndef CONFIG_PPC64 - DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); + OFFSET(MM_PGD, mm_struct, pgd); #endif /* ! 
CONFIG_PPC64 */ /* About the CPU features table */ - DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); - DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); - DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); + OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features); + OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup); + OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore); - DEFINE(pbe_address, offsetof(struct pbe, address)); - DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); - DEFINE(pbe_next, offsetof(struct pbe, next)); + OFFSET(pbe_address, pbe, address); + OFFSET(pbe_orig_address, pbe, orig_address); + OFFSET(pbe_next, pbe, next); #ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); @@ -369,40 +354,40 @@ int main(void) #endif /* ! CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); - DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); - DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); - DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); - DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); - DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); - DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); - DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); - DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); - DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction)); - DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); - DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); - DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); - DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size)); + OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp); + OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec); + OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs); + OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count); + OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest); + OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime); + OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32); + OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec); + OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec); + OFFSET(STAMP_XTIME, vdso_data, stamp_xtime); + OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); + OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); + OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); + OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size); #ifdef CONFIG_PPC64 - DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); - DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); - DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); + OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64); + OFFSET(TVAL64_TV_SEC, timeval, tv_sec); + OFFSET(TVAL64_TV_USEC, timeval, tv_usec); + 
OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec); + OFFSET(TSPC64_TV_SEC, timespec, tv_sec); + OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec); + OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec); #else - DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); + OFFSET(TVAL32_TV_SEC, timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, timeval, tv_usec); + OFFSET(TSPC32_TV_SEC, timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec); #endif /* timeval/timezone offsets for use by vdso */ - DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); - DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); + OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest); + OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime); /* Other bits used by the vdso */ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); @@ -422,168 +407,170 @@ int main(void) DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM - DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); - DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); - DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); - DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); - DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); + OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); + OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); + OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); + OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr); + OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); + OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); #ifdef CONFIG_ALTIVEC - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); + OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); #endif - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); + OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); - DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); - DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); - DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); - DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); - DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); - DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); + OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); + OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); + OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1); + OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0); + OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1); + OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2); + OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3); #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry)); - 
DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr)); - DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit)); - DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time)); - DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time)); - DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity)); - DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start)); - DEFINE(TAS_SEQCOUNT, offsetof(struct kvmhv_tb_accumulator, seqcount)); - DEFINE(TAS_TOTAL, offsetof(struct kvmhv_tb_accumulator, tb_total)); - DEFINE(TAS_MIN, offsetof(struct kvmhv_tb_accumulator, tb_min)); - DEFINE(TAS_MAX, offsetof(struct kvmhv_tb_accumulator, tb_max)); -#endif - DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3)); - DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4)); - DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5)); - DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6)); - DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7)); - DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); - DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); - DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); - DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry); + OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr); + OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit); + OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time); + OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time); + OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity); + OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start); + OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount); + OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total); + OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min); + OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max); +#endif + OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3); + OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4); + OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5); + OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6); + OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7); + OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid); + OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1); + OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared); + OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr); #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) - DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); + OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian); #endif - DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); - DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); - DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2)); - DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3)); - DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); - DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); + OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0); + OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1); + OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2); + OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3); + OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4); + OFFSET(VCPU_SHARED_MAS6, 
kvm_vcpu_arch_shared, mas6); - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); + OFFSET(VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_LPID, kvm, arch.lpid); /* book3s */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets)); - DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); - DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); - DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); - DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); - DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); - DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls)); - DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); - DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); - DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); - DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); - DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); - DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst)); - DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu)); - DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu)); + OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets); + OFFSET(KVM_SDR1, kvm, arch.sdr1); + OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid); + OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr); + OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1); + OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits); + OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls); + OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); + OFFSET(KVM_RADIX, kvm, arch.radix); + OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); + OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); + OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); + OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); + OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); + OFFSET(VCPU_CPU, kvm_vcpu, cpu); + OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); - DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); - DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); - DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); - DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); - DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); - DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); - DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); - DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); - DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); - DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); - DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); - DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); - DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); - DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); - DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); - DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); - DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); - DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); - DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); - DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); - DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); - DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); - DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); - DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier)); - DEFINE(VCPU_SLB, 
offsetof(struct kvm_vcpu, arch.slb)); - DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); - DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); - DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); - DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); - DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); - DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); - DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); - DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); - DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); - DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); - DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); - DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); - DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); - DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); - DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); - DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); - DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); - DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid)); - DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr)); - DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map)); - DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); - DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); - DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm)); - DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); - DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); - DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); - DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes)); - DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb)); - DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); - DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); + OFFSET(VCPU_PURR, kvm_vcpu, arch.purr); + OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr); + OFFSET(VCPU_IC, kvm_vcpu, arch.ic); + OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr); + OFFSET(VCPU_AMR, kvm_vcpu, arch.amr); + OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor); + OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr); + OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); + OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); + OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); + OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); + OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); + OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); + OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); + OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); + OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires); + OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); + OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); + OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); + OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); + OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); + OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); + OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); + OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar); + OFFSET(VCPU_SIER, kvm_vcpu, arch.sier); + OFFSET(VCPU_SLB, kvm_vcpu, arch.slb); + OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max); + OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr); + OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr); + OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar); + OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa); + OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_TRAP, 
kvm_vcpu, arch.trap); + OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar); + OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr); + OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr); + OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb); + OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr); + OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr); + OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr); + OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr); + OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr); + OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr); + OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop); + OFFSET(VCPU_WORT, kvm_vcpu, arch.wort); + OFFSET(VCPU_TID, kvm_vcpu, arch.tid); + OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr); + OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map); + OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest); + OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); + OFFSET(VCORE_KVM, kvmppc_vcore, kvm); + OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); + OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); + OFFSET(VCORE_PCR, kvmppc_vcore, pcr); + OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); + OFFSET(VCORE_VTB, kvmppc_vcore, vtb); + OFFSET(VCPU_SLB_E, kvmppc_slb, orige); + OFFSET(VCPU_SLB_V, kvmppc_slb, origv); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); - DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); - DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); - DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); - DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); - DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); - DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); - DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); - DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm)); - DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); - DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); - DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); - DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); - DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); - DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); + OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar); + OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar); + OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr); + OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm); + OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr); + OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr); + OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm); + OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm); + OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm); + OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm); + OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm); + OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm); + OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm); + OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm); + OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm); #endif #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE - DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); + OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu); # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) #else # define SVCPU_FIELD(x, f) @@ -666,11 +653,11 @@ int main(void) HSTATE_FIELD(HSTATE_DECEXP, dec_expires); HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode); DEFINE(IPI_PRIORITY, IPI_PRIORITY); - DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr)); - DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar)); - DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar)); - DEFINE(KVM_SPLIT_DO_NAP, 
offsetof(struct kvm_split_mode, do_nap)); - DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped)); + OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr); + OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar); + OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); + OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap); + OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 @@ -680,32 +667,27 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); - DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); - DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); - DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); + OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); + OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr); + OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save); #endif /* CONFIG_PPC_BOOK3S */ #endif /* CONFIG_KVM */ #ifdef CONFIG_KVM_GUEST - DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, - scratch1)); - DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, - scratch2)); - DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, - scratch3)); - DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, - int_pending)); - DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, - critical)); - DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); + OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1); + OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2); + OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3); + OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending); + OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical); + OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr); #endif #ifdef CONFIG_44x @@ -714,45 +696,37 @@ int main(void) #endif #ifdef CONFIG_PPC_FSL_BOOK3E DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); - DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); - DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); - DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); - DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); - DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); + OFFSET(TLBCAM_MAS0, tlbcam, MAS0); + OFFSET(TLBCAM_MAS1, tlbcam, MAS1); + OFFSET(TLBCAM_MAS2, tlbcam, MAS2); + OFFSET(TLBCAM_MAS3, tlbcam, MAS3); + OFFSET(TLBCAM_MAS7, tlbcam, MAS7); #endif #if defined(CONFIG_KVM) && defined(CONFIG_SPE) - DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); - DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); - DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); - DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); + OFFSET(VCPU_EVR, 
kvm_vcpu, arch.evr[0]); + OFFSET(VCPU_ACC, kvm_vcpu, arch.acc); + OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr); + OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr); #endif #ifdef CONFIG_KVM_BOOKE_HV - DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); - DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); + OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4); + OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); #endif #ifdef CONFIG_KVM_EXIT_TIMING - DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbu)); - DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbl)); - DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbu)); - DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbl)); + OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); + OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); + OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu); + OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl); #endif #ifdef CONFIG_PPC_POWERNV - DEFINE(PACA_CORE_IDLE_STATE_PTR, - offsetof(struct paca_struct, core_idle_state_ptr)); - DEFINE(PACA_THREAD_IDLE_STATE, - offsetof(struct paca_struct, thread_idle_state)); - DEFINE(PACA_THREAD_MASK, - offsetof(struct paca_struct, thread_mask)); - DEFINE(PACA_SUBCORE_SIBLING_MASK, - offsetof(struct paca_struct, subcore_sibling_mask)); + OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr); + OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state); + OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask); + OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask); #endif DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 917188615bf5..7fe8c79e6937 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -101,6 +101,8 @@ _GLOBAL(__setup_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 @@ -122,6 +124,8 @@ _GLOBAL(__restore_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 6a82ef039c50..bb7a1890aeb7 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -386,6 +386,23 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, + { /* 3.00-compliant processor, i.e. 
Power9 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000005, + .cpu_name = "POWER9 (architected)", + .cpu_features = CPU_FTRS_POWER9, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .flush_tlb = __flush_tlb_power9, + .platform = "power9", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index c6689f658b50..d0ea7860e02b 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. */ -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_direct_alloc_coherent, .free = __dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 6877e3fa95bb..41c749586bd2 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev) struct dev_archdata __maybe_unused *sd = &dev->archdata; #ifdef CONFIG_SWIOTLB - if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops) + if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops) pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); #endif @@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev, } #endif -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, @@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask); int __dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask); u64 __dma_get_required_mask(struct device *dev) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return 0; diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3841d749a430..a38600949f3a 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -205,6 +205,9 @@ transfer_to_handler_cont: mflr r9 lwz r11,0(r9) /* virtual address of handler */ lwz r9,4(r9) /* where to go when done */ +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif #ifdef CONFIG_TRACE_IRQFLAGS lis r12,reenable_mmu@h ori r12,r12,reenable_mmu@l @@ -292,7 +295,9 @@ stack_ovf: lis r9,StackOverflow@ha addi r9,r9,StackOverflow@l LOAD_MSR_KERNEL(r10,MSR_KERNEL) - FIX_SRR1(r10,r12) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r10 SYNC @@ -417,9 +422,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtlr r4 mtcr r5 lwz r7,_NIP(r1) - FIX_SRR1(r8, r0) lwz r2,GPR2(r1) lwz r1,GPR1(r1) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 SYNC @@ -699,6 +706,9 @@ 
fast_exception_return: lwz r10,_LINK(r11) mtlr r10 REST_GPR(10, r11) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR1,r9 mtspr SPRN_SRR0,r12 REST_GPR(9, r11) @@ -947,7 +957,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) .globl exc_exit_restart exc_exit_restart: lwz r12,_NIP(r1) - FIX_SRR1(r9,r10) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r9 REST_4GPRS(9, r1) @@ -1290,7 +1302,6 @@ _GLOBAL(enter_rtas) 1: tophys(r9,r1) lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r9,8(r9) /* original msr value */ - FIX_SRR1(r9,r0) addi r1,r1,INT_FRAME_SIZE li r0,0 mtspr SPRN_SPRG_RTAS,r0 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index d39d6118c6e9..857bf7c5b946 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -93,7 +93,7 @@ USE_FIXED_SECTION(real_vectors) __start_interrupts: /* No virt vectors corresponding with 0x0..0x100 */ -EXC_VIRT_NONE(0x4000, 0x4100) +EXC_VIRT_NONE(0x4000, 0x100) #ifdef CONFIG_PPC_P7_NAP @@ -114,15 +114,15 @@ EXC_VIRT_NONE(0x4000, 0x4100) #define IDLETEST NOTEST #endif -EXC_REAL_BEGIN(system_reset, 0x100, 0x200) +EXC_REAL_BEGIN(system_reset, 0x100, 0x100) SET_SCRATCH0(r13) GET_PACA(r13) clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */ EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD, IDLETEST, 0x100) -EXC_REAL_END(system_reset, 0x100, 0x200) -EXC_VIRT_NONE(0x4100, 0x4200) +EXC_REAL_END(system_reset, 0x100, 0x100) +EXC_VIRT_NONE(0x4100, 0x100) #ifdef CONFIG_PPC_P7_NAP EXC_COMMON_BEGIN(system_reset_idle_common) @@ -142,7 +142,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) lbz r0,HSTATE_HWTHREAD_REQ(r13) cmpwi r0,0 beq 1f - b kvm_start_guest + BRANCH_TO_KVM(r10, kvm_start_guest) 1: #endif @@ -166,7 +166,7 @@ TRAMP_REAL_BEGIN(system_reset_fwnmi) #endif /* CONFIG_PPC_PSERIES */ -EXC_REAL_BEGIN(machine_check, 0x200, 0x300) +EXC_REAL_BEGIN(machine_check, 0x200, 0x100) /* This is moved out of line as it can be patched by FW, but * some code path might still want to branch into the original * vector @@ -186,8 +186,8 @@ BEGIN_FTR_SECTION FTR_SECTION_ELSE b machine_check_pSeries_0 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) -EXC_REAL_END(machine_check, 0x200, 0x300) -EXC_VIRT_NONE(0x4200, 0x4300) +EXC_REAL_END(machine_check, 0x200, 0x100) +EXC_VIRT_NONE(0x4200, 0x100) TRAMP_REAL_BEGIN(machine_check_powernv_early) BEGIN_FTR_SECTION EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200) @@ -381,12 +381,12 @@ EXC_COMMON_BEGIN(machine_check_handle_early) lbz r3,PACA_THREAD_IDLE_STATE(r13) cmpwi r3,PNV_THREAD_NAP bgt 10f - IDLE_STATE_ENTER_SEQ(PPC_NAP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) /* No return */ 10: cmpwi r3,PNV_THREAD_SLEEP bgt 2f - IDLE_STATE_ENTER_SEQ(PPC_SLEEP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) /* No return */ 2: @@ -400,7 +400,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ ori r13,r13,1 SET_PACA(r13) - IDLE_STATE_ENTER_SEQ(PPC_WINKLE) + IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) /* No return */ 4: #endif @@ -483,8 +483,8 @@ EXC_COMMON_BEGIN(unrecover_mce) b 1b -EXC_REAL(data_access, 0x300, 0x380) -EXC_VIRT(data_access, 0x4300, 0x4380, 0x300) +EXC_REAL(data_access, 0x300, 0x80) +EXC_VIRT(data_access, 0x4300, 0x80, 0x300) TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) EXC_COMMON_BEGIN(data_access_common) @@ -512,7 +512,7 @@ MMU_FTR_SECTION_ELSE ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) -EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400) 
+EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) @@ -533,9 +533,9 @@ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400) mtctr r10 bctr #endif -EXC_REAL_END(data_access_slb, 0x380, 0x400) +EXC_REAL_END(data_access_slb, 0x380, 0x80) -EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400) +EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380) @@ -556,12 +556,12 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400) mtctr r10 bctr #endif -EXC_VIRT_END(data_access_slb, 0x4380, 0x4400) +EXC_VIRT_END(data_access_slb, 0x4380, 0x80) TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) -EXC_REAL(instruction_access, 0x400, 0x480) -EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400) +EXC_REAL(instruction_access, 0x400, 0x80) +EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400) TRAMP_KVM(PACA_EXGEN, 0x400) EXC_COMMON_BEGIN(instruction_access_common) @@ -580,7 +580,7 @@ MMU_FTR_SECTION_ELSE ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) -EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500) +EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) @@ -596,9 +596,9 @@ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500) mtctr r10 bctr #endif -EXC_REAL_END(instruction_access_slb, 0x480, 0x500) +EXC_REAL_END(instruction_access_slb, 0x480, 0x80) -EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500) +EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480) @@ -614,7 +614,7 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500) mtctr r10 bctr #endif -EXC_VIRT_END(instruction_access_slb, 0x4480, 0x4500) +EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) TRAMP_KVM(PACA_EXSLB, 0x480) @@ -711,23 +711,19 @@ EXC_COMMON_BEGIN(bad_addr_slb) bl slb_miss_bad_addr b ret_from_except -EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600) +EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) .globl hardware_interrupt_hv; hardware_interrupt_hv: BEGIN_FTR_SECTION _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_HV, SOFTEN_TEST_HV) -do_kvm_H0x500: - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502) FTR_SECTION_ELSE _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR) -do_kvm_0x500: - KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) -EXC_REAL_END(hardware_interrupt, 0x500, 0x600) +EXC_REAL_END(hardware_interrupt, 0x500, 0x100) -EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x4600) +EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) .globl hardware_interrupt_relon_hv; hardware_interrupt_relon_hv: BEGIN_FTR_SECTION @@ -735,13 +731,15 @@ hardware_interrupt_relon_hv: FTR_SECTION_ELSE _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) -EXC_VIRT_END(hardware_interrupt, 0x4500, 0x4600) +EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) +TRAMP_KVM(PACA_EXGEN, 0x500) +TRAMP_KVM_HV(PACA_EXGEN, 0x500) EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ) -EXC_REAL(alignment, 0x600, 0x700) -EXC_VIRT(alignment, 0x4600, 0x4700, 0x600) +EXC_REAL(alignment, 0x600, 0x100) +EXC_VIRT(alignment, 0x4600, 0x100, 0x600) TRAMP_KVM(PACA_EXGEN, 0x600) EXC_COMMON_BEGIN(alignment_common) mfspr r10,SPRN_DAR @@ -760,8 +758,8 @@ 
EXC_COMMON_BEGIN(alignment_common) b ret_from_except -EXC_REAL(program_check, 0x700, 0x800) -EXC_VIRT(program_check, 0x4700, 0x4800, 0x700) +EXC_REAL(program_check, 0x700, 0x100) +EXC_VIRT(program_check, 0x4700, 0x100, 0x700) TRAMP_KVM(PACA_EXGEN, 0x700) EXC_COMMON_BEGIN(program_check_common) EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) @@ -772,8 +770,8 @@ EXC_COMMON_BEGIN(program_check_common) b ret_from_except -EXC_REAL(fp_unavailable, 0x800, 0x900) -EXC_VIRT(fp_unavailable, 0x4800, 0x4900, 0x800) +EXC_REAL(fp_unavailable, 0x800, 0x100) +EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800) TRAMP_KVM(PACA_EXGEN, 0x800) EXC_COMMON_BEGIN(fp_unavailable_common) EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) @@ -805,20 +803,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif -EXC_REAL_MASKABLE(decrementer, 0x900, 0x980) -EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900) +EXC_REAL_MASKABLE(decrementer, 0x900, 0x80) +EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900) TRAMP_KVM(PACA_EXGEN, 0x900) EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) -EXC_REAL_HV(hdecrementer, 0x980, 0xa00) -EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980) +EXC_REAL_HV(hdecrementer, 0x980, 0x80) +EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980) TRAMP_KVM_HV(PACA_EXGEN, 0x980) EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) -EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0xb00) -EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x4b00, 0xa00) +EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100) +EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00) TRAMP_KVM(PACA_EXGEN, 0xa00) #ifdef CONFIG_PPC_DOORBELL EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception) @@ -827,11 +825,36 @@ EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception) #endif -EXC_REAL(trap_0b, 0xb00, 0xc00) -EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00) +EXC_REAL(trap_0b, 0xb00, 0x100) +EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00) TRAMP_KVM(PACA_EXGEN, 0xb00) EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER + /* + * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems + * that support it) before changing to HMT_MEDIUM. That allows the KVM + * code to save that value into the guest state (it is the guest's PPR + * value). Otherwise just change to HMT_MEDIUM as userspace has + * already saved the PPR. + */ +#define SYSCALL_KVMTEST \ + SET_SCRATCH0(r13); \ + GET_PACA(r13); \ + std r9,PACA_EXGEN+EX_R9(r13); \ + OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ + HMT_MEDIUM; \ + std r10,PACA_EXGEN+EX_R10(r13); \ + OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR); \ + mfcr r9; \ + KVMTEST_PR(0xc00); \ + GET_SCRATCH0(r13) + +#else +#define SYSCALL_KVMTEST \ + HMT_MEDIUM +#endif + #define LOAD_SYSCALL_HANDLER(reg) \ __LOAD_HANDLER(reg, system_call_common) @@ -884,50 +907,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ b system_call_common ; #endif -EXC_REAL_BEGIN(system_call, 0xc00, 0xd00) - /* - * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems - * that support it) before changing to HMT_MEDIUM. That allows the KVM - * code to save that value into the guest state (it is the guest's PPR - * value). Otherwise just change to HMT_MEDIUM as userspace has - * already saved the PPR. 
- */ -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER - SET_SCRATCH0(r13) - GET_PACA(r13) - std r9,PACA_EXGEN+EX_R9(r13) - OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); - HMT_MEDIUM; - std r10,PACA_EXGEN+EX_R10(r13) - OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR); - mfcr r9 - KVMTEST_PR(0xc00) - GET_SCRATCH0(r13) -#else - HMT_MEDIUM; -#endif +EXC_REAL_BEGIN(system_call, 0xc00, 0x100) + SYSCALL_KVMTEST SYSCALL_PSERIES_1 SYSCALL_PSERIES_2_RFID SYSCALL_PSERIES_3 -EXC_REAL_END(system_call, 0xc00, 0xd00) +EXC_REAL_END(system_call, 0xc00, 0x100) -EXC_VIRT_BEGIN(system_call, 0x4c00, 0x4d00) - HMT_MEDIUM +EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100) + SYSCALL_KVMTEST SYSCALL_PSERIES_1 SYSCALL_PSERIES_2_DIRECT SYSCALL_PSERIES_3 -EXC_VIRT_END(system_call, 0x4c00, 0x4d00) +EXC_VIRT_END(system_call, 0x4c00, 0x100) TRAMP_KVM(PACA_EXGEN, 0xc00) -EXC_REAL(single_step, 0xd00, 0xe00) -EXC_VIRT(single_step, 0x4d00, 0x4e00, 0xd00) +EXC_REAL(single_step, 0xd00, 0x100) +EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00) TRAMP_KVM(PACA_EXGEN, 0xd00) EXC_COMMON(single_step_common, 0xd00, single_step_exception) -EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0xe20) -EXC_VIRT_NONE(0x4e00, 0x4e20) +EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20) +EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00) TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00) EXC_COMMON_BEGIN(h_data_storage_common) mfspr r10,SPRN_HDAR @@ -942,14 +945,14 @@ EXC_COMMON_BEGIN(h_data_storage_common) b ret_from_except -EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0xe40) -EXC_VIRT_NONE(0x4e20, 0x4e40) +EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20) +EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20) TRAMP_KVM_HV(PACA_EXGEN, 0xe20) EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception) -EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0xe60) -EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x4e60, 0xe40) +EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20) +EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40) TRAMP_KVM_HV(PACA_EXGEN, 0xe40) EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt) @@ -959,9 +962,9 @@ EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt) * first, and then eventaully from there to the trampoline to get into virtual * mode. */ -__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0xe80, hmi_exception_early) -__TRAMP_REAL_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60) -EXC_VIRT_NONE(0x4e60, 0x4e80) +__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early) +__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60) +EXC_VIRT_NONE(0x4e60, 0x20) TRAMP_KVM_HV(PACA_EXGEN, 0xe60) TRAMP_REAL_BEGIN(hmi_exception_early) EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60) @@ -979,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early) EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) EXCEPTION_PROLOG_COMMON_3(0xe60) addi r3,r1,STACK_FRAME_OVERHEAD - bl hmi_exception_realmode + BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode) /* Windup the stack. 
*/ /* Move original HSRR0 and HSRR1 into the respective regs */ ld r9,_MSR(r1) @@ -1015,8 +1018,8 @@ hmi_exception_after_realmode: EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception) -EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0xea0) -EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x4ea0, 0xe80) +EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20) +EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80) TRAMP_KVM_HV(PACA_EXGEN, 0xe80) #ifdef CONFIG_PPC_DOORBELL EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception) @@ -1025,24 +1028,26 @@ EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception) #endif -EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0xec0) -EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x4ec0, 0xea0) +EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20) +EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0) TRAMP_KVM_HV(PACA_EXGEN, 0xea0) EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ) -EXC_REAL_NONE(0xec0, 0xf00) -EXC_VIRT_NONE(0x4ec0, 0x4f00) +EXC_REAL_NONE(0xec0, 0x20) +EXC_VIRT_NONE(0x4ec0, 0x20) +EXC_REAL_NONE(0xee0, 0x20) +EXC_VIRT_NONE(0x4ee0, 0x20) -EXC_REAL_OOL(performance_monitor, 0xf00, 0xf20) -EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x4f20, 0xf00) +EXC_REAL_OOL(performance_monitor, 0xf00, 0x20) +EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x20, 0xf00) TRAMP_KVM(PACA_EXGEN, 0xf00) EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception) -EXC_REAL_OOL(altivec_unavailable, 0xf20, 0xf40) -EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x4f40, 0xf20) +EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20) +EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20) TRAMP_KVM(PACA_EXGEN, 0xf20) EXC_COMMON_BEGIN(altivec_unavailable_common) EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) @@ -1078,8 +1083,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) b ret_from_except -EXC_REAL_OOL(vsx_unavailable, 0xf40, 0xf60) -EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x4f60, 0xf40) +EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20) +EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40) TRAMP_KVM(PACA_EXGEN, 0xf40) EXC_COMMON_BEGIN(vsx_unavailable_common) EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) @@ -1114,41 +1119,50 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) b ret_from_except -EXC_REAL_OOL(facility_unavailable, 0xf60, 0xf80) -EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x4f80, 0xf60) +EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20) +EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60) TRAMP_KVM(PACA_EXGEN, 0xf60) EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception) -EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0) -EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x4fa0, 0xf80) +EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20) +EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80) TRAMP_KVM_HV(PACA_EXGEN, 0xf80) EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception) -EXC_REAL_NONE(0xfa0, 0x1200) -EXC_VIRT_NONE(0x4fa0, 0x5200) +EXC_REAL_NONE(0xfa0, 0x20) +EXC_VIRT_NONE(0x4fa0, 0x20) +EXC_REAL_NONE(0xfc0, 0x20) +EXC_VIRT_NONE(0x4fc0, 0x20) +EXC_REAL_NONE(0xfe0, 0x20) +EXC_VIRT_NONE(0x4fe0, 0x20) + +EXC_REAL_NONE(0x1000, 0x100) +EXC_VIRT_NONE(0x5000, 0x100) +EXC_REAL_NONE(0x1100, 0x100) +EXC_VIRT_NONE(0x5100, 0x100) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300) -EXC_VIRT_NONE(0x5200, 0x5300) +EXC_REAL_HV(cbe_system_error, 0x1200, 0x100) +EXC_VIRT_NONE(0x5200, 0x100) TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200) EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception) #else /* 
CONFIG_CBE_RAS */ -EXC_REAL_NONE(0x1200, 0x1300) -EXC_VIRT_NONE(0x5200, 0x5300) +EXC_REAL_NONE(0x1200, 0x100) +EXC_VIRT_NONE(0x5200, 0x100) #endif -EXC_REAL(instruction_breakpoint, 0x1300, 0x1400) -EXC_VIRT(instruction_breakpoint, 0x5300, 0x5400, 0x1300) +EXC_REAL(instruction_breakpoint, 0x1300, 0x100) +EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300) TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300) EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception) -EXC_REAL_NONE(0x1400, 0x1500) -EXC_VIRT_NONE(0x5400, 0x5500) +EXC_REAL_NONE(0x1400, 0x100) +EXC_VIRT_NONE(0x5400, 0x100) -EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600) +EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100) mtspr SPRN_SPRG_HSCRATCH0,r13 EXCEPTION_PROLOG_0(PACA_EXGEN) EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500) @@ -1163,14 +1177,14 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600) KVMTEST_PR(0x1500) EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV) -EXC_REAL_END(denorm_exception_hv, 0x1500, 0x1600) +EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100) #ifdef CONFIG_PPC_DENORMALISATION -EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x5600) +EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) b exc_real_0x1500_denorm_exception_hv -EXC_VIRT_END(denorm_exception, 0x5500, 0x5600) +EXC_VIRT_END(denorm_exception, 0x5500, 0x100) #else -EXC_VIRT_NONE(0x5500, 0x5600) +EXC_VIRT_NONE(0x5500, 0x100) #endif TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500) @@ -1243,18 +1257,18 @@ EXC_COMMON_HV(denorm_common, 0x1500, unknown_exception) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_maintenance, 0x1600, 0x1700) -EXC_VIRT_NONE(0x5600, 0x5700) +EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100) +EXC_VIRT_NONE(0x5600, 0x100) TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600) EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception) #else /* CONFIG_CBE_RAS */ -EXC_REAL_NONE(0x1600, 0x1700) -EXC_VIRT_NONE(0x5600, 0x5700) +EXC_REAL_NONE(0x1600, 0x100) +EXC_VIRT_NONE(0x5600, 0x100) #endif -EXC_REAL(altivec_assist, 0x1700, 0x1800) -EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700) +EXC_REAL(altivec_assist, 0x1700, 0x100) +EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700) TRAMP_KVM(PACA_EXGEN, 0x1700) #ifdef CONFIG_ALTIVEC EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception) @@ -1264,13 +1278,13 @@ EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_thermal, 0x1800, 0x1900) -EXC_VIRT_NONE(0x5800, 0x5900) +EXC_REAL_HV(cbe_thermal, 0x1800, 0x100) +EXC_VIRT_NONE(0x5800, 0x100) TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800) EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception) #else /* CONFIG_CBE_RAS */ -EXC_REAL_NONE(0x1800, 0x1900) -EXC_VIRT_NONE(0x5800, 0x5900) +EXC_REAL_NONE(0x1800, 0x100) +EXC_VIRT_NONE(0x5800, 0x100) #endif diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 8f0c7c5d93f2..8ff0dd4e77a7 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -406,12 +406,35 @@ static void register_fw_dump(struct fadump_mem_struct *fdm) void crash_fadump(struct pt_regs *regs, const char *str) { struct fadump_crash_info_header *fdh = NULL; + int old_cpu, this_cpu; if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr) return; + /* + * old_cpu == -1 means this is the first CPU which has come here, + * go ahead and trigger fadump. + * + * old_cpu != -1 means some other CPU has already on it's way + * to trigger fadump, just keep looping here. 
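The crash path described in that comment elects exactly one "first crasher" with an atomic compare-and-swap on crashing_cpu. A minimal user-space sketch of the same first-arrival pattern, assuming C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>

static atomic_int crashing_cpu = -1;	/* -1: nobody has crashed yet */

/* Returns true only for the single CPU that wins the race and should
 * trigger the dump; every later caller gets false and just waits, much
 * as crash_fadump() spins while fw_dump.dump_registered stays set. */
static _Bool i_am_first_crasher(int this_cpu)
{
	int expected = -1;

	return atomic_compare_exchange_strong(&crashing_cpu, &expected, this_cpu);
}
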
+ */ + this_cpu = smp_processor_id(); + old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu); + + if (old_cpu != -1) { + /* + * We can't loop here indefinitely. Wait as long as fadump + * is in force. If we race with fadump un-registration this + * loop will break and then we go down to normal panic path + * and reboot. If fadump is in force the first crashing + * cpu will definitely trigger fadump. + */ + while (fw_dump.dump_registered) + cpu_relax(); + return; + } + fdh = __va(fw_dump.fadumphdr_addr); - crashing_cpu = smp_processor_id(); fdh->crashing_cpu = crashing_cpu; crash_save_vmcoreinfo(); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 9d963547d243..1607be7c0ef2 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -869,7 +869,6 @@ __secondary_start: /* enable MMU and jump to start_secondary */ li r4,MSR_KERNEL - FIX_SRR1(r4,r5) lis r3,start_secondary@h ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 @@ -977,7 +976,6 @@ start_here: ori r4,r4,2f@l tophys(r4,r4) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) - FIX_SRR1(r3,r5) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 SYNC @@ -1001,7 +999,6 @@ start_here: /* Now turn on the MMU for real! */ li r4,MSR_KERNEL - FIX_SRR1(r4,r5) lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1a9c99d3e5d8..c032fe8c2d26 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -329,6 +329,12 @@ InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 #endif EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -429,6 +435,12 @@ InstructionTLBMiss: DataStoreTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif mfcr r3 /* If we are faulting a kernel address, we have to use the @@ -561,6 +573,7 @@ InstructionTLBError: andis. r10,r5,0x4000 beq+ 1f tlbie r4 +itlbie: /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ 1: EXC_XFER_LITE(0x400, handle_page_fault) @@ -585,6 +598,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */ andis. r10,r5,0x4000 beq+ 1f tlbie r4 +dtlbie: 1: li r10,RPN_PATTERN mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */ @@ -602,8 +616,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */ * support of breakpoints and such. Someday I will get around to * using them. */ - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) + . = 0x1c00 +DataBreakpoint: + EXCEPTION_PROLOG_0 + mfcr r10 + mfspr r11, SPRN_SRR0 + cmplwi cr0, r11, (dtlbie - PAGE_OFFSET)@l + cmplwi cr7, r11, (itlbie - PAGE_OFFSET)@l + beq- cr0, 11f + beq- cr7, 11f + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 + addi r3,r1,STACK_FRAME_OVERHEAD + mfspr r4,SPRN_BAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + EXC_XFER_EE(0x1c00, do_break) +11: + mtcr r10 + EXCEPTION_EPILOG_0 + rfi + +#ifdef CONFIG_PPC_8xx_PERF_EVENT + . 
= 0x1d00 +InstructionBreakpoint: + EXCEPTION_PROLOG_0 + lis r10, (instruction_counter - PAGE_OFFSET)@ha + lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, -1 + stw r11, (instruction_counter - PAGE_OFFSET)@l(r10) + lis r10, 0xffff + ori r10, r10, 0x01 + mtspr SPRN_COUNTA, r10 + EXCEPTION_EPILOG_0 + rfi +#else EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) +#endif EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) @@ -977,6 +1026,14 @@ initial_mmu: lis r8, IDC_ENABLE@h mtspr SPRN_DC_CST, r8 #endif + /* Disable debug mode entry on breakpoints */ + mfspr r8, SPRN_DER +#ifdef CONFIG_PPC_8xx_PERF_EVENT + rlwinm r8, r8, 0, ~0xc +#else + rlwinm r8, r8, 0, ~0x8 +#endif + mtspr SPRN_DER, r8 blr @@ -1010,3 +1067,16 @@ cpu6_errata_word: .space 16 #endif +#ifdef CONFIG_PPC_8xx_PERF_EVENT + .globl itlb_miss_counter +itlb_miss_counter: + .space 4 + + .globl dtlb_miss_counter +dtlb_miss_counter: + .space 4 + + .globl instruction_counter +instruction_counter: + .space 4 +#endif diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 4d3aa05e28be..53b9c1dfd7d9 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -211,9 +211,11 @@ int hw_breakpoint_handler(struct die_args *args) int rc = NOTIFY_STOP; struct perf_event *bp; struct pt_regs *regs = args->regs; +#ifndef CONFIG_PPC_8xx int stepped = 1; - struct arch_hw_breakpoint *info; unsigned int instr; +#endif + struct arch_hw_breakpoint *info; unsigned long dar = regs->dar; /* Disable breakpoints during exception handling */ @@ -228,8 +230,10 @@ int hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); bp = __this_cpu_read(bp_per_reg); - if (!bp) + if (!bp) { + rc = NOTIFY_DONE; goto out; + } info = counter_arch_bp(bp); /* @@ -255,6 +259,7 @@ int hw_breakpoint_handler(struct die_args *args) (dar - bp->attr.bp_addr < bp->attr.bp_len))) info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; +#ifndef CONFIG_PPC_8xx /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { current->thread.last_hit_ubp = bp; @@ -278,6 +283,7 @@ int hw_breakpoint_handler(struct die_args *args) perf_event_disable_inatomic(bp); goto out; } +#endif /* * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 72dac0b58061..5f61cc0349c0 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -40,9 +40,7 @@ #define _WORC GPR11 #define _PTCR GPR12 -#define PSSCR_HV_TEMPLATE PSSCR_ESL | PSSCR_EC | \ - PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ - PSSCR_MTL_MASK +#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16 .text @@ -205,7 +203,7 @@ pnv_enter_arch207_idle_mode: stb r3,PACA_THREAD_IDLE_STATE(r13) cmpwi cr3,r3,PNV_THREAD_SLEEP bge cr3,2f - IDLE_STATE_ENTER_SEQ(PPC_NAP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) /* No return */ 2: /* Sleep or winkle */ @@ -239,7 +237,7 @@ pnv_fastsleep_workaround_at_entry: common_enter: /* common code for all the threads entering sleep or winkle */ bgt cr3,enter_winkle - IDLE_STATE_ENTER_SEQ(PPC_SLEEP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) fastsleep_workaround_at_entry: ori r15,r15,PNV_CORE_IDLE_LOCK_BIT @@ -250,7 +248,7 @@ fastsleep_workaround_at_entry: /* Fast sleep workaround */ li r3,1 li r4,1 - bl opal_rm_config_cpu_idle_state + bl opal_config_cpu_idle_state /* Clear Lock bit */ li r0,0 @@ -261,10 
+259,10 @@ fastsleep_workaround_at_entry: enter_winkle: bl save_sprs_to_stack - IDLE_STATE_ENTER_SEQ(PPC_WINKLE) + IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) /* - * r3 - requested stop state + * r3 - PSSCR value corresponding to the requested stop state. */ power_enter_stop: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -274,13 +272,22 @@ power_enter_stop: stb r4,HSTATE_HWTHREAD_STATE(r13) #endif /* + * Check if we are executing the lite variant with ESL=EC=0 + */ + andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED + clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ + bne 1f + IDLE_STATE_ENTER_SEQ(PPC_STOP) + li r3,0 /* Since we didn't lose state, return 0 */ + b pnv_wakeup_noloss +/* * Check if the requested state is a deep idle state. */ - LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) +1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) cmpd r3,r4 bge 2f - IDLE_STATE_ENTER_SEQ(PPC_STOP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) 2: /* * Entering deep idle state. @@ -302,7 +309,7 @@ lwarx_loop_stop: bl save_sprs_to_stack - IDLE_STATE_ENTER_SEQ(PPC_STOP) + IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) _GLOBAL(power7_idle) /* Now check if user or arch enabled NAP mode */ @@ -353,16 +360,17 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \ ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \ 20: nop; - /* - * r3 - requested stop state + * r3 - The PSSCR value corresponding to the stop state. + * r4 - The PSSCR mask corrresonding to the stop state. */ _GLOBAL(power9_idle_stop) - LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE) - or r4,r4,r3 - mtspr SPRN_PSSCR, r4 - li r4, 1 + mfspr r5,SPRN_PSSCR + andc r5,r5,r4 + or r3,r3,r5 + mtspr SPRN_PSSCR,r3 LOAD_REG_ADDR(r5,power_enter_stop) + li r4,1 b pnv_powersave_common /* No return */ /* @@ -544,7 +552,7 @@ timebase_resync: */ ble cr3,clear_lock /* Time base re-sync */ - bl opal_rm_resync_timebase; + bl opal_resync_timebase; /* * If waking up from sleep, per core state is not lost, skip to * clear_lock. @@ -633,7 +641,7 @@ hypervisor_state_restored: fastsleep_workaround_at_exit: li r3,1 li r4,0 - bl opal_rm_config_cpu_idle_state + bl opal_config_cpu_idle_state b timebase_resync /* diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 5f8613ceb97f..a582e0d42525 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -12,7 +12,7 @@ #undef DEBUG #include <linux/kernel.h> -#include <linux/sched.h> /* for init_mm */ +#include <linux/sched/mm.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 3963f0b68d52..a1854d1ded8b 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -8,6 +8,7 @@ #include <linux/export.h> #include <asm/io.h> #include <asm/pci-bridge.h> +#include <asm/isa-bridge.h> /* * Here comes the ppc64 implementation of the IOMAP diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index ae1316106e2b..bb6f8993412e 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -29,6 +29,7 @@ #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> +#include <asm/isa-bridge.h> unsigned long isa_io_base; /* NULL if no ISA bus */ EXPORT_SYMBOL(isa_io_base); @@ -167,6 +168,97 @@ void __init isa_bridge_find_early(struct pci_controller *hose) } /** + * isa_bridge_find_early - Find and map the ISA IO space early before + * main PCI discovery. 
This is optionally called by + * the arch code when adding PCI PHBs to get early + * access to ISA IO ports + */ +void __init isa_bridge_init_non_pci(struct device_node *np) +{ + const __be32 *ranges, *pbasep = NULL; + int rlen, i, rs; + u32 na, ns, pna; + u64 cbase, pbase, size = 0; + + /* If we already have an ISA bridge, bail off */ + if (isa_bridge_devnode != NULL) + return; + + pna = of_n_addr_cells(np); + if (of_property_read_u32(np, "#address-cells", &na) || + of_property_read_u32(np, "#size-cells", &ns)) { + pr_warn("ISA: Non-PCI bridge %s is missing address format\n", + np->full_name); + return; + } + + /* Check it's a supported address format */ + if (na != 2 || ns != 1) { + pr_warn("ISA: Non-PCI bridge %s has unsupported address format\n", + np->full_name); + return; + } + rs = na + ns + pna; + + /* Grab the ranges property */ + ranges = of_get_property(np, "ranges", &rlen); + if (ranges == NULL || rlen < rs) { + pr_warn("ISA: Non-PCI bridge %s has absent or invalid ranges\n", + np->full_name); + return; + } + + /* Parse it. We are only looking for IO space */ + for (i = 0; (i + rs - 1) < rlen; i += rs) { + if (be32_to_cpup(ranges + i) != 1) + continue; + cbase = be32_to_cpup(ranges + i + 1); + size = of_read_number(ranges + i + na + pna, ns); + pbasep = ranges + i + na; + break; + } + + /* Got something ? */ + if (!size || !pbasep) { + pr_warn("ISA: Non-PCI bridge %s has no usable IO range\n", + np->full_name); + return; + } + + /* Align size and make sure it's cropped to 64K */ + size = PAGE_ALIGN(size); + if (size > 0x10000) + size = 0x10000; + + /* Map pbase */ + pbase = of_translate_address(np, pbasep); + if (pbase == OF_BAD_ADDR) { + pr_warn("ISA: Non-PCI bridge %s failed to translate IO base\n", + np->full_name); + return; + } + + /* We need page alignment */ + if ((cbase & ~PAGE_MASK) || (pbase & ~PAGE_MASK)) { + pr_warn("ISA: Non-PCI bridge %s has non aligned IO range\n", + np->full_name); + return; + } + + /* Got it */ + isa_bridge_devnode = np; + + /* Set the global ISA io base to indicate we have an ISA bridge + * and map it + */ + isa_io_base = ISA_IO_BASE; + __ioremap_at(pbase, (void *)ISA_IO_BASE, + size, pgprot_val(pgprot_noncached(__pgprot(0)))); + + pr_debug("ISA: Non-PCI bridge is %s\n", np->full_name); +} + +/** * isa_bridge_find_late - Find and map the ISA IO space upon discovery of * a new ISA bridge */ diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 735ff3d3f77d..fce05a38851c 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -285,6 +285,7 @@ asm(".global kretprobe_trampoline\n" ".type kretprobe_trampoline, @function\n" "kretprobe_trampoline:\n" "nop\n" + "blr\n" ".size kretprobe_trampoline, .-kretprobe_trampoline\n"); /* @@ -337,6 +338,13 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->nip = orig_ret_address; + /* + * Make LR point to the orig_ret_address. + * When the 'nop' inside the kretprobe_trampoline + * is optimized, we can do a 'blr' after executing the + * detour buffer code. + */ + regs->link = orig_ret_address; reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); @@ -467,15 +475,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) return 0; } -/* - * Wrapper routine to for handling exceptions. 
- */ -int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - return NOTIFY_DONE; -} - unsigned long arch_deref_entry_point(void *entry) { return ppc_global_function_entry(entry); diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index bc525ea0dc09..0694d20f85b6 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -233,7 +233,8 @@ static int __init add_legacy_isa_port(struct device_node *np, * * Note: Don't even try on P8 lpc, we know it's not directly mapped */ - if (!of_device_is_compatible(isa_brg, "ibm,power8-lpc")) { + if (!of_device_is_compatible(isa_brg, "ibm,power8-lpc") || + of_get_property(isa_brg, "ranges", NULL)) { taddr = of_translate_address(np, reg); if (taddr == OF_BAD_ADDR) taddr = 0; diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 32be2a844947..ae179cb1bb3c 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -80,12 +80,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) * each other. */ ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */ + lwz r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */ addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of cache block size */ srw. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 @@ -96,12 +96,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) /* Now invalidate the instruction cache */ - lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */ + lwz r7,ICACHEL1BLOCKSIZE(r10) /* Get Icache block size */ addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 - lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */ + lwz r9,ICACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of Icache block size */ srw. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 @@ -128,12 +128,12 @@ _GLOBAL(flush_dcache_range) * Different systems have different cache line sizes */ ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ + lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */ addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */ srw. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 @@ -156,12 +156,12 @@ EXPORT_SYMBOL(flush_dcache_range) */ _GLOBAL(flush_dcache_phys_range) ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ + lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */ addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */ srw. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? 
*/ mfmsr r5 /* Disable MMU Data Relocation */ @@ -184,12 +184,12 @@ _GLOBAL(flush_dcache_phys_range) _GLOBAL(flush_inval_dcache_range) ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ + lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */ addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */ srw. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ sync @@ -225,8 +225,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) /* Flush the dcache */ ld r7,PPC64_CACHES@toc(r2) clrrdi r3,r3,PAGE_SHIFT /* Page align */ - lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ - lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ + lwz r4,DCACHEL1BLOCKSPERPAGE(r7) /* Get # dcache blocks per page */ + lwz r5,DCACHEL1BLOCKSIZE(r7) /* Get dcache block size */ mr r6,r3 mtctr r4 0: dcbst 0,r6 @@ -236,8 +236,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) /* Now invalidate the icache */ - lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */ - lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */ + lwz r4,ICACHEL1BLOCKSPERPAGE(r7) /* Get # icache blocks per page */ + lwz r5,ICACHEL1BLOCKSIZE(r7) /* Get icache block size */ mtctr r4 1: icbi 0,r3 add r3,r3,r5 diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c new file mode 100644 index 000000000000..2282bf4e63cd --- /dev/null +++ b/arch/powerpc/kernel/optprobes.c @@ -0,0 +1,347 @@ +/* + * Code for Kernel probes Jump optimization. + * + * Copyright 2017, Anju T, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kprobes.h> +#include <linux/jump_label.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <asm/kprobes.h> +#include <asm/ptrace.h> +#include <asm/cacheflush.h> +#include <asm/code-patching.h> +#include <asm/sstep.h> +#include <asm/ppc-opcode.h> + +#define TMPL_CALL_HDLR_IDX \ + (optprobe_template_call_handler - optprobe_template_entry) +#define TMPL_EMULATE_IDX \ + (optprobe_template_call_emulate - optprobe_template_entry) +#define TMPL_RET_IDX \ + (optprobe_template_ret - optprobe_template_entry) +#define TMPL_OP_IDX \ + (optprobe_template_op_address - optprobe_template_entry) +#define TMPL_INSN_IDX \ + (optprobe_template_insn - optprobe_template_entry) +#define TMPL_END_IDX \ + (optprobe_template_end - optprobe_template_entry) + +DEFINE_INSN_CACHE_OPS(ppc_optinsn); + +static bool insn_page_in_use; + +static void *__ppc_alloc_insn_page(void) +{ + if (insn_page_in_use) + return NULL; + insn_page_in_use = true; + return &optinsn_slot; +} + +static void __ppc_free_insn_page(void *page __maybe_unused) +{ + insn_page_in_use = false; +} + +struct kprobe_insn_cache kprobe_ppc_optinsn_slots = { + .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex), + .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages), + /* insn_size initialized later */ + .alloc = __ppc_alloc_insn_page, + .free = __ppc_free_insn_page, + .nr_garbage = 0, +}; + +/* + * Check if we can optimize this probe. Returns NIP post-emulation if this can + * be optimized and 0 otherwise. 
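The 32MB limit mentioned in can_optimize() below, and checked again via is_offset_in_branch_range() in arch_prepare_optimized_kprobe(), follows from the relative branch encoding: an I-form b/bl carries a 24-bit word offset, i.e. a signed 26-bit byte offset. A sketch of that bound (an assumption about the arithmetic only; the kernel helper's exact checks are not shown in this diff):

/* 24-bit LI field, shifted left by 2 => signed 26-bit byte offset:
 *   -2^25 <= offset <= 2^25 - 4, and the offset must be word aligned. */
static int offset_reachable_by_rel_branch(long offset)
{
	return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 3);
}
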
+ */ +static unsigned long can_optimize(struct kprobe *p) +{ + struct pt_regs regs; + struct instruction_op op; + unsigned long nip = 0; + + /* + * kprobe placed for kretprobe during boot time + * has a 'nop' instruction, which can be emulated. + * So further checks can be skipped. + */ + if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + return (unsigned long)p->addr + sizeof(kprobe_opcode_t); + + /* + * We only support optimizing kernel addresses, but not + * module addresses. + * + * FIXME: Optimize kprobes placed in module addresses. + */ + if (!is_kernel_addr((unsigned long)p->addr)) + return 0; + + memset(®s, 0, sizeof(struct pt_regs)); + regs.nip = (unsigned long)p->addr; + regs.trap = 0x0; + regs.msr = MSR_KERNEL; + + /* + * Kprobe placed in conditional branch instructions are + * not optimized, as we can't predict the nip prior with + * dummy pt_regs and can not ensure that the return branch + * from detour buffer falls in the range of address (i.e 32MB). + * A branch back from trampoline is set up in the detour buffer + * to the nip returned by the analyse_instr() here. + * + * Ensure that the instruction is not a conditional branch, + * and that can be emulated. + */ + if (!is_conditional_branch(*p->ainsn.insn) && + analyse_instr(&op, ®s, *p->ainsn.insn)) + nip = regs.nip; + + return nip; +} + +static void optimized_callback(struct optimized_kprobe *op, + struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags; + + /* This is possible if op is under delayed unoptimizing */ + if (kprobe_disabled(&op->kp)) + return; + + local_irq_save(flags); + hard_irq_disable(); + + if (kprobe_running()) { + kprobes_inc_nmissed_count(&op->kp); + } else { + __this_cpu_write(current_kprobe, &op->kp); + regs->nip = (unsigned long)op->kp.addr; + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + opt_pre_handler(&op->kp, regs); + __this_cpu_write(current_kprobe, NULL); + } + + /* + * No need for an explicit __hard_irq_enable() here. + * local_irq_restore() will re-enable interrupts, + * if they were hard disabled. + */ + local_irq_restore(flags); +} +NOKPROBE_SYMBOL(optimized_callback); + +void arch_remove_optimized_kprobe(struct optimized_kprobe *op) +{ + if (op->optinsn.insn) { + free_ppc_optinsn_slot(op->optinsn.insn, 1); + op->optinsn.insn = NULL; + } +} + +/* + * emulate_step() requires insn to be emulated as + * second parameter. Load register 'r4' with the + * instruction. + */ +void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) +{ + /* addis r4,0,(insn)@h */ + *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) | + ((val >> 16) & 0xffff); + + /* ori r4,r4,(insn)@l */ + *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) | + (val & 0xffff); +} + +/* + * Generate instructions to load provided immediate 64-bit value + * to register 'r3' and patch these instructions at 'addr'. 
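patch_imm64_load_insns() below assembles the value from four 16-bit fields. A worked example with a hypothetical value, 0x123456789abcdef0 (illustrative, not taken from the patch):

static void split_imm64(unsigned long val)	/* e.g. val = 0x123456789abcdef0UL */
{
	unsigned int highest = (val >> 48) & 0xffff;	/* 0x1234: lis  r3,highest   */
	unsigned int higher  = (val >> 32) & 0xffff;	/* 0x5678: ori  r3,r3,higher */
	/* rldicr r3,r3,32,31 then shifts those two halves up to bits 63..32 */
	unsigned int high    = (val >> 16) & 0xffff;	/* 0x9abc: oris r3,r3,high   */
	unsigned int low     =  val        & 0xffff;	/* 0xdef0: ori  r3,r3,low    */

	(void)highest; (void)higher; (void)high; (void)low;
}
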
+ */ +void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) +{ + /* lis r3,(op)@highest */ + *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) | + ((val >> 48) & 0xffff); + + /* ori r3,r3,(op)@higher */ + *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) | + ((val >> 32) & 0xffff); + + /* rldicr r3,r3,32,31 */ + *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) | + __PPC_SH64(32) | __PPC_ME64(31); + + /* oris r3,r3,(op)@h */ + *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) | + ((val >> 16) & 0xffff); + + /* ori r3,r3,(op)@l */ + *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) | + (val & 0xffff); +} + +int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) +{ + kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step; + kprobe_opcode_t *op_callback_addr, *emulate_step_addr; + long b_offset; + unsigned long nip; + + kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; + + nip = can_optimize(p); + if (!nip) + return -EILSEQ; + + /* Allocate instruction slot for detour buffer */ + buff = get_ppc_optinsn_slot(); + if (!buff) + return -ENOMEM; + + /* + * OPTPROBE uses 'b' instruction to branch to optinsn.insn. + * + * The target address has to be relatively nearby, to permit use + * of branch instruction in powerpc, because the address is specified + * in an immediate field in the instruction opcode itself, ie 24 bits + * in the opcode specify the address. Therefore the address should + * be within 32MB on either side of the current instruction. + */ + b_offset = (unsigned long)buff - (unsigned long)p->addr; + if (!is_offset_in_branch_range(b_offset)) + goto error; + + /* Check if the return address is also within 32MB range */ + b_offset = (unsigned long)(buff + TMPL_RET_IDX) - + (unsigned long)nip; + if (!is_offset_in_branch_range(b_offset)) + goto error; + + /* Setup template */ + memcpy(buff, optprobe_template_entry, + TMPL_END_IDX * sizeof(kprobe_opcode_t)); + + /* + * Fixup the template with instructions to: + * 1. load the address of the actual probepoint + */ + patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX); + + /* + * 2. branch to optimized_callback() and emulate_step() + */ + kprobe_lookup_name("optimized_callback", op_callback_addr); + kprobe_lookup_name("emulate_step", emulate_step_addr); + if (!op_callback_addr || !emulate_step_addr) { + WARN(1, "kprobe_lookup_name() failed\n"); + goto error; + } + + branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX, + (unsigned long)op_callback_addr, + BRANCH_SET_LINK); + + branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX, + (unsigned long)emulate_step_addr, + BRANCH_SET_LINK); + + if (!branch_op_callback || !branch_emulate_step) + goto error; + + buff[TMPL_CALL_HDLR_IDX] = branch_op_callback; + buff[TMPL_EMULATE_IDX] = branch_emulate_step; + + /* + * 3. load instruction to be emulated into relevant register, and + */ + patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX); + + /* + * 4. branch back from trampoline + */ + buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX, + (unsigned long)nip, 0); + + flush_icache_range((unsigned long)buff, + (unsigned long)(&buff[TMPL_END_IDX])); + + op->optinsn.insn = buff; + + return 0; + +error: + free_ppc_optinsn_slot(buff, 0); + return -ERANGE; + +} + +int arch_prepared_optinsn(struct arch_optimized_insn *optinsn) +{ + return optinsn->insn != NULL; +} + +/* + * On powerpc, Optprobes always replaces one instruction (4 bytes + * aligned and 4 bytes long). 
It is impossible to encounter another + * kprobe in this address range. So always return 0. + */ +int arch_check_optimized_kprobe(struct optimized_kprobe *op) +{ + return 0; +} + +void arch_optimize_kprobes(struct list_head *oplist) +{ + struct optimized_kprobe *op; + struct optimized_kprobe *tmp; + + list_for_each_entry_safe(op, tmp, oplist, list) { + /* + * Backup instructions which will be replaced + * by jump address + */ + memcpy(op->optinsn.copied_insn, op->kp.addr, + RELATIVEJUMP_SIZE); + patch_instruction(op->kp.addr, + create_branch((unsigned int *)op->kp.addr, + (unsigned long)op->optinsn.insn, 0)); + list_del_init(&op->list); + } +} + +void arch_unoptimize_kprobe(struct optimized_kprobe *op) +{ + arch_arm_kprobe(&op->kp); +} + +void arch_unoptimize_kprobes(struct list_head *oplist, + struct list_head *done_list) +{ + struct optimized_kprobe *op; + struct optimized_kprobe *tmp; + + list_for_each_entry_safe(op, tmp, oplist, list) { + arch_unoptimize_kprobe(op); + list_move(&op->list, done_list); + } +} + +int arch_within_optimized_kprobe(struct optimized_kprobe *op, + unsigned long addr) +{ + return ((unsigned long)op->kp.addr <= addr && + (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); +} diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S new file mode 100644 index 000000000000..4937bef7652f --- /dev/null +++ b/arch/powerpc/kernel/optprobes_head.S @@ -0,0 +1,142 @@ +/* + * Code to prepare detour buffer for optprobes in Kernel. + * + * Copyright 2017, Anju T, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <asm/ppc_asm.h> +#include <asm/ptrace.h> +#include <asm/asm-offsets.h> + +#define OPT_SLOT_SIZE 65536 + + .balign 4 + + /* + * Reserve an area to allocate slots for detour buffer. + * This is part of .text section (rather than vmalloc area) + * as this needs to be within 32MB of the probed address. + */ + .global optinsn_slot +optinsn_slot: + .space OPT_SLOT_SIZE + + /* + * Optprobe template: + * This template gets copied into one of the slots in optinsn_slot + * and gets fixed up with real optprobe structures et al. + */ + .global optprobe_template_entry +optprobe_template_entry: + /* Create an in-memory pt_regs */ + stdu r1,-INT_FRAME_SIZE(r1) + SAVE_GPR(0,r1) + /* Save the previous SP into stack */ + addi r0,r1,INT_FRAME_SIZE + std r0,GPR1(r1) + SAVE_10GPRS(2,r1) + SAVE_10GPRS(12,r1) + SAVE_10GPRS(22,r1) + /* Save SPRS */ + mfmsr r5 + std r5,_MSR(r1) + li r5,0x700 + std r5,_TRAP(r1) + li r5,0 + std r5,ORIG_GPR3(r1) + std r5,RESULT(r1) + mfctr r5 + std r5,_CTR(r1) + mflr r5 + std r5,_LINK(r1) + mfspr r5,SPRN_XER + std r5,_XER(r1) + mfcr r5 + std r5,_CCR(r1) + lbz r5,PACASOFTIRQEN(r13) + std r5,SOFTE(r1) + mfdar r5 + std r5,_DAR(r1) + mfdsisr r5 + std r5,_DSISR(r1) + + /* + * We may get here from a module, so load the kernel TOC in r2. + * The original TOC gets restored when pt_regs is restored + * further below. + */ + ld r2,PACATOC(r13) + + .global optprobe_template_op_address +optprobe_template_op_address: + /* + * Parameters to optimized_callback(): + * 1. optimized_kprobe structure in r3 + */ + nop + nop + nop + nop + nop + /* 2. 
pt_regs pointer in r4 */ + addi r4,r1,STACK_FRAME_OVERHEAD + + .global optprobe_template_call_handler +optprobe_template_call_handler: + /* Branch to optimized_callback() */ + nop + + /* + * Parameters for instruction emulation: + * 1. Pass SP in register r3. + */ + addi r3,r1,STACK_FRAME_OVERHEAD + + .global optprobe_template_insn +optprobe_template_insn: + /* 2, Pass instruction to be emulated in r4 */ + nop + nop + + .global optprobe_template_call_emulate +optprobe_template_call_emulate: + /* Branch to emulate_step() */ + nop + + /* + * All done. + * Now, restore the registers... + */ + ld r5,_MSR(r1) + mtmsr r5 + ld r5,_CTR(r1) + mtctr r5 + ld r5,_LINK(r1) + mtlr r5 + ld r5,_XER(r1) + mtxer r5 + ld r5,_CCR(r1) + mtcr r5 + ld r5,_DAR(r1) + mtdar r5 + ld r5,_DSISR(r1) + mtdsisr r5 + REST_GPR(0,r1) + REST_10GPRS(2,r1) + REST_10GPRS(12,r1) + REST_10GPRS(22,r1) + /* Restore the previous SP */ + addi r1,r1,INT_FRAME_SIZE + + .global optprobe_template_ret +optprobe_template_ret: + /* ... and jump back from trampoline */ + nop + + .global optprobe_template_end +optprobe_template_end: diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index fa20060ff7a5..dfc479df9634 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/export.h> #include <linux/memblock.h> +#include <linux/sched/task.h> #include <asm/lppaca.h> #include <asm/paca.h> diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 74bec5498972..ffda24a38dda 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -25,6 +25,7 @@ #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/mm.h> +#include <linux/shmem_fs.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> @@ -59,14 +60,14 @@ resource_size_t isa_mem_base; EXPORT_SYMBOL(isa_mem_base); -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } @@ -1559,16 +1560,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; - if (!res->flags) { - if (i == 0) - printk(KERN_ERR "PCI: Memory resource 0 not set for " - "host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + if (!res->flags) continue; - } - offset = hose->mem_offset[i]; - + offset = hose->mem_offset[i]; pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i, res, (unsigned long long)offset); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 5dd056df0baa..d645da302bf2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -16,6 +16,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> @@ -730,6 +733,28 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) mtspr(SPRN_DABRX, dabrx); return 0; } +#elif defined(CONFIG_PPC_8xx) +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ + unsigned long addr = dabr & ~HW_BRK_TYPE_DABR; 
+ unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */ + unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */ + + if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) + lctrl1 |= 0xa0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) + lctrl1 |= 0xf0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == 0) + lctrl2 = 0; + + mtspr(SPRN_LCTRL2, 0); + mtspr(SPRN_CMPE, addr); + mtspr(SPRN_CMPF, addr + 4); + mtspr(SPRN_LCTRL1, lctrl1); + mtspr(SPRN_LCTRL2, lctrl2); + + return 0; +} #else static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ac83eb04a8b8..a3944540fe0d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -649,6 +649,7 @@ static void __init early_cmdline_parse(void) struct option_vector1 { u8 byte1; u8 arch_versions; + u8 arch_versions3; } __packed; struct option_vector2 { @@ -691,6 +692,9 @@ struct option_vector5 { u8 reserved2; __be16 reserved3; u8 subprocessors; + u8 byte22; + u8 intarch; + u8 mmu; } __packed; struct option_vector6 { @@ -700,7 +704,7 @@ struct option_vector6 { } __packed; struct ibm_arch_vec { - struct { u32 mask, val; } pvrs[10]; + struct { u32 mask, val; } pvrs[12]; u8 num_vectors; @@ -750,6 +754,14 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { .val = cpu_to_be32(0x004d0000), }, { + .mask = cpu_to_be32(0xffff0000), /* POWER9 */ + .val = cpu_to_be32(0x004e0000), + }, + { + .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ + .val = cpu_to_be32(0x0f000005), + }, + { .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ .val = cpu_to_be32(0x0f000004), }, @@ -774,6 +786,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { .byte1 = 0, .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, + .arch_versions3 = OV1_PPC_3_00, }, .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), @@ -826,7 +839,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { 0, #endif .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), - .bin_opts = 0, + .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), .micro_checkpoint = 0, .reserved0 = 0, .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ @@ -836,6 +849,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { .reserved2 = 0, .reserved3 = 0, .subprocessors = 1, + .intarch = 0, + .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | + OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), }, /* option vector 6: IBM PAPR hints */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 112cc3b2ee1a..b8a4987f58cf 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1145,31 +1145,29 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) void __init rtas_initialize(void) { unsigned long rtas_region = RTAS_INSTANTIATE_MAX; + u32 base, size, entry; + int no_base, no_size, no_entry; /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. 
*/ rtas.dev = of_find_node_by_name(NULL, "rtas"); - if (rtas.dev) { - const __be32 *basep, *entryp, *sizep; - - basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); - sizep = of_get_property(rtas.dev, "rtas-size", NULL); - if (basep != NULL && sizep != NULL) { - rtas.base = __be32_to_cpu(*basep); - rtas.size = __be32_to_cpu(*sizep); - entryp = of_get_property(rtas.dev, - "linux,rtas-entry", NULL); - if (entryp == NULL) /* Ugh */ - rtas.entry = rtas.base; - else - rtas.entry = __be32_to_cpu(*entryp); - } else - rtas.dev = NULL; - } if (!rtas.dev) return; + no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base); + no_size = of_property_read_u32(rtas.dev, "rtas-size", &size); + if (no_base || no_size) { + of_node_put(rtas.dev); + rtas.dev = NULL; + return; + } + + rtas.base = base; + rtas.size = size; + no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); + rtas.entry = no_entry ? rtas.base : entry; + /* If RTAS was found, allocate the RMO buffer for it and look for * the stop-self token if any */ diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 2bf1f9b5b34b..3650732639ed 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -21,6 +21,7 @@ #include <linux/cpu.h> #include <linux/workqueue.h> #include <linux/slab.h> +#include <linux/topology.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -282,6 +283,7 @@ static void prrn_work_fn(struct work_struct *work) * the RTAS event. */ pseries_devicetree_update(-prrn_update_scope); + arch_update_cpu_topology(); } static DECLARE_WORK(prrn_work, prrn_work_fn); @@ -434,7 +436,10 @@ static void do_event_scan(void) } if (error == 0) { - pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0); + if (rtas_error_type((struct rtas_error_log *)logdata) != + RTAS_TYPE_PRRN) + pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, + 0); handle_rtas_event((struct rtas_error_log *)logdata); } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index f516ac508ae3..4697da895133 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -87,6 +87,15 @@ EXPORT_SYMBOL(machine_id); int boot_cpuid = -1; EXPORT_SYMBOL_GPL(boot_cpuid); +/* + * These are used in binfmt_elf.c to put aux entries on the stack + * for each elf executable being started. + */ +int dcache_bsize; +int icache_bsize; +int ucache_bsize; + + unsigned long klimit = (unsigned long) _end; /* diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 7fcf1f7f01c1..2f88f6cf1a42 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -59,14 +59,6 @@ EXPORT_SYMBOL(DMA_MODE_READ); EXPORT_SYMBOL(DMA_MODE_WRITE); /* - * These are used in binfmt_elf.c to put aux entries on the stack - * for each elf executable being started. - */ -int dcache_bsize; -int icache_bsize; -int ucache_bsize; - -/* * We're called here very early in the boot. * * Note that the kernel may be running at an address which is different diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6824157e4d2e..adf2084f214b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -77,25 +77,18 @@ int spinning_secondaries; u64 ppc64_pft_size; -/* Pick defaults since we might want to patch instructions - * before we've read this from the device tree. 
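The flat dline_size/iline_size fields replaced in setup_64.c give way to per-level cache descriptors. A sketch of the layout implied by init_cache_info() and the ppc64_caches users elsewhere in this patch; field order and types are guesses, and the real definition lives in the arch headers, which are not part of this diff:

struct ppc_cache_info {
	u32 size;		/* total size in bytes */
	u32 line_size;		/* coherency line size */
	u32 block_size;		/* block size used by the flush loops */
	u32 log_block_size;	/* log2(block_size) */
	u32 blocks_per_page;	/* PAGE_SIZE / block_size */
	u32 sets;		/* 0 here means fully associative */
	u32 assoc;		/* size / (sets * line_size), 0xffff when fully assoc */
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};
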
- */ struct ppc64_caches ppc64_caches = { - .dline_size = 0x40, - .log_dline_size = 6, - .iline_size = 0x40, - .log_iline_size = 6 + .l1d = { + .block_size = 0x40, + .log_block_size = 6, + }, + .l1i = { + .block_size = 0x40, + .log_block_size = 6 + }, }; EXPORT_SYMBOL_GPL(ppc64_caches); -/* - * These are used in binfmt_elf.c to put aux entries on the stack - * for each elf executable being started. - */ -int dcache_bsize; -int icache_bsize; -int ucache_bsize; - #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) void __init setup_tlb_core_data(void) { @@ -120,14 +113,12 @@ void __init setup_tlb_core_data(void) * If we have threads, we need either tlbsrx. * or e6500 tablewalk mode, or else TLB handlers * will be racy and could produce duplicate entries. + * Should we panic instead? */ - if (smt_enabled_at_boot >= 2 && - !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && - book3e_htw_mode != PPC_HTW_E6500) { - /* Should we panic instead? */ - WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", - __func__); - } + WARN_ONCE(smt_enabled_at_boot >= 2 && + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && + book3e_htw_mode != PPC_HTW_E6500, + "%s: unsupported MMU configuration\n", __func__); } } #endif @@ -408,74 +399,135 @@ void smp_release_cpus(void) * cache informations about the CPU that will be used by cache flush * routines and/or provided to userland */ + +static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize, + u32 bsize, u32 sets) +{ + info->size = size; + info->sets = sets; + info->line_size = lsize; + info->block_size = bsize; + info->log_block_size = __ilog2(bsize); + info->blocks_per_page = PAGE_SIZE / bsize; + + if (sets == 0) + info->assoc = 0xffff; + else + info->assoc = size / (sets * lsize); +} + +static bool __init parse_cache_info(struct device_node *np, + bool icache, + struct ppc_cache_info *info) +{ + static const char *ipropnames[] __initdata = { + "i-cache-size", + "i-cache-sets", + "i-cache-block-size", + "i-cache-line-size", + }; + static const char *dpropnames[] __initdata = { + "d-cache-size", + "d-cache-sets", + "d-cache-block-size", + "d-cache-line-size", + }; + const char **propnames = icache ? ipropnames : dpropnames; + const __be32 *sizep, *lsizep, *bsizep, *setsp; + u32 size, lsize, bsize, sets; + bool success = true; + + size = 0; + sets = -1u; + lsize = bsize = cur_cpu_spec->dcache_bsize; + sizep = of_get_property(np, propnames[0], NULL); + if (sizep != NULL) + size = be32_to_cpu(*sizep); + setsp = of_get_property(np, propnames[1], NULL); + if (setsp != NULL) + sets = be32_to_cpu(*setsp); + bsizep = of_get_property(np, propnames[2], NULL); + lsizep = of_get_property(np, propnames[3], NULL); + if (bsizep == NULL) + bsizep = lsizep; + if (lsizep != NULL) + lsize = be32_to_cpu(*lsizep); + if (bsizep != NULL) + bsize = be32_to_cpu(*bsizep); + if (sizep == NULL || bsizep == NULL || lsizep == NULL) + success = false; + + /* + * OF is weird .. it represents fully associative caches + * as "1 way" which doesn't make much sense and doesn't + * leave room for direct mapped. We'll assume that 0 + * in OF means direct mapped for that reason. 
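To make the derived numbers concrete: with the POWER8 L1D values hard-coded just below (size 0x10000, line size 128, 64 sets), init_cache_info() computes assoc = 0x10000 / (64 * 128) = 8 ways, while sets == 0 (fully associative after the swap described above) yields the 0xffff marker instead. The 8-way figure is only a worked example of the arithmetic, not a value stated in the patch.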
 void __init initialize_cache_info(void)
 {
-	struct device_node *np;
-	unsigned long num_cpus = 0;
+	struct device_node *cpu = NULL, *l2, *l3 = NULL;
+	u32 pvr;

 	DBG(" -> initialize_cache_info()\n");

-	for_each_node_by_type(np, "cpu") {
-		num_cpus += 1;
+	/*
+	 * All shipping POWER8 machines have a firmware bug that
+	 * puts incorrect information in the device-tree. This will
+	 * be (hopefully) fixed for future chips but for now hard
+	 * code the values if we are running on one of these
+	 */
+	pvr = PVR_VER(mfspr(SPRN_PVR));
+	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
+	    pvr == PVR_POWER8NVL) {
+						/* size    lsize   blk  sets */
+		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
+		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
+		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
+		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
+	} else
+		cpu = of_find_node_by_type(NULL, "cpu");
+
+	/*
+	 * We're assuming *all* of the CPUs have the same
+	 * d-cache and i-cache sizes... -Peter
+	 */
+	if (cpu) {
+		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
+			DBG("Argh, can't find dcache properties !\n");
+
+		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
+			DBG("Argh, can't find icache properties !\n");

 		/*
-		 * We're assuming *all* of the CPUs have the same
-		 * d-cache and i-cache sizes... -Peter
+		 * Try to find the L2 and L3 if any. Assume they are
+		 * unified and use the D-side properties.
 		 */
-		if (num_cpus == 1) {
-			const __be32 *sizep, *lsizep;
-			u32 size, lsize;
-
-			size = 0;
-			lsize = cur_cpu_spec->dcache_bsize;
-			sizep = of_get_property(np, "d-cache-size", NULL);
-			if (sizep != NULL)
-				size = be32_to_cpu(*sizep);
-			lsizep = of_get_property(np, "d-cache-block-size",
-						 NULL);
-			/* fallback if block size missing */
-			if (lsizep == NULL)
-				lsizep = of_get_property(np,
-							 "d-cache-line-size",
-							 NULL);
-			if (lsizep != NULL)
-				lsize = be32_to_cpu(*lsizep);
-			if (sizep == NULL || lsizep == NULL)
-				DBG("Argh, can't find dcache properties ! "
-				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
-			ppc64_caches.dsize = size;
-			ppc64_caches.dline_size = lsize;
-			ppc64_caches.log_dline_size = __ilog2(lsize);
-			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
-
-			size = 0;
-			lsize = cur_cpu_spec->icache_bsize;
-			sizep = of_get_property(np, "i-cache-size", NULL);
-			if (sizep != NULL)
-				size = be32_to_cpu(*sizep);
-			lsizep = of_get_property(np, "i-cache-block-size",
-						 NULL);
-			if (lsizep == NULL)
-				lsizep = of_get_property(np,
-							 "i-cache-line-size",
-							 NULL);
-			if (lsizep != NULL)
-				lsize = be32_to_cpu(*lsizep);
-			if (sizep == NULL || lsizep == NULL)
-				DBG("Argh, can't find icache properties ! "
-				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
-			ppc64_caches.isize = size;
-			ppc64_caches.iline_size = lsize;
-			ppc64_caches.log_iline_size = __ilog2(lsize);
-			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
+		l2 = of_find_next_cache_node(cpu);
+		of_node_put(cpu);
+		if (l2) {
+			parse_cache_info(l2, false, &ppc64_caches.l2);
+			l3 = of_find_next_cache_node(l2);
+			of_node_put(l2);
+		}
+		if (l3) {
+			parse_cache_info(l3, false, &ppc64_caches.l3);
+			of_node_put(l3);
 		}
 	}

 	/* For use by binfmt_elf */
-	dcache_bsize = ppc64_caches.dline_size;
-	icache_bsize = ppc64_caches.iline_size;
+	dcache_bsize = ppc64_caches.l1d.block_size;
+	icache_bsize = ppc64_caches.l1i.block_size;

 	DBG(" <- initialize_cache_info()\n");
 }
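of_find_next_cache_node(), used above to locate the L2 and L3, walks the device tree by following the CPU node's cache phandle to the next level. As an assumption-laden sketch only (property names per the usual "l2-cache"/"next-level-cache" convention; the helper name is made up), the walk looks roughly like this:

#include <linux/of.h>

/* Illustrative only: follow the phandle chain to the next cache level. */
static struct device_node *next_cache_level(struct device_node *np)
{
	struct device_node *next;

	next = of_parse_phandle(np, "l2-cache", 0);
	if (!next)
		next = of_parse_phandle(np, "next-level-cache", 0);
	return next;	/* caller must of_node_put() the returned node */
}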
" - "sizep: %p, lsizep: %p\n", sizep, lsizep); - - ppc64_caches.isize = size; - ppc64_caches.iline_size = lsize; - ppc64_caches.log_iline_size = __ilog2(lsize); - ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; + l2 = of_find_next_cache_node(cpu); + of_node_put(cpu); + if (l2) { + parse_cache_info(l2, false, &ppc64_caches.l2); + l3 = of_find_next_cache_node(l2); + of_node_put(l2); + } + if (l3) { + parse_cache_info(l3, false, &ppc64_caches.l3); + of_node_put(l3); } } /* For use by binfmt_elf */ - dcache_bsize = ppc64_caches.dline_size; - icache_bsize = ppc64_caches.iline_size; + dcache_bsize = ppc64_caches.l1d.block_size; + icache_bsize = ppc64_caches.l1i.block_size; DBG(" <- initialize_cache_info()\n"); } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 893bd7f79be6..46f89e66a273 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -19,7 +19,8 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/topology.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -707,7 +708,7 @@ void start_secondary(void *unused) unsigned int cpu = smp_processor_id(); int i, base; - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; smp_store_cpu_info(cpu); @@ -795,7 +796,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * se we pin us down to CPU 0 for a short while */ alloc_cpumask_var(&old_mask, GFP_NOWAIT); - cpumask_copy(old_mask, tsk_cpus_allowed(current)); + cpumask_copy(old_mask, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 4f24606afc3f..66711958493c 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -12,6 +12,7 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/ptrace.h> #include <asm/processor.h> diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 0e899e47c325..51db012808f5 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -10,6 +10,7 @@ #include <linux/irq.h> #include <linux/sched.h> #include <linux/interrupt.h> +#include <linux/nmi.h> void do_after_copyback(void) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index bc2e08d415fa..07b90725855e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -34,6 +34,7 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> @@ -57,6 +58,7 @@ #include <linux/clk-provider.h> #include <linux/suspend.h> #include <linux/rtc.h> +#include <linux/sched/cputime.h> #include <asm/trace.h> #include <asm/io.h> @@ -72,7 +74,6 @@ #include <asm/smp.h> #include <asm/vdso_datapage.h> #include <asm/firmware.h> -#include <asm/cputime.h> #include <asm/asm-prototypes.h> /* powerpc clocksource/clockevent code */ @@ -152,20 +153,11 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE /* - * Factors for converting from cputime_t (timebase ticks) to - * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). - * These are all stored as 0.64 fixed-point binary fractions. + * Factor for converting from cputime_t (timebase ticks) to + * microseconds. 
@@ -181,14 +173,8 @@ static void calc_cputime_factors(void)
 {
 	struct div_result res;

-	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_jiffies_factor = res.result_low;
 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
 	__cputime_usec_factor = res.result_low;
-	div128_by_32(1, 0, tb_ticks_per_sec, &res);
-	__cputime_sec_factor = res.result_low;
-	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_clockt_factor = res.result_low;
 }

 /*
@@ -271,25 +257,19 @@ void accumulate_stolen_time(void)
 	sst = scan_dispatch_log(acct->starttime_user);
 	ust = scan_dispatch_log(acct->starttime);
-	acct->system_time -= sst;
-	acct->user_time -= ust;
-	local_paca->stolen_time += ust + sst;
+	acct->stime -= sst;
+	acct->utime -= ust;
+	acct->steal_time += ust + sst;

 	local_paca->soft_enabled = save_soft_enabled;
 }

 static inline u64 calculate_stolen_time(u64 stop_tb)
 {
-	u64 stolen = 0;
+	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+		return scan_dispatch_log(stop_tb);

-	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
-		stolen = scan_dispatch_log(stop_tb);
-		get_paca()->accounting.system_time -= stolen;
-	}
-
-	stolen += get_paca()->stolen_time;
-	get_paca()->stolen_time = 0;
-	return stolen;
+	return 0;
 }

 #else /* CONFIG_PPC_SPLPAR */
@@ -305,28 +285,27 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
  * or soft irq state.
 */
 static unsigned long vtime_delta(struct task_struct *tsk,
-				 unsigned long *sys_scaled,
-				 unsigned long *stolen)
+				 unsigned long *stime_scaled,
+				 unsigned long *steal_time)
 {
 	unsigned long now, nowscaled, deltascaled;
-	unsigned long udelta, delta, user_scaled;
+	unsigned long stime;
+	unsigned long utime, utime_scaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);

 	WARN_ON_ONCE(!irqs_disabled());

 	now = mftb();
 	nowscaled = read_spurr(now);
-	acct->system_time += now - acct->starttime;
+	stime = now - acct->starttime;
 	acct->starttime = now;
 	deltascaled = nowscaled - acct->startspurr;
 	acct->startspurr = nowscaled;

-	*stolen = calculate_stolen_time(now);
+	*steal_time = calculate_stolen_time(now);

-	delta = acct->system_time;
-	acct->system_time = 0;
-	udelta = acct->user_time - acct->utime_sspurr;
-	acct->utime_sspurr = acct->user_time;
+	utime = acct->utime - acct->utime_sspurr;
+	acct->utime_sspurr = acct->utime;

 	/*
 	 * Because we don't read the SPURR on every kernel entry/exit,
@@ -338,62 +317,105 @@ static unsigned long vtime_delta(struct task_struct *tsk,
 	 * the user ticks get saved up in paca->user_time_scaled to be
 	 * used by account_process_tick.
 	 */
-	*sys_scaled = delta;
-	user_scaled = udelta;
-	if (deltascaled != delta + udelta) {
-		if (udelta) {
-			*sys_scaled = deltascaled * delta / (delta + udelta);
-			user_scaled = deltascaled - *sys_scaled;
+	*stime_scaled = stime;
+	utime_scaled = utime;
+	if (deltascaled != stime + utime) {
+		if (utime) {
+			*stime_scaled = deltascaled * stime / (stime + utime);
+			utime_scaled = deltascaled - *stime_scaled;
 		} else {
-			*sys_scaled = deltascaled;
+			*stime_scaled = deltascaled;
 		}
 	}
-	acct->user_time_scaled += user_scaled;
+	acct->utime_scaled += utime_scaled;

-	return delta;
+	return stime;
 }

 void vtime_account_system(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
+
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);

-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_system_time(tsk, 0, delta);
-	tsk->stimescaled += sys_scaled;
-	if (stolen)
-		account_steal_time(stolen);
+	stime -= min(stime, steal_time);
+	acct->steal_time += steal_time;
+
+	if ((tsk->flags & PF_VCPU) && !irq_count()) {
+		acct->gtime += stime;
+		acct->utime_scaled += stime_scaled;
+	} else {
+		if (hardirq_count())
+			acct->hardirq_time += stime;
+		else if (in_serving_softirq())
+			acct->softirq_time += stime;
+		else
+			acct->stime += stime;
+
+		acct->stime_scaled += stime_scaled;
+	}
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);

 void vtime_account_idle(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);

-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_idle_time(delta + stolen);
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+	acct->idle_time += stime + steal_time;
 }

 /*
- * Transfer the user time accumulated in the paca
- * by the exception entry and exit code to the generic
- * process user time records.
+ * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-	cputime_t utime, utimescaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);

-	utime = acct->user_time;
-	utimescaled = acct->user_time_scaled;
-	acct->user_time = 0;
-	acct->user_time_scaled = 0;
+	if (acct->utime)
+		account_user_time(tsk, cputime_to_nsecs(acct->utime));
+
+	if (acct->utime_scaled)
+		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+
+	if (acct->gtime)
+		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
+
+	if (acct->steal_time)
+		account_steal_time(cputime_to_nsecs(acct->steal_time));
+
+	if (acct->idle_time)
+		account_idle_time(cputime_to_nsecs(acct->idle_time));
+
+	if (acct->stime)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
+					  CPUTIME_SYSTEM);
+	if (acct->stime_scaled)
+		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+	if (acct->hardirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
+					  CPUTIME_IRQ);
+	if (acct->softirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
+					  CPUTIME_SOFTIRQ);
+
+	acct->utime = 0;
+	acct->utime_scaled = 0;
 	acct->utime_sspurr = 0;
-	account_user_time(tsk, utime);
-	tsk->utimescaled += utimescaled;
+	acct->gtime = 0;
+	acct->steal_time = 0;
+	acct->idle_time = 0;
+	acct->stime = 0;
+	acct->stime_scaled = 0;
+	acct->hardirq_time = 0;
+	acct->softirq_time = 0;
 }

 #ifdef CONFIG_PPC32
@@ -407,8 +429,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
 	struct cpu_accounting_data *acct = get_accounting(current);

 	acct->starttime = get_accounting(prev)->starttime;
-	acct->system_time = 0;
-	acct->user_time = 0;
+	acct->startspurr = get_accounting(prev)->startspurr;
 }
 #endif /* CONFIG_PPC32 */

@@ -689,7 +710,7 @@ unsigned long long running_clock(void)
 	 * time and on a host which doesn't do any virtualisation TB *should* equal
 	 * VTB so it makes no difference anyway.
 	 */
-	return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]);
+	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
 }
 #endif

@@ -1018,7 +1039,6 @@ void __init time_init(void)
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	calc_cputime_factors();
-	setup_cputime_one_jiffy();

 	/*
 	 * Compute scale factor for sched_clock.
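The time.c rework above moves powerpc to an accumulate-then-flush model: vtime_account_system()/vtime_account_idle() only bucket timebase deltas into the paca's accounting fields, and vtime_flush() later hands each bucket to the generic code in one pass. A kernel-independent sketch of that pattern (all names below are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative buckets, mirroring the acct->{utime,stime,...} fields above. */
struct acct_buckets {
	uint64_t user, system, irq, idle;
};

static void bucket_delta(struct acct_buckets *a, int in_irq, int in_user,
			 uint64_t ticks)
{
	/* Cheap per-event path: just accumulate, no global accounting yet. */
	if (in_irq)
		a->irq += ticks;
	else if (in_user)
		a->user += ticks;
	else
		a->system += ticks;
}

static void flush_buckets(struct acct_buckets *a)
{
	/* One batched hand-off, analogous to vtime_flush() above. */
	printf("user=%llu system=%llu irq=%llu idle=%llu\n",
	       (unsigned long long)a->user, (unsigned long long)a->system,
	       (unsigned long long)a->irq, (unsigned long long)a->idle);
	*a = (struct acct_buckets){ 0 };
}

int main(void)
{
	struct acct_buckets a = { 0 };

	bucket_delta(&a, 0, 1, 300);	/* user ticks */
	bucket_delta(&a, 1, 0, 50);	/* hard-irq ticks */
	flush_buckets(&a);
	return 0;
}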
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e6cc56b61d01..ff365f9de27a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -17,6 +17,7 @@

 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/sched/debug.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 4111d30badfa..22b01a3962f0 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -736,16 +736,14 @@ static int __init vdso_init(void)
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		vdso_data->platform |= 1;
 	vdso_data->physicalMemorySize = memblock_phys_mem_size();
-	vdso_data->dcache_size = ppc64_caches.dsize;
-	vdso_data->dcache_line_size = ppc64_caches.dline_size;
-	vdso_data->icache_size = ppc64_caches.isize;
-	vdso_data->icache_line_size = ppc64_caches.iline_size;
-
-	/* XXXOJN: Blocks should be added to ppc64_caches and used instead */
-	vdso_data->dcache_block_size = ppc64_caches.dline_size;
-	vdso_data->icache_block_size = ppc64_caches.iline_size;
-	vdso_data->dcache_log_block_size = ppc64_caches.log_dline_size;
-	vdso_data->icache_log_block_size = ppc64_caches.log_iline_size;
+	vdso_data->dcache_size = ppc64_caches.l1d.size;
+	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
+	vdso_data->icache_size = ppc64_caches.l1i.size;
+	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
+	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
+	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
+	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
+	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;

 	/*
 	 * Calculate the size of the 64 bits vDSO