Diffstat (limited to 'arch/powerpc/include/asm/hw_irq.h')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 77
1 file changed, 22 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 674e5aaafcbd..26ede09c521d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -113,14 +113,7 @@ static inline void __hard_RI_enable(void)
 
 static inline notrace unsigned long irq_soft_mask_return(void)
 {
-        unsigned long flags;
-
-        asm volatile(
-                "lbz %0,%1(13)"
-                : "=r" (flags)
-                : "i" (offsetof(struct paca_struct, irq_soft_mask)));
-
-        return flags;
+        return READ_ONCE(local_paca->irq_soft_mask);
 }
 
 /*
@@ -130,7 +123,6 @@ static inline notrace unsigned long irq_soft_mask_return(void)
  */
 static inline notrace void irq_soft_mask_set(unsigned long mask)
 {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
         /*
          * The irq mask must always include the STD bit if any are set.
          *
@@ -145,49 +137,27 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
          * unmasks to be replayed, among other things. For now, take
          * the simple approach.
          */
-        WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+                WARN_ON(mask && !(mask & IRQS_DISABLED));
 
-        asm volatile(
-                "stb %0,%1(13)"
-                :
-                : "r" (mask),
-                  "i" (offsetof(struct paca_struct, irq_soft_mask))
-                : "memory");
+        WRITE_ONCE(local_paca->irq_soft_mask, mask);
+        barrier();
 }
 
 static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
 {
-        unsigned long flags;
-
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-        WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+        unsigned long flags = irq_soft_mask_return();
 
-        asm volatile(
-                "lbz %0,%1(13); stb %2,%1(13)"
-                : "=&r" (flags)
-                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
-                  "r" (mask)
-                : "memory");
+        irq_soft_mask_set(mask);
 
         return flags;
 }
 
 static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 {
-        unsigned long flags, tmp;
-
-        asm volatile(
-                "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
-                : "=&r" (flags), "=r" (tmp)
-                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
-                  "r" (mask)
-                : "memory");
+        unsigned long flags = irq_soft_mask_return();
 
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-        WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
-#endif
+        irq_soft_mask_set(flags | mask);
 
         return flags;
 }
@@ -312,9 +282,7 @@ static inline bool pmi_irq_pending(void)
                 flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);    \
                 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;          \
                 if (!arch_irqs_disabled_flags(flags)) {                 \
-                        asm ("stdx %%r1, 0, %1 ;"                       \
-                             : "=m" (local_paca->saved_r1)              \
-                             : "b" (&local_paca->saved_r1));            \
+                        WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
                         trace_hardirqs_off();                           \
                 }                                                       \
         } while(0)
@@ -353,11 +321,13 @@ bool power_pmu_wants_prompt_pmi(void);
  */
 static inline bool should_hard_irq_enable(void)
 {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-        WARN_ON(mfmsr() & MSR_EE);
-#endif
-#ifdef CONFIG_PERF_EVENTS
+        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+                WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+                WARN_ON(mfmsr() & MSR_EE);
+        }
+
+        if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+                return false;
         /*
          * If the PMU is not running, there is not much reason to enable
          * MSR[EE] in irq handlers because any interrupts would just be
@@ -372,9 +342,6 @@ static inline bool should_hard_irq_enable(void)
                 return false;
 
         return true;
-#else
-        return false;
-#endif
 }
 
 /*
@@ -382,11 +349,11 @@ static inline bool should_hard_irq_enable(void)
  */
 static inline void do_hard_irq_enable(void)
 {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-        WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-        WARN_ON(mfmsr() & MSR_EE);
-#endif
+        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+                WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+                WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+                WARN_ON(mfmsr() & MSR_EE);
+        }
         /*
          * This allows PMI interrupts (and watchdog soft-NMIs) through.
          * There is no other reason to enable this way.
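The core of the conversion is that the open-coded "lbz %0,%1(13)" / "stb %0,%1(13)" sequences, which addressed the PACA through r13 by hand, become plain C field accesses. In the kernel, local_paca is a register variable pinned to r13, so the compiler can emit the same r13-relative byte load and store itself; READ_ONCE()/WRITE_ONCE() keep each access a single non-torn load or store, and barrier() takes over the job of the old asm's "memory" clobber. Below is a minimal userspace sketch of that accessor pattern. Everything in it is a simplified stand-in: the READ_ONCE/WRITE_ONCE/barrier macros approximate the rwonce.h and compiler.h definitions, and struct paca_sketch is not the real struct paca_struct.

/* paca_sketch.c - illustrative stand-ins only, not kernel code */
#include <stdio.h>

/* Simplified versions of the kernel's once-accessors: a volatile
 * access forces exactly one load/store; the empty asm with a
 * "memory" clobber is a pure compiler barrier, as in compiler.h. */
#define READ_ONCE(x)            (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)        (*(volatile typeof(x) *)&(x) = (v))
#define barrier()               asm volatile("" ::: "memory")

#define IRQS_ENABLED    0x00
#define IRQS_DISABLED   0x01    /* mirrors the kernel's soft-mask flag value */

/* Stand-in for struct paca_struct; in the kernel local_paca is
 * declared as a register variable living in r13, which is what lets
 * these plain field accesses replace the hand-written asm. */
struct paca_sketch {
        unsigned char irq_soft_mask;
};

static struct paca_sketch the_paca = { IRQS_ENABLED };
static struct paca_sketch *local_paca = &the_paca;

static inline unsigned long irq_soft_mask_return(void)
{
        return READ_ONCE(local_paca->irq_soft_mask);
}

static inline void irq_soft_mask_set(unsigned long mask)
{
        WRITE_ONCE(local_paca->irq_soft_mask, mask);
        barrier();      /* stands in for the removed asm's "memory" clobber */
}

static inline unsigned long irq_soft_mask_set_return(unsigned long mask)
{
        unsigned long flags = irq_soft_mask_return();

        irq_soft_mask_set(mask);
        return flags;
}

int main(void)
{
        unsigned long old = irq_soft_mask_set_return(IRQS_DISABLED);

        printf("was %#lx, now %#lx\n", old, irq_soft_mask_return());
        return 0;
}

A side effect visible in the diff is that the set_return and or_return variants no longer need their own asm (or the tmp scratch register): they compose irq_soft_mask_return() and irq_soft_mask_set(), which is why the deletion count dwarfs the insertions.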
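The other recurring change is #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG giving way to IS_ENABLED(): the WARN_ON() checks stay visible to the compiler in every configuration, so they are always parsed and type-checked, yet compile to nothing when the option is off. A rough sketch of the idiom follows, with the config flag hard-coded and assert() as a loud stand-in for the kernel's WARN_ON(); the real IS_ENABLED() decodes CONFIG_* defines rather than taking a literal.

/* is_enabled_sketch.c - illustrative stand-ins only */
#include <assert.h>

#define IRQS_DISABLED           0x01    /* mirrors the kernel flag value */
#define CONFIG_SOFT_MASK_DEBUG  1       /* hypothetical flag; flip to 0 */
#define IS_ENABLED(option)      (option)
#define WARN_ON(cond)           assert(!(cond))

static void check_mask(unsigned long mask)
{
        /* Both branches always compile; with the flag at 0 the check
         * is discarded as dead code, whereas an #ifdef would hide it
         * from the compiler entirely and let it bit-rot. */
        if (IS_ENABLED(CONFIG_SOFT_MASK_DEBUG))
                WARN_ON(mask && !(mask & IRQS_DISABLED));
}

int main(void)
{
        check_mask(IRQS_DISABLED);      /* passes; a nonzero mask without
                                           IRQS_DISABLED would trip the check */
        return 0;
}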