Diffstat (limited to 'arch/loongarch/include')
-rw-r--r--  arch/loongarch/include/asm/atomic.h        4
-rw-r--r--  arch/loongarch/include/asm/barrier.h     108
-rw-r--r--  arch/loongarch/include/asm/cmpxchg.h       1
-rw-r--r--  arch/loongarch/include/asm/futex.h         1
-rw-r--r--  arch/loongarch/include/asm/hardirq.h       2
-rw-r--r--  arch/loongarch/include/asm/irq.h           2
-rw-r--r--  arch/loongarch/include/asm/percpu.h      194
-rw-r--r--  arch/loongarch/include/asm/pgtable.h      21
-rw-r--r--  arch/loongarch/include/asm/smp.h         124
-rw-r--r--  arch/loongarch/include/asm/stackframe.h   17
-rw-r--r--  arch/loongarch/include/asm/tlbflush.h     13
-rw-r--r--  arch/loongarch/include/asm/topology.h      7
12 files changed, 488 insertions, 6 deletions
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 932352342b12..979367ad4e2c 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -162,6 +162,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
"2: \n"
+ __WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "I" (-i));
@@ -174,6 +175,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
"2: \n"
+ __WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "r" (i));
@@ -323,6 +325,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
"2: \n"
+ __WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "I" (-i));
@@ -335,6 +338,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
"2: \n"
+ __WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "r" (i));
diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h
index e57571bcaf4f..b6517eeeb141 100644
--- a/arch/loongarch/include/asm/barrier.h
+++ b/arch/loongarch/include/asm/barrier.h
@@ -18,6 +18,19 @@
#define mb() fast_mb()
#define iob() fast_iob()
+#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB " dbar 0 \n"
+#else
+#define __WEAK_LLSC_MB " \n"
+#endif
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
@@ -46,6 +59,101 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
return mask;
}
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ unsigned long __tmp = 0; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ *(__u8 *)__u.__c = *(volatile __u8 *)p; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(__u16 *)__u.__c = *(volatile __u16 *)p; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amor_db.w %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u32 *)__u.__c) \
+ : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amor_db.d %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u64 *)__u.__c) \
+ : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ } \
+ (typeof(*p))__u.__val; \
+})
+
+#define __smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ unsigned long __tmp; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ __smp_mb(); \
+ *(volatile __u8 *)p = *(__u8 *)__u.__c; \
+ break; \
+ case 2: \
+ __smp_mb(); \
+ *(volatile __u16 *)p = *(__u16 *)__u.__c; \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
+
+#define __smp_store_mb(p, v) \
+do { \
+ union { typeof(p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(p)) (v) }; \
+ unsigned long __tmp; \
+ switch (sizeof(p)) { \
+ case 1: \
+ *(volatile __u8 *)&p = *(__u8 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(volatile __u16 *)&p = *(__u16 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
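The new __smp_load_acquire()/__smp_store_release() use the amor_db.w/d and amswap_db.w/d instructions (whose _db suffix carries built-in barrier semantics) for 32/64-bit accesses, and fall back to a plain access paired with dbar 0 for 8/16-bit ones. A minimal usage sketch of the acquire/release pair, with illustrative names that are not from this patch:

	#include <linux/printk.h>

	/* Producer publishes data, then sets a flag with release semantics;
	 * the consumer reads the flag with acquire semantics before touching
	 * the data.  smp_store_release()/smp_load_acquire() resolve to the
	 * macros added above on LoongArch.
	 */
	static int shared_data;
	static int data_ready;

	static void producer(void)
	{
		shared_data = 42;			/* plain store */
		smp_store_release(&data_ready, 1);	/* ordered after the store above */
	}

	static void consumer(void)
	{
		if (smp_load_acquire(&data_ready))	/* ordered before the load below */
			pr_info("data = %d\n", shared_data);
	}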
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 48613b872bc8..75b3a4478652 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -59,6 +59,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
" " st " $t0, %1 \n" \
" beq $zero, $t0, 1b \n" \
"2: \n" \
+ __WEAK_LLSC_MB \
: "=&r" (__ret), "=ZB"(*m) \
: "ZB"(*m), "Jr" (old), "Jr" (new) \
: "t0", "memory"); \
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
index b27d55f92db7..9de8231694ec 100644
--- a/arch/loongarch/include/asm/futex.h
+++ b/arch/loongarch/include/asm/futex.h
@@ -86,6 +86,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
"2: sc.w $t0, %2 \n"
" beq $zero, $t0, 1b \n"
"3: \n"
+ __WEAK_LLSC_MB
" .section .fixup,\"ax\" \n"
"4: li.d %0, %6 \n"
" b 3b \n"
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index d32f83938880..befe8184aa08 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -21,4 +21,6 @@ typedef struct {
DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+#define __ARCH_IRQ_STAT
+
#endif /* _ASM_HARDIRQ_H */
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index f3f1baf7027c..ace3ea6da72e 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -125,6 +125,8 @@ extern struct irq_domain *pch_lpc_domain;
extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+
#include <asm-generic/irq.h>
#endif /* _ASM_IRQ_H */
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index b03d8f8b9fd3..34f15a6fb1e7 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -5,6 +5,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/cmpxchg.h>
+
/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");
@@ -15,6 +17,198 @@ static inline void set_my_cpu_offset(unsigned long off)
}
#define __my_cpu_offset __my_cpu_offset
+#define PERCPU_OP(op, asm_op, c_op) \
+static inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+{ \
+ unsigned long ret; \
+ \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__( \
+ "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ default: \
+ ret = 0; \
+ BUILD_BUG(); \
+ } \
+ \
+ return ret c_op val; \
+}
+
+PERCPU_OP(add, add, +)
+PERCPU_OP(and, and, &)
+PERCPU_OP(or, or, |)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ default:
+ ret = 0;
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
+
+ case 8:
+ return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable_notrace(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable_notrace(); \
+ __ret; \
+})
+
+#define _percpu_read(pcp) \
+({ \
+ typeof(pcp) __retval; \
+ __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
+ __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+ __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
+} while (0) \
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable_notrace(); \
+ __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+ (val), sizeof(pcp)); \
+ preempt_enable_notrace(); \
+ __retval; \
+})
+
+#define _percpu_add(pcp, val) \
+ _pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+
+#define _percpu_and(pcp, val) \
+ _pcp_protect(__percpu_and, pcp, val)
+
+#define _percpu_or(pcp, val) \
+ _pcp_protect(__percpu_or, pcp, val)
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
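With the am*.{w,d}-based helpers above, 32/64-bit this_cpu_add()/this_cpu_and()/this_cpu_or()/this_cpu_xchg() boil down to single AM instructions on the local CPU's copy, while this_cpu_read()/this_cpu_write() use ldx/stx relative to $r21 (the per-CPU base register). A small usage sketch; the counter name is illustrative, not from this patch:

	#include <linux/percpu.h>

	/* Per-CPU event counter; the 8-byte add goes through this_cpu_add_8()
	 * and hence a single amadd.d on this CPU's instance.
	 */
	static DEFINE_PER_CPU(unsigned long, my_event_count);

	static void note_event(void)
	{
		this_cpu_add(my_event_count, 1);
	}

	static unsigned long read_local_count(void)
	{
		return this_cpu_read(my_event_count);	/* ldx.d against $r21 */
	}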
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 8920dd8b297b..5e33987d0a13 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -279,8 +279,29 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1:" __LL "%[tmp], %[buddy] \n"
+ " bnez %[tmp], 2f \n"
+ " or %[tmp], %[tmp], %[global] \n"
+ __SC "%[tmp], %[buddy] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
}
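The set_pte() hunk turns the "mark the buddy PTE global" step into an ll/sc loop under CONFIG_SMP, because two CPUs may install the two halves of a buddy pair concurrently. Conceptually it is the atomic form of the !SMP branch kept below it; a C-level sketch using a compare-and-swap, assuming the file's ptep_buddy() helper (this is not the code the patch adds):

	/* Make the buddy global only if it is still pte_none(); losing the
	 * race is harmless because the winner already set _PAGE_GLOBAL.
	 */
	static void sketch_mark_buddy_global(pte_t *ptep)
	{
		pte_t *buddy = ptep_buddy(ptep);

		if (pte_none(*buddy))
			cmpxchg(&pte_val(*buddy), 0UL, (unsigned long)_PAGE_GLOBAL);
	}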
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
new file mode 100644
index 000000000000..551e1f37c705
--- /dev/null
+++ b/arch/loongarch/include/asm/smp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+void loongson3_smp_setup(void);
+void loongson3_prepare_cpus(unsigned int max_cpus);
+void loongson3_boot_secondary(int cpu, struct task_struct *idle);
+void loongson3_init_secondary(void);
+void loongson3_smp_finish(void);
+void loongson3_send_ipi_single(int cpu, unsigned int action);
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+#ifdef CONFIG_HOTPLUG_CPU
+int loongson3_cpu_disable(void);
+void loongson3_cpu_die(unsigned int cpu);
+#endif
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+ loongson3_smp_setup();
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void plat_smp_setup(void) { }
+
+#endif /* !CONFIG_SMP */
+
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number. This will only
+ * not be idempotent when cpus failed to come on-line. */
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+#define cpu_physical_id(cpu) cpu_logical_map(cpu)
+
+#define SMP_BOOT_CPU 0x1
+#define SMP_RESCHEDULE 0x2
+#define SMP_CALL_FUNCTION 0x4
+
+struct secondary_data {
+ unsigned long stack;
+ unsigned long thread_info;
+};
+extern struct secondary_data cpuboot_data;
+
+extern asmlinkage void smpboot_entry(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * Generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ return loongson3_cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ loongson3_cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#endif /* __ASM_SMP_H */
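The new smp.h wires the generic SMP hooks to the loongson3_* primitives: reschedule and call-function requests become IPIs encoded by the SMP_* action bits. A sketch of how the call-function path is typically exercised from common code; the callback and its argument are illustrative:

	#include <linux/smp.h>
	#include <linux/printk.h>

	static void remote_work(void *info)
	{
		/* runs on the target CPU in IPI context */
		pr_info("cpu%d handled request %lu\n", smp_processor_id(),
			(unsigned long)info);
	}

	static void kick_cpu(int cpu)
	{
		/* generic code ends up in arch_send_call_function_single_ipi(),
		 * i.e. loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION)
		 */
		smp_call_function_single(cpu, remote_work, (void *)1UL, 1);
	}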
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 44151b878d00..4ca953062b5b 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -77,17 +77,24 @@
* new value in sp.
*/
.macro get_saved_sp docfi=0
- la.abs t1, kernelsp
- move t0, sp
+ la.abs t1, kernelsp
+#ifdef CONFIG_SMP
+ csrrd t0, PERCPU_BASE_KS
+ LONG_ADD t1, t1, t0
+#endif
+ move t0, sp
.if \docfi
.cfi_register sp, t0
.endif
- LONG_L sp, t1, 0
+ LONG_L sp, t1, 0
.endm
.macro set_saved_sp stackp temp temp2
- la.abs \temp, kernelsp
- LONG_S \stackp, \temp, 0
+ la.abs \temp, kernelsp
+#ifdef CONFIG_SMP
+ LONG_ADD \temp, \temp, u0
+#endif
+ LONG_S \stackp, \temp, 0
.endm
.macro SAVE_SOME docfi=0
diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h
index 36bd6d11dc2d..a0785e590681 100644
--- a/arch/loongarch/include/asm/tlbflush.h
+++ b/arch/loongarch/include/asm/tlbflush.h
@@ -25,6 +25,17 @@ extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
+#ifdef CONFIG_SMP
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
+extern void flush_tlb_kernel_range(unsigned long, unsigned long);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_one(unsigned long vaddr);
+
+#else /* CONFIG_SMP */
+
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
@@ -32,4 +43,6 @@ extern void local_flush_tlb_one(unsigned long vaddr);
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
+#endif /* CONFIG_SMP */
+
#endif /* __ASM_TLBFLUSH_H */
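Under CONFIG_SMP the flush_tlb_* operations become out-of-line functions that broadcast to other CPUs instead of aliasing the local_* variants. A plausible shape for flush_tlb_all(), sketched here for illustration only (the actual implementation lives elsewhere in this series):

	static void flush_tlb_all_ipi(void *info)
	{
		local_flush_tlb_all();
	}

	void flush_tlb_all(void)
	{
		/* run the local flush on every online CPU and wait for completion */
		on_each_cpu(flush_tlb_all_ipi, NULL, 1);
	}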
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 9ac71a25207a..da135841e5b1 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -7,7 +7,12 @@
#include <linux/smp.h>
-#define cpu_logical_map(cpu) 0
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
+#define topology_core_id(cpu) (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
+#endif
#include <asm-generic/topology.h>