Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r-- | arch/parisc/kernel/cache.c     | 331
-rw-r--r-- | arch/parisc/kernel/entry.S     |  22
-rw-r--r-- | arch/parisc/kernel/kprobes.c   |   2
-rw-r--r-- | arch/parisc/kernel/pacache.S   |  94
-rw-r--r-- | arch/parisc/kernel/process.c   |   4
-rw-r--r-- | arch/parisc/kernel/processor.c |  11
-rw-r--r-- | arch/parisc/kernel/setup.c     |   2
-rw-r--r-- | arch/parisc/kernel/time.c      |   6
-rw-r--r-- | arch/parisc/kernel/traps.c     |   2
9 files changed, 263 insertions, 211 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 23348199f3f8..c8a11fcecf4c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,6 +27,7 @@
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/shmparam.h>
+#include <asm/mmu_context.h>
 
 int split_tlb __ro_after_init;
 int dcache_stride __ro_after_init;
@@ -91,7 +92,7 @@ static inline void flush_data_cache(void)
 }
 
 
-/* Virtual address of pfn. */
+/* Kernel virtual address of pfn. */
 #define pfn_va(pfn)	__va(PFN_PHYS(pfn))
 
 void
@@ -124,11 +125,13 @@ show_cache_info(struct seq_file *m)
 		   cache_info.ic_size/1024 );
 	if (cache_info.dc_loop != 1)
 		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
-	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
+	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
 		   cache_info.dc_size/1024,
 		   (cache_info.dc_conf.cc_wt ? "WT":"WB"),
 		   (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
-		   ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
+		   ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
+		   cache_info.dc_conf.cc_alias
+	);
 	seq_printf(m, "ITLB entries\t: %ld\n"
 		   "DTLB entries\t: %ld%s\n",
 		   cache_info.it_size, cache_info.dt_size,
@@ -324,25 +327,81 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	preempt_enable();
 }
 
-static inline void
-__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
-		   unsigned long physaddr)
+static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	if (!static_branch_likely(&parisc_has_cache))
-		return;
+	unsigned long flags, space, pgd, prot;
+#ifdef CONFIG_TLB_PTLOCK
+	unsigned long pgd_lock;
+#endif
+
+	vmaddr &= PAGE_MASK;
+
 	preempt_disable();
-	purge_dcache_page_asm(physaddr, vmaddr);
+
+	/* Set context for flush */
+	local_irq_save(flags);
+	prot = mfctl(8);
+	space = mfsp(SR_USER);
+	pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+	pgd_lock = mfctl(28);
+#endif
+	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
+	local_irq_restore(flags);
+
+	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
 	if (vma->vm_flags & VM_EXEC)
-		flush_icache_page_asm(physaddr, vmaddr);
+		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+	flush_tlb_page(vma, vmaddr);
+
+	/* Restore previous context */
+	local_irq_save(flags);
+#ifdef CONFIG_TLB_PTLOCK
+	mtctl(pgd_lock, 28);
+#endif
+	mtctl(pgd, 25);
+	mtsp(space, SR_USER);
+	mtctl(prot, 8);
+	local_irq_restore(flags);
+
 	preempt_enable();
 }
 
+static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *ptep = NULL;
+	pgd_t *pgd = mm->pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (!pgd_none(*pgd)) {
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_none(*p4d)) {
+			pud = pud_offset(p4d, addr);
+			if (!pud_none(*pud)) {
+				pmd = pmd_offset(pud, addr);
+				if (!pmd_none(*pmd))
+					ptep = pte_offset_map(pmd, addr);
+			}
+		}
+	}
+	return ptep;
+}
+
+static inline bool pte_needs_flush(pte_t pte)
+{
+	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
+		== (_PAGE_PRESENT | _PAGE_ACCESSED);
+}
+
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
+	unsigned long count = 0;
 	pgoff_t pgoff;
 
 	if (mapping && !mapping_mapped(mapping)) {
@@ -357,33 +416,52 @@ void flush_dcache_page(struct page *page)
 
 	pgoff = page->index;
 
-	/* We have carefully arranged in arch_get_unmapped_area() that
+	/*
+	 * We have carefully arranged in arch_get_unmapped_area() that
 	 * *any* mappings of a file are always congruently mapped (whether
 	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
-	 * to flush one address here for them all to become coherent */
-
+	 * to flush one address here for them all to become coherent
+	 * on machines that support equivalent aliasing
+	 */
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
+		if (parisc_requires_coherency()) {
+			pte_t *ptep;
 
-		/* The TLB is the engine of coherence on parisc: The
-		 * CPU is entitled to speculate any page with a TLB
-		 * mapping, so here we kill the mapping then flush the
-		 * page along a special flush only alias mapping.
-		 * This guarantees that the page is no-longer in the
-		 * cache for any process and nor may it be
-		 * speculatively read in (until the user or kernel
-		 * specifically accesses it, of course) */
-
-		flush_tlb_page(mpnt, addr);
-		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
-					!= (addr & (SHM_COLOUR - 1))) {
-			__flush_cache_page(mpnt, addr, page_to_phys(page));
-			if (parisc_requires_coherency() && old_addr)
-				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
-			old_addr = addr;
+			ptep = get_ptep(mpnt->vm_mm, addr);
+			if (ptep && pte_needs_flush(*ptep))
+				flush_user_cache_page(mpnt, addr);
+		} else {
+			/*
+			 * The TLB is the engine of coherence on parisc:
+			 * The CPU is entitled to speculate any page
+			 * with a TLB mapping, so here we kill the
+			 * mapping then flush the page along a special
+			 * flush only alias mapping. This guarantees that
+			 * the page is no-longer in the cache for any
+			 * process and nor may it be speculatively read
+			 * in (until the user or kernel specifically
+			 * accesses it, of course)
+			 */
+			flush_tlb_page(mpnt, addr);
+			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+					!= (addr & (SHM_COLOUR - 1))) {
+				__flush_cache_page(mpnt, addr, page_to_phys(page));
+				/*
+				 * Software is allowed to have any number
+				 * of private mappings to a page.
+				 */
+				if (!(mpnt->vm_flags & VM_SHARED))
+					continue;
+				if (old_addr)
+					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+						old_addr, addr, mpnt->vm_file);
+				old_addr = addr;
+			}
 		}
+		WARN_ON(++count == 4096);
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
@@ -417,23 +495,16 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
+	pr_info("Calculated flush threshold is %lu KiB\n",
+		threshold/1024);
 
 	/*
-	 * The threshold computed above isn't very reliable since the
-	 * flush times depend greatly on the percentage of dirty lines
-	 * in the flush range. Further, the whole cache time doesn't
-	 * include the time to refill lines that aren't in the mm/vma
-	 * being flushed. By timing glibc build and checks on mako cpus,
-	 * the following formula seems to work reasonably well. The
-	 * value from the timing calculation is too small, and increases
-	 * build and check times by almost a factor two.
+	 * The threshold computed above isn't very reliable. The following
+	 * heuristic works reasonably well on c8000/rp3440.
 	 */
 	threshold2 = cache_info.dc_size * num_online_cpus();
-	if (threshold2 > threshold)
-		threshold = threshold2;
-	if (threshold)
-		parisc_cache_flush_threshold = threshold;
+	parisc_cache_flush_threshold = threshold2;
 
 	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
 		parisc_cache_flush_threshold/1024);
@@ -489,19 +560,47 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *pg)
+static void flush_cache_page_if_present(struct vm_area_struct *vma,
+	unsigned long vmaddr, unsigned long pfn)
 {
-	/* Copy using kernel mapping.  No coherency is needed (all in
-	   kunmap) for the `to' page.  However, the `from' page needs to
-	   be flushed through a mapping equivalent to the user mapping
-	   before it can be accessed through the kernel mapping. */
-	preempt_disable();
-	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	copy_page_asm(vto, vfrom);
-	preempt_enable();
+	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+
+	/*
+	 * The pte check is racy and sometimes the flush will trigger
+	 * a non-access TLB miss. Hopefully, the page has already been
+	 * flushed.
+	 */
+	if (ptep && pte_needs_flush(*ptep))
+		flush_cache_page(vma, vmaddr, pfn);
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *kto, *kfrom;
+
+	kfrom = kmap_local_page(from);
+	kto = kmap_local_page(to);
+	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+	copy_page_asm(kto, kfrom);
+	kunmap_local(kto);
+	kunmap_local(kfrom);
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long user_vaddr, void *dst, void *src, int len)
+{
+	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long user_vaddr, void *dst, void *src, int len)
+{
+	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
 }
-EXPORT_SYMBOL(copy_user_page);
 
 /* __flush_tlb_range()
  *
@@ -532,92 +631,105 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
 	return 0;
 }
 
-static inline unsigned long mm_total_size(struct mm_struct *mm)
+static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	struct vm_area_struct *vma;
-	unsigned long usize = 0;
-
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		usize += vma->vm_end - vma->vm_start;
-	return usize;
-}
-
-static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
-{
-	pte_t *ptep = NULL;
+	unsigned long addr, pfn;
+	pte_t *ptep;
 
-	if (!pgd_none(*pgd)) {
-		p4d_t *p4d = p4d_offset(pgd, addr);
-		if (!p4d_none(*p4d)) {
-			pud_t *pud = pud_offset(p4d, addr);
-			if (!pud_none(*pud)) {
-				pmd_t *pmd = pmd_offset(pud, addr);
-				if (!pmd_none(*pmd))
-					ptep = pte_offset_map(pmd, addr);
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		/*
+		 * The vma can contain pages that aren't present. Although
+		 * the pte search is expensive, we need the pte to find the
+		 * page pfn and to check whether the page should be flushed.
+		 */
+		ptep = get_ptep(vma->vm_mm, addr);
+		if (ptep && pte_needs_flush(*ptep)) {
+			if (parisc_requires_coherency()) {
+				flush_user_cache_page(vma, addr);
+			} else {
+				pfn = pte_pfn(*ptep);
+				if (WARN_ON(!pfn_valid(pfn)))
+					return;
+				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 			}
 		}
 	}
-	return ptep;
 }
 
-static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
-			      unsigned long start, unsigned long end)
+static inline unsigned long mm_total_size(struct mm_struct *mm)
 {
-	unsigned long addr, pfn;
-	pte_t *ptep;
+	struct vm_area_struct *vma;
+	unsigned long usize = 0;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		ptep = get_ptep(mm->pgd, addr);
-		if (ptep) {
-			pfn = pte_pfn(*ptep);
-			flush_cache_page(vma, addr, pfn);
-		}
-	}
+	for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
+		usize += vma->vm_end - vma->vm_start;
+	return usize;
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
 
-	/* Flushing the whole cache on each cpu takes forever on
-	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		if (mm->context.space_id)
-			flush_tlb_all();
+	/*
+	 * Flushing the whole cache on each cpu takes forever on
+	 * rp3440, etc. So, avoid it if the mm isn't too big.
+	 *
+	 * Note that we must flush the entire cache on machines
+	 * with aliasing caches to prevent random segmentation
+	 * faults.
+	 */
+	if (!parisc_requires_coherency()
+	    ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+			return;
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
 
+	/* Flush mm */
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
+		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
 }
 
-void flush_cache_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    end - start >= parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context.space_id)
-			flush_tlb_range(vma, start, end);
+	if (!parisc_requires_coherency()
+	    || end - start >= parisc_cache_flush_threshold) {
+		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+			return;
+		flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
 
-	flush_cache_pages(vma, vma->vm_mm, start, end);
+	flush_cache_pages(vma, start, end);
 }
 
-void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	if (pfn_valid(pfn)) {
-		if (likely(vma->vm_mm->context.space_id)) {
-			flush_tlb_page(vma, vmaddr);
-			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
-		} else {
-			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
-		}
+	if (WARN_ON(!pfn_valid(pfn)))
+		return;
+	if (parisc_requires_coherency())
+		flush_user_cache_page(vma, vmaddr);
+	else
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+	if (!PageAnon(page))
+		return;
+
+	if (parisc_requires_coherency()) {
+		flush_user_cache_page(vma, vmaddr);
+		return;
 	}
+
+	flush_tlb_page(vma, vmaddr);
+	preempt_disable();
+	flush_dcache_page_asm(page_to_phys(page), vmaddr);
+	preempt_enable();
 }
 
 void flush_kernel_vmap_range(void *vaddr, int size)
@@ -642,6 +754,9 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
 
+	/* Ensure DMA is complete */
+	asm_syncdma();
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
 		flush_tlb_kernel_range(start, end);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ecf50159359e..df8102fb435f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -554,8 +554,9 @@
 	extrd,s		\pte,63,25,\pte
 	.endm
 
-	/* The alias region is an 8MB aligned 16MB to do clear and
-	 * copy user pages at addresses congruent with the user
+	/* The alias region is comprised of a pair of 4 MB regions
+	 * aligned to 8 MB. It is used to clear/copy/flush user pages
+	 * using kernel virtual addresses congruent with the user
 	 * virtual address.
 	 *
 	 * To use the alias page, you set %r26 up with the to TLB
@@ -565,13 +566,8 @@
 	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
 	cmpib,COND(<>),n 0,\spc,\fault
 	ldil		L%(TMPALIAS_MAP_START),\tmp
-#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
-	/* on LP64, ldi will sign extend into the upper 32 bits,
-	 * which is behaviour we don't want */
-	depdi		0,31,32,\tmp
-#endif
 	copy		\va,\tmp1
-	depi		0,31,23,\tmp1
+	depi_safe	0,31,TMPALIAS_SIZE_BITS+1,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	mfctl		%cr19,\tmp	/* iir */
 	/* get the opcode (first six bits) into \tmp */
@@ -604,13 +600,13 @@
 	 * OK, it is in the temp alias region, check whether "from" or "to".
 	 * Check "subtle" note in pacache.S re: r23/r26.
 	 */
-#ifdef CONFIG_64BIT
-	extrd,u,*=	\va,41,1,%r0
-#else
-	extrw,u,=	\va,9,1,%r0
-#endif
+	extrw,u,=	\va,31-TMPALIAS_SIZE_BITS,1,%r0
 	or,COND(tr)	%r23,%r0,\pte
 	or		%r26,%r0,\pte
+
+	/* convert phys addr in \pte (from r23 or r26) to tlb insert format */
+	SHRREG		\pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
+	depi_safe	_PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
 	.endm
 
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
index 3343d2fb7889..6e0b86652f30 100644
--- a/arch/parisc/kernel/kprobes.c
+++ b/arch/parisc/kernel/kprobes.c
@@ -152,7 +152,7 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
 	/* for absolute branch instructions we can copy iaoq_b. for relative
 	 * branch instructions we need to calculate the new address based on the
 	 * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
-	 * modificationt because it's based on our ainsn.insn address.
+	 * modifications because it's based on our ainsn.insn address.
 	 */
 
 	if (p->post_handler)
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index b4c3f01e2399..9a0018f1f42c 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -300,7 +300,6 @@ fdoneloop2:
 	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */
 
 fdsync:
-	syncdma
 	sync
 	mtsm		%r22			/* restore I-bit */
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
@@ -488,6 +487,8 @@ ENDPROC_CFI(copy_page_asm)
  * parisc chip designers that there will not ever be a parisc
  * chip with a larger alias boundary (Never say never :-) ).
  *
+ * Yah, what about the PA8800 and PA8900 processors?
+ *
  * Subtle: the dtlb miss handlers support the temp alias region by
  * "knowing" that if a dtlb miss happens within the temp alias
  * region it must have occurred while in clear_user_page. Since
@@ -499,19 +500,10 @@ ENDPROC_CFI(copy_page_asm)
  * miss on the translation, the dtlb miss handler inserts the
  * translation into the tlb using these values:
  *
- *	%r26 physical page (shifted for tlb insert) of "to" translation
- *	%r23 physical page (shifted for tlb insert) of "from" translation
+ *	%r26 physical address of "to" translation
+ *	%r23 physical address of "from" translation
  */
 
-	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
-	.macro		convert_phys_for_tlb_insert20	phys
-	extrd,u		\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
-#if _PAGE_SIZE_ENCODING_DEFAULT
-	depdi		_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
-#endif
-	.endm
-
 	/*
 	 * copy_user_page_asm() performs a page copy using mappings
 	 * equivalent to the user page mappings. It can be used to
@@ -540,24 +532,10 @@ ENTRY_CFI(copy_user_page_asm)
 	sub		%r25, %r1, %r23
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-	depdi		0, 31,32, %r28		/* clear any sign extension */
-#endif
-	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
-	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
-	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
-	copy		%r28, %r29
-	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
-	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+	dep_safe	%r24, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
+	depi_safe	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 	copy		%r28, %r29
-	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
-#endif
+	depi_safe	1, 31-TMPALIAS_SIZE_BITS,1, %r29	/* Form aliased virtual address 'from' */
 
 	/* Purge any old translations */
 
@@ -687,18 +665,8 @@ ENTRY_CFI(clear_user_page_asm)
 	tophys_r1	%r26
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-	depdi		0, 31,32, %r28		/* clear any sign extension */
-#endif
-	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#endif
+	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
+	depi_safe	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 
 	/* Purge any old translation */
 
@@ -763,18 +731,8 @@ ENDPROC_CFI(clear_user_page_asm)
 
 ENTRY_CFI(flush_dcache_page_asm)
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-	depdi		0, 31,32, %r28		/* clear any sign extension */
-#endif
-	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#endif
+	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
+	depi_safe	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 
 	/* Purge any old translation */
 
@@ -822,18 +780,8 @@ ENDPROC_CFI(flush_dcache_page_asm)
 
 ENTRY_CFI(purge_dcache_page_asm)
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-	depdi		0, 31,32, %r28		/* clear any sign extension */
-#endif
-	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#endif
+	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
+	depi_safe	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 
 	/* Purge any old translation */
 
@@ -881,18 +829,8 @@ ENDPROC_CFI(purge_dcache_page_asm)
 
 ENTRY_CFI(flush_icache_page_asm)
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-	depdi		0, 31,32, %r28		/* clear any sign extension */
-#endif
-	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
-#endif
+	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
+	depi_safe	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 
 	/* Purge any old translation.  Note that the FIC instruction
 	 * may use either the instruction or data TLB.  Given that we
@@ -1098,7 +1036,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	sync
 
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-	syncdma
 	bv		%r0(%r2)
 	nop
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
@@ -1140,7 +1077,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
 	sync
 
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-	syncdma
 	bv		%r0(%r2)
 	nop
 ENDPROC_CFI(purge_kernel_dcache_range_asm)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index a6a2a558fc5b..7c37e09c92da 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task.h>
@@ -116,8 +117,7 @@ void machine_power_off(void)
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
 
 	/* ipmi_poweroff may have been installed. */
-	if (pm_power_off)
-		pm_power_off();
+	do_kernel_power_off();
 
 	/* It seems we have no way to power the system off via
 	 * software. The user has to press the button himself. */
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index d98692115221..26eb568f8b96 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -171,6 +171,7 @@ static int __init processor_probe(struct parisc_device *dev)
 	p->cpu_num = cpu_info.cpu_num;
 	p->cpu_loc = cpu_info.cpu_loc;
 
+	set_cpu_possible(cpuid, true);
 	store_cpu_topology(cpuid);
 
 #ifdef CONFIG_SMP
@@ -419,8 +420,7 @@ show_cpuinfo (struct seq_file *m, void *v)
 		}
 		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
 
-		seq_printf(m, "model\t\t: %s\n"
-				"model name\t: %s\n",
+		seq_printf(m, "model\t\t: %s - %s\n",
 				 boot_cpu_data.pdc.sys_model_name,
 				 cpuinfo->dev ? cpuinfo->dev->name : "Unknown");
 
@@ -461,6 +461,13 @@ static struct parisc_driver cpu_driver __refdata = {
  */
 void __init processor_init(void)
 {
+	unsigned int cpu;
+	reset_cpu_topology();
+
+	/* reset possible mask. We will mark those which are possible. */
+	for_each_possible_cpu(cpu)
+		set_cpu_possible(cpu, false);
+
 	register_parisc_driver(&cpu_driver);
 }
 
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index b91cb45ffd4e..f005ddedb50e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_PA11
 	dma_ops_init();
 #endif
+
+	clear_sched_clock_stable();
 }
 
 /*
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index bb27dfeeddfc..9714fbd7c42d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -251,13 +251,9 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs, even if
-	 * they share the same socket.
+	 * The cr16 interval timers are not synchronized across CPUs.
 	 */
 	if (num_online_cpus() > 1 && !running_on_qemu) {
-		/* mark sched_clock unstable */
-		clear_sched_clock_stable();
-
 		clocksource_cr16.name = "cr16_unstable";
 		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
 		clocksource_cr16.rating = 0;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index a6e61cf2cad0..b78f1b9d45c1 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -469,7 +469,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
 	 * panic notifiers, and we should call panic
 	 * directly from the location that we wish.
 	 * e.g. We should not call panic from
-	 * parisc_terminate, but rather the oter way around.
+	 * parisc_terminate, but rather the other way around.
 	 * This hack works, prints the panic message twice,
 	 * and it enables reboot timers!
 	 */
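The threshold change in parisc_setup_cache_timing() widens the intermediate product to 64 bits before dividing: on a 32-bit kernel, size * alltime is evaluated in 32-bit arithmetic and can wrap before the division, yielding a far-too-small threshold. A minimal standalone sketch of the arithmetic (plain userspace C, not kernel code; the sample cycle counts are invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical measurements on a 32-bit kernel */
	uint32_t size      = 1048576;	/* bytes covered by the range flush test */
	uint32_t alltime   = 6000000;	/* cycles for the whole-cache flush */
	uint32_t rangetime = 8000000;	/* cycles for the range flush */

	/* 32-bit product: 1048576 * 6000000 wraps modulo 2^32 */
	uint32_t wrapped = size * alltime / rangetime;

	/* widening first, as the patched code does, keeps the true quotient */
	uint32_t widened = (uint32_t)((uint64_t)size * alltime / rangetime);

	printf("32-bit math: %u, 64-bit math: %u\n", wrapped, widened);
	return 0;
}

With these sample numbers the 32-bit version divides a wrapped product, while the widened version yields the intended 786432-byte quotient.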
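In pacache.S, the dep_safe/depi_safe sequences that replace the old #ifdef blocks build a tmpalias virtual address by depositing the low bits of the user address into TMPALIAS_MAP_START, clearing the page offset, and (for the "from" mapping in copy_user_page_asm) setting the bit that selects the second 4 MB half. A plain-C sketch of that bit arithmetic, assuming an illustrative value for TMPALIAS_MAP_START and taking TMPALIAS_SIZE_BITS = 22 from the "pair of 4 MB regions aligned to 8 MB" description in entry.S:

#include <stdint.h>
#include <stdio.h>

#define TMPALIAS_MAP_START	0x0f000000UL	/* illustrative; must be 8 MB aligned */
#define TMPALIAS_SIZE_BITS	22		/* each half of the alias region is 4 MB */
#define PAGE_SHIFT		12

static unsigned long tmpalias_addr(unsigned long user_vaddr, int is_from)
{
	unsigned long addr = TMPALIAS_MAP_START;

	/* dep_safe: deposit the user address's low 22 bits (its cache colour) */
	addr |= user_vaddr & ((1UL << TMPALIAS_SIZE_BITS) - 1);
	/* depi_safe 0,31,PAGE_SHIFT: clear the in-page offset */
	addr &= ~((1UL << PAGE_SHIFT) - 1);
	/* depi_safe 1,31-TMPALIAS_SIZE_BITS,1: select the upper "from" half */
	if (is_from)
		addr |= 1UL << TMPALIAS_SIZE_BITS;
	return addr;
}

int main(void)
{
	unsigned long uva = 0x40123456UL;

	printf("to:   %#lx\n", tmpalias_addr(uva, 0));	/* 0x0f123000 */
	printf("from: %#lx\n", tmpalias_addr(uva, 1));	/* 0x0f523000 */
	return 0;
}

Because TMPALIAS_MAP_START is aligned to the full 8 MB region, the ORs here behave like the PA-RISC deposit instructions, which overwrite the (zero) low-order bits.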