Acked-by: Guo Ren <guoren@kernel.org>

On Sat, Sep 19, 2020 at 5:50 PM Thomas Gleixner <tglx@linutronix.de> wrote:
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Cc: linux-csky@vger.kernel.org
---
Note: Completely untested
---
 arch/csky/Kconfig               |    1 
 arch/csky/include/asm/highmem.h |    4 +-
 arch/csky/mm/highmem.c          |   75 ---------------------------------------
 3 files changed, 5 insertions(+), 75 deletions(-)
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -285,6 +285,7 @@ config NR_CPUS
 config HIGHMEM
 	bool "High Memory Support"
 	depends on !CPU_CK610
+	select KMAP_ATOMIC_GENERIC
 	default y
 
 config FORCE_MAX_ZONEORDER
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
 
 #define ARCH_HAS_KMAP_FLUSH_TLB
 extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
 
 #define flush_cache_kmaps() do {} while (0)
 
+#define arch_kmap_temp_post_map(vaddr, pteval)	kmap_flush_tlb(vaddr)
+#define arch_kmap_temp_post_unmap(vaddr)	kmap_flush_tlb(vaddr)
+
 extern void kmap_init(void);
 
 #endif /* __KERNEL__ */
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -9,8 +9,6 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-static pte_t *kmap_pte;
-
 unsigned long highstart_pfn, highend_pfn;
 
 void kmap_flush_tlb(unsigned long addr)
@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	flush_tlb_one((unsigned long)vaddr);
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	int idx;
-
-	if (vaddr < FIXADDR_START)
-		return;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	pte_clear(&init_mm, vaddr, kmap_pte - idx);
-	flush_tlb_one(vaddr);
-#else
-	(void) idx; /* to kill a warning */
-#endif
-	kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
-	flush_tlb_one(vaddr);
-
-	return (void *) vaddr;
-}
-
-static void __init kmap_pages_init(void)
+void __init kmap_init(void)
 {
 	unsigned long vaddr;
 	pgd_t *pgd;
@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
 	pte = pte_offset_kernel(pmd, vaddr);
 	pkmap_page_table = pte;
 }
-
-void __init kmap_init(void)
-{
-	unsigned long vaddr;
-
-	kmap_pages_init();
-
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
-
-	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
-}
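
For reference, my reading of why the two new hooks are enough: the generic
temporary-mapping code installs/clears the fixmap PTE itself and then calls the
arch hooks, which csky points at kmap_flush_tlb(). That replaces the open-coded
flush_tlb_one() calls in the functions removed above. A rough sketch of the
control flow I have in mind follows; only arch_kmap_temp_post_map(),
arch_kmap_temp_post_unmap(), kmap_flush_tlb(), set_pte(), pte_clear() and
mk_pte() are real, everything else (names, helpers) is a made-up placeholder
and not the actual mm/highmem.c implementation:

/* Illustrative sketch only -- not the real generic kmap code. */
static void *sketch_kmap_temp(struct page *page, pgprot_t prot)
{
	/* pick_fixmap_slot()/slot_pte() are hypothetical stand-ins for the
	 * per-CPU fixmap slot management the old csky code did by hand */
	unsigned long vaddr = pick_fixmap_slot();
	pte_t pteval = mk_pte(page, prot);

	set_pte(slot_pte(vaddr), pteval);
	/* csky: kmap_flush_tlb(vaddr), i.e. the old flush_tlb_one() */
	arch_kmap_temp_post_map(vaddr, pteval);
	return (void *)vaddr;
}

static void sketch_kunmap_temp(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

	pte_clear(&init_mm, vaddr, slot_pte(vaddr));
	/* csky: kmap_flush_tlb(vaddr) again after the PTE is cleared */
	arch_kmap_temp_post_unmap(vaddr);
}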