csky: Fixup defer cache flush for 610
author     Guo Ren <ren_guo@c-sky.com>   Tue, 20 Aug 2019 04:47:24 +0000 (12:47 +0800)
committer  Guo Ren <ren_guo@c-sky.com>   Tue, 20 Aug 2019 12:09:14 +0000 (20:09 +0800)
We use a deferred cache flush mechanism to improve the performance of
the 610, but the implementation was wrong. Fix it up and update the
mechanism:

 - The zero page need not be flushed.
 - If a page is file-mapped and not yet touched from user space, defer the flush.
 - If a page is anonymous-mapped, or is a dirty file-mapped page, flush it immediately.
 - In update_mmu_cache(), finish the deferred flush via flush_dcache_page()
   (see the sketch below).
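
The defer/finish handshake is tracked with a per-page PG_dcache_clean
bit. Below is a minimal user-space model of that handshake; struct page,
defer_flush() and finish_flush() here are illustrative stand-ins, not
kernel code (the real patch uses atomic clear_bit()/test_and_set_bit()
on page->flags):

#include <stdio.h>

struct page { unsigned long flags; };	/* stand-in for the kernel's struct page */

#define PG_dcache_clean	0

/* flush_dcache_page() on a file page with no user mappings:
 * record that the dcache is out of date and defer the flush. */
static void defer_flush(struct page *page)
{
	page->flags &= ~(1UL << PG_dcache_clean);
}

/* update_mmu_cache() when user space faults the page in:
 * perform the deferred flush exactly once. */
static void finish_flush(struct page *page)
{
	if (!(page->flags & (1UL << PG_dcache_clean))) {
		page->flags |= 1UL << PG_dcache_clean;
		printf("dcache_wbinv_all()\n");	/* the deferred flush */
	}
}

int main(void)
{
	struct page page = { .flags = 1UL << PG_dcache_clean };

	defer_flush(&page);	/* kernel fills a not-yet-mapped file page */
	finish_flush(&page);	/* first user fault: flushes */
	finish_flush(&page);	/* later faults: already clean, no flush */
	return 0;
}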

For the 610 we must take care of the dcache aliasing issue:
 - VIPT cache with an 8KB way size at 4KB page granularity.
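
To see why an 8KB way aliases at 4KB page granularity, here is a
minimal user-space sketch of a pages_do_alias()-style colour check
(the constants and main() are illustrative assumptions, not the
kernel's actual macros): with one colour bit (bit 12), two mappings
of the same physical page alias whenever that bit differs.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define WAY_SIZE	(2 * PAGE_SIZE)	/* 610: 8KB per way */

/* Two virtual mappings of one physical page land in different cache
 * sets when they differ in the address bits that index the way but
 * lie above the page offset (here: just bit 12). */
static int pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & (WAY_SIZE - 1) & PAGE_MASK;
}

int main(void)
{
	printf("%d\n", !!pages_do_alias(0x1000, 0x3000));	/* 0: same colour */
	printf("%d\n", !!pages_do_alias(0x1000, 0x2000));	/* 1: aliases */
	return 0;
}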

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
arch/csky/abiv1/cacheflush.c
arch/csky/abiv1/inc/abi/cacheflush.h

index 10af8b6fe322f28d21db8c1fa6b4b7f961109ee7..fee99fc6612f178d57113ce7c471b65a898fb384 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean                PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-       struct address_space *mapping = page_mapping(page);
-       unsigned long addr;
+       struct address_space *mapping;
 
-       if (mapping && !mapping_mapped(mapping)) {
-               set_bit(PG_arch_1, &(page)->flags);
+       if (page == ZERO_PAGE(0))
                return;
-       }
 
-       /*
-        * We could delay the flush for the !page_mapping case too.  But that
-        * case is for exec env/arg pages and those are %99 certainly going to
-        * get faulted into the tlb (and thus flushed) anyways.
-        */
-       addr = (unsigned long) page_address(page);
-       dcache_wb_range(addr, addr + PAGE_SIZE);
+       mapping = page_mapping_file(page);
+
+       if (mapping && !page_mapcount(page))
+               clear_bit(PG_dcache_clean, &page->flags);
+       else {
+               dcache_wbinv_all();
+               if (mapping)
+                       icache_inv_all();
+               set_bit(PG_dcache_clean, &page->flags);
+       }
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-                     pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+       pte_t *ptep)
 {
-       unsigned long addr;
+       unsigned long pfn = pte_pfn(*ptep);
        struct page *page;
-       unsigned long pfn;
 
-       pfn = pte_pfn(*pte);
-       if (unlikely(!pfn_valid(pfn)))
+       if (!pfn_valid(pfn))
                return;
 
        page = pfn_to_page(pfn);
-       addr = (unsigned long) page_address(page);
+       if (page == ZERO_PAGE(0))
+               return;
 
-       if (vma->vm_flags & VM_EXEC ||
-           pages_do_alias(addr, address & PAGE_MASK))
-               cache_wbinv_all();
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               dcache_wbinv_all();
 
-       clear_bit(PG_arch_1, &(page)->flags);
+       if (page_mapping_file(page)) {
+               if (vma->vm_flags & VM_EXEC)
+                       icache_inv_all();
+       }
 }
index 5f663aef9b1b8b019ed0a631a681ddcdc827d93f..fce5604cef400dbe3df8899c754dfc70f2a3bff3 100644 (file)
@@ -26,8 +26,8 @@ extern void flush_dcache_page(struct page *);
 #define flush_icache_page(vma, page)           cache_wbinv_all()
 #define flush_icache_range(start, end)         cache_wbinv_range(start, end)
 
-#define flush_icache_user_range(vma, pg, adr, len) \
-                               cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+       flush_dcache_page(page)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \