forked from luck/tmp_suning_uos_patched
2277ab4a1d
This inverts the delayed dcache flush a bit to be more in line with other platforms. At the same time this also gives us the ability to do some more optimizations and cleanup. Now that the update_mmu_cache() callsite only tests for the bit, the implementation can gradually be split out and made generic, rather than relying on special implementations for each of the peculiar CPU types. SH7705 in 32kB mode and SH-4 still need slightly different handling, but this is something that can remain isolated in the varying page copy/clear routines. On top of that, SH-X3 is dcache coherent, so there is no need to bother with any of these tests in the PTEAEX version of update_mmu_cache(), so we kill that off too. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
44 lines
1.4 KiB
C
/*
 * include/asm-sh/cpu-sh4/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
#define __ASM_CPU_SH4_CACHEFLUSH_H

/*
 * Caches are broken on SH-4 (unless we use write-through
 * caching; in which case they're only semi-broken),
 * so we need them.
 */

/* Flush the entire cache (instruction and data). */
void flush_cache_all(void);

/* Flush the entire data cache. */
void flush_dcache_all(void);

/* Flush all cache lines associated with the given address space. */
void flush_cache_mm(struct mm_struct *mm);

/* A duplicated mm shares the parent's cache state; same flush applies. */
#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)

/* Flush cache lines covering the user range [start, end) within a VMA. */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end);

/* Flush the cache lines for the single page mapped at 'addr' (pfn-backed). */
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn);

/* Flush the data cache lines for one page. */
void flush_dcache_page(struct page *pg);

/* No extra locking is needed around dcache flushes here; these are no-ops. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* Make instruction fetches consistent with data writes in [start, end). */
void flush_icache_range(unsigned long start, unsigned long end);

/*
 * Flush the icache for a user page after the kernel has written to it
 * (e.g. when installing breakpoints via ptrace).
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len);

/* Per-page icache flushing is not required here; intentionally a no-op. */
#define flush_icache_page(vma,pg)	do { } while (0)

/* Initialization of P3 area for copy_user_page */
void p3_cache_init(void);

/* Page flag marking a page with potentially dirty/stale dcache lines. */
#define PG_dcache_dirty	PG_arch_1

#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */