7e5a69e83b
The VIVT cache of a highmem page is always flushed before the page is unmapped. This cache flush is explicit through flush_cache_kmaps() in flush_all_zero_pkmaps(), or through __cpuc_flush_dcache_area() in kunmap_atomic(). There is also an implicit flush of those highmem pages that were part of a process that just terminated, making those pages free, as the whole VIVT cache has to be flushed on every task switch. Hence unmapped highmem pages need no cache maintenance in that case.

However, unmapped pages may still be cached with a VIPT cache because the cache is tagged with physical addresses. There is no need for a whole cache flush during task switching for that reason, and despite the explicit cache flushes in flush_all_zero_pkmaps() and kunmap_atomic(), some highmem pages that were mapped in user space end up still cached even after they become unmapped.

So we do have to perform cache maintenance on those unmapped highmem pages in the context of DMA when using a VIPT cache. Unfortunately, it is not possible to perform that cache maintenance using physical addresses, as all the L1 cache maintenance coprocessor functions accept virtual addresses only. Therefore we have no choice but to set up a temporary virtual mapping for that purpose. And of course the explicit cache flushing when unmapping a highmem page on a system with a VIPT cache can now go, which should increase performance.

While at it, because the code in __flush_dcache_page() has to be modified anyway, let's also make sure the mapped highmem pages are pinned with kmap_high_get() for the duration of the cache maintenance operation. Because kunmap() unmaps highmem pages lazily, it was reported by Gary King <GKing@nvidia.com> that those pages ended up being unmapped during cache maintenance on SMP, causing segmentation faults.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
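As an illustration of the pattern described above, here is a minimal sketch (the helper name is hypothetical) that restates the highmem branch of __flush_dcache_page() in the file below: pin an existing kmap mapping with kmap_high_get() if the page has one, otherwise build a temporary mapping with kmap_high_l1_vipt() just long enough to flush the page.

/*
 * Illustrative only: a hypothetical helper restating the logic that
 * __flush_dcache_page() below applies to highmem pages.
 */
static void example_flush_highmem_dcache(struct page *page)
{
	void *addr = kmap_high_get(page);	/* pins the kmap mapping, if any */

	if (addr) {
		/* Page is mapped and now pinned: flush via that mapping. */
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high(page);		/* release the pin */
	} else if (cache_is_vipt()) {
		/*
		 * Unmapped but possibly still cached (VIPT caches are
		 * physically tagged): create a temporary mapping for the flush.
		 */
		pte_t saved_pte;

		addr = kmap_high_l1_vipt(page, &saved_pte);
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high_l1_vipt(page, saved_pte);
	}
}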
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

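	/*
	 * Clean and invalidate the D-cache over the aliased mapping of the
	 * page (the mcrr c14 range operation), then drain the write buffer.
	 */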
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
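		/* Clean and invalidate the entire D-cache, then drain the write buffer. */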
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
#endif

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
#ifdef CONFIG_SMP
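		/*
		 * If cache maintenance operations are not broadcast in
		 * hardware, have the other CPUs flush their I-caches too.
		 */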
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
#endif
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * processes address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
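	/*
	 * Stay on this CPU across the memcpy() and the flush: as noted
	 * above, flush_ptrace_access() needs to run on the current CPU.
	 */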
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernels mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
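		/*
		 * Pin the page's kmap mapping, if it has one, so it cannot
		 * be unmapped while the cache maintenance is in progress.
		 */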
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
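			/*
			 * The page is not currently kmapped but may still be
			 * cached, since VIPT caches are physically tagged:
			 * set up a temporary mapping just for the flush.
			 */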
			pte_t saved_pte;
			addr = kmap_high_l1_vipt(page, &saved_pte);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}