forked from luck/tmp_suning_uos_patched
xtensa: disable preemption around cache alias management calls
Although cache alias management calls set up and tear down TLB entries, and fast_second_level_miss is able to restore a TLB entry should it be evicted, they absolutely cannot preempt each other, because they use the same TLBTEMP area for different purposes. Disable preemption around all cache alias management calls to enforce that. Cc: stable@vger.kernel.org Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
This commit is contained in:
parent
481535c5b4
commit
3a860d165e
|
@ -70,8 +70,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
|
|||
kvaddr = TLBTEMP_BASE_1 +
|
||||
(page_to_phys(page) & DCACHE_ALIAS_MASK);
|
||||
|
||||
preempt_disable();
|
||||
__invalidate_dcache_page_alias(kvaddr,
|
||||
page_to_phys(page));
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -156,6 +158,7 @@ void flush_dcache_page(struct page *page)
|
|||
if (!alias && !mapping)
|
||||
return;
|
||||
|
||||
preempt_disable();
|
||||
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
|
||||
__flush_invalidate_dcache_page_alias(virt, phys);
|
||||
|
||||
|
@ -166,6 +169,7 @@ void flush_dcache_page(struct page *page)
|
|||
|
||||
if (mapping)
|
||||
__invalidate_icache_page_alias(virt, phys);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/* There shouldn't be an entry in the cache for this page anymore. */
|
||||
|
@ -199,8 +203,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
|
|||
unsigned long phys = page_to_phys(pfn_to_page(pfn));
|
||||
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
|
||||
|
||||
preempt_disable();
|
||||
__flush_invalidate_dcache_page_alias(virt, phys);
|
||||
__invalidate_icache_page_alias(virt, phys);
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(local_flush_cache_page);
|
||||
|
||||
|
@ -227,11 +233,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
|
|||
unsigned long phys = page_to_phys(page);
|
||||
unsigned long tmp;
|
||||
|
||||
preempt_disable();
|
||||
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
|
||||
__flush_invalidate_dcache_page_alias(tmp, phys);
|
||||
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
|
||||
__flush_invalidate_dcache_page_alias(tmp, phys);
|
||||
__invalidate_icache_page_alias(tmp, phys);
|
||||
preempt_enable();
|
||||
|
||||
clear_bit(PG_arch_1, &page->flags);
|
||||
}
|
||||
|
@ -265,7 +273,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
|||
|
||||
if (alias) {
|
||||
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
|
||||
preempt_disable();
|
||||
__flush_invalidate_dcache_page_alias(t, phys);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/* Copy data */
|
||||
|
@ -280,9 +290,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
|||
if (alias) {
|
||||
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
|
||||
|
||||
preempt_disable();
|
||||
__flush_invalidate_dcache_range((unsigned long) dst, len);
|
||||
if ((vma->vm_flags & VM_EXEC) != 0)
|
||||
__invalidate_icache_page_alias(t, phys);
|
||||
preempt_enable();
|
||||
|
||||
} else if ((vma->vm_flags & VM_EXEC) != 0) {
|
||||
__flush_dcache_range((unsigned long)dst,len);
|
||||
|
@ -304,7 +316,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
|
|||
|
||||
if (alias) {
|
||||
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
|
||||
preempt_disable();
|
||||
__flush_invalidate_dcache_page_alias(t, phys);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
memcpy(dst, src, len);
|
||||
|
|
Loading…
Reference in New Issue
Block a user