kernel_optimize_test/arch/sh/include/cpu-sh4/cpu/sq.h
Paul Mundt 7bdda6209f sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem, since the cache attribute
bits that we care about are all in the lower 32 bits, but we make the
conversion just to be safe. The store queue remapping, on the other
hand, depends on the extended prot bits for enabling userspace access
to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2010-02-17 13:23:00 +09:00
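
A minimal before/after sketch of what this change implies for the
sq_remap() prototype; the old parameter name "flags" is an assumption
for illustration, not taken from the tree:

	/* Before: pgprot flags squeezed through an unsigned long, which
	 * truncates the extended protection bits of a 64-bit pgprot on
	 * SH-X2 (parameter name assumed). */
	unsigned long sq_remap(unsigned long phys, unsigned int size,
	                       const char *name, unsigned long flags);

	/* After: the full pgprot_t reaches the mapping code, so the
	 * extended prot bits that gate userspace access survive. */
	unsigned long sq_remap(unsigned long phys, unsigned int size,
	                       const char *name, pgprot_t prot);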

/*
 * include/asm-sh/cpu-sh4/sq.h
 *
 * Copyright (C) 2001, 2002, 2003 Paul Mundt
 * Copyright (C) 2001, 2002 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_SQ_H
#define __ASM_CPU_SH4_SQ_H

#include <asm/addrspace.h>
#include <asm/page.h>

/*
 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
 * mapped to any physical address space. Since data is written (and aligned)
 * to 32-byte boundaries, we need to be sure that all allocations are aligned.
 */
#define SQ_SIZE		32
#define SQ_ALIGN_MASK	(~(SQ_SIZE - 1))
#define SQ_ALIGN(addr)	(((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK)

#define SQ_QACR0	(P4SEG_REG_BASE  + 0x38)
#define SQ_QACR1	(P4SEG_REG_BASE  + 0x3c)
#define SQ_ADDRMAX	(P4SEG_STORE_QUE + 0x04000000)

/* arch/sh/kernel/cpu/sh4/sq.c */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot);
void sq_unmap(unsigned long vaddr);
void sq_flush_range(unsigned long start, unsigned int len);

#endif /* __ASM_CPU_SH4_SQ_H */
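
For context, a hypothetical usage sketch of this API (not part of the
header): the physical address, length, and function name are made up,
PAGE_SHARED stands in for a caller-chosen pgprot_t, and error handling
is elided.

	#include <linux/types.h>
	#include <asm/pgtable.h>
	#include <cpu/sq.h>

	static void sq_write_example(void)
	{
		/* Round an arbitrary length up to the 32-byte SQ line size:
		 * SQ_ALIGN(0x1004) == (0x1004 + 31) & ~31 == 0x1020. */
		unsigned int len = SQ_ALIGN(0x1004);
		unsigned long vaddr;
		unsigned int i;
		u32 *p;

		/* Map a made-up physical region through the store queues. */
		vaddr = sq_remap(0x0c000000, len, "sq-example", PAGE_SHARED);

		/* Stores to the returned P4SEG address sit in the store
		 * queues until flushed out in SQ_SIZE bursts. */
		p = (u32 *)vaddr;
		for (i = 0; i < len / sizeof(*p); i++)
			p[i] = i;

		sq_flush_range(vaddr, len);
		sq_unmap(vaddr);
	}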