forked from luck/tmp_suning_uos_patched
MIPS: asm: asm-eva: Introduce kernel load/store variants
Introduce new macros for kernel load/store variants which will be used to perform regular kernel space load/store operations in EVA mode. Signed-off-by: Markos Chandras <markos.chandras@imgtec.com> Cc: <stable@vger.kernel.org> # v3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9500/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
This commit is contained in:
parent
872cd4c2c6
commit
60cd7e08e4
|
@ -11,6 +11,36 @@
|
|||
#define __ASM_ASM_EVA_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* Kernel variants */
|
||||
|
||||
#define kernel_cache(op, base) "cache " op ", " base "\n"
|
||||
#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
|
||||
#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
|
||||
#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
|
||||
#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
|
||||
#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
|
||||
#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
|
||||
#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
|
||||
#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
|
||||
#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
|
||||
#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
|
||||
#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
|
||||
#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
|
||||
#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
|
||||
|
||||
#ifdef CONFIG_32BIT
|
||||
/*
|
||||
* No 'sd' or 'ld' instructions in 32-bit but the code will
|
||||
* do the correct thing
|
||||
*/
|
||||
#define kernel_sd(reg, addr) user_sw(reg, addr)
|
||||
#define kernel_ld(reg, addr) user_lw(reg, addr)
|
||||
#else
|
||||
#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
|
||||
#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
|
||||
#endif /* CONFIG_32BIT */
|
||||
|
||||
#ifdef CONFIG_EVA
|
||||
|
||||
#define __BUILD_EVA_INSN(insn, reg, addr) \
|
||||
|
@ -41,37 +71,60 @@
|
|||
|
||||
#else
|
||||
|
||||
#define user_cache(op, base) "cache " op ", " base "\n"
|
||||
#define user_ll(reg, addr) "ll " reg ", " addr "\n"
|
||||
#define user_sc(reg, addr) "sc " reg ", " addr "\n"
|
||||
#define user_lw(reg, addr) "lw " reg ", " addr "\n"
|
||||
#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
|
||||
#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
|
||||
#define user_lh(reg, addr) "lh " reg ", " addr "\n"
|
||||
#define user_lb(reg, addr) "lb " reg ", " addr "\n"
|
||||
#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
|
||||
#define user_sw(reg, addr) "sw " reg ", " addr "\n"
|
||||
#define user_swl(reg, addr) "swl " reg ", " addr "\n"
|
||||
#define user_swr(reg, addr) "swr " reg ", " addr "\n"
|
||||
#define user_sh(reg, addr) "sh " reg ", " addr "\n"
|
||||
#define user_sb(reg, addr) "sb " reg ", " addr "\n"
|
||||
#define user_cache(op, base) kernel_cache(op, base)
|
||||
#define user_ll(reg, addr) kernel_ll(reg, addr)
|
||||
#define user_sc(reg, addr) kernel_sc(reg, addr)
|
||||
#define user_lw(reg, addr) kernel_lw(reg, addr)
|
||||
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
|
||||
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
|
||||
#define user_lh(reg, addr) kernel_lh(reg, addr)
|
||||
#define user_lb(reg, addr) kernel_lb(reg, addr)
|
||||
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
|
||||
#define user_sw(reg, addr) kernel_sw(reg, addr)
|
||||
#define user_swl(reg, addr) kernel_swl(reg, addr)
|
||||
#define user_swr(reg, addr) kernel_swr(reg, addr)
|
||||
#define user_sh(reg, addr) kernel_sh(reg, addr)
|
||||
#define user_sb(reg, addr) kernel_sb(reg, addr)
|
||||
|
||||
#ifdef CONFIG_32BIT
|
||||
#define user_sd(reg, addr) kernel_sw(reg, addr)
|
||||
#define user_ld(reg, addr) kernel_lw(reg, addr)
|
||||
#else
|
||||
#define user_sd(reg, addr) kernel_sd(reg, addr)
|
||||
#define user_ld(reg, addr) kernel_ld(reg, addr)
|
||||
#endif /* CONFIG_32BIT */
|
||||
|
||||
#endif /* CONFIG_EVA */
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
#define kernel_cache(op, base) cache op, base
|
||||
#define kernel_ll(reg, addr) ll reg, addr
|
||||
#define kernel_sc(reg, addr) sc reg, addr
|
||||
#define kernel_lw(reg, addr) lw reg, addr
|
||||
#define kernel_lwl(reg, addr) lwl reg, addr
|
||||
#define kernel_lwr(reg, addr) lwr reg, addr
|
||||
#define kernel_lh(reg, addr) lh reg, addr
|
||||
#define kernel_lb(reg, addr) lb reg, addr
|
||||
#define kernel_lbu(reg, addr) lbu reg, addr
|
||||
#define kernel_sw(reg, addr) sw reg, addr
|
||||
#define kernel_swl(reg, addr) swl reg, addr
|
||||
#define kernel_swr(reg, addr) swr reg, addr
|
||||
#define kernel_sh(reg, addr) sh reg, addr
|
||||
#define kernel_sb(reg, addr) sb reg, addr
|
||||
|
||||
#ifdef CONFIG_32BIT
|
||||
/*
|
||||
* No 'sd' or 'ld' instructions in 32-bit but the code will
|
||||
* do the correct thing
|
||||
*/
|
||||
#define user_sd(reg, addr) user_sw(reg, addr)
|
||||
#define user_ld(reg, addr) user_lw(reg, addr)
|
||||
#define kernel_sd(reg, addr) user_sw(reg, addr)
|
||||
#define kernel_ld(reg, addr) user_lw(reg, addr)
|
||||
#else
|
||||
#define user_sd(reg, addr) "sd " reg", " addr "\n"
|
||||
#define user_ld(reg, addr) "ld " reg", " addr "\n"
|
||||
#define kernel_sd(reg, addr) sd reg, addr
|
||||
#define kernel_ld(reg, addr) ld reg, addr
|
||||
#endif /* CONFIG_32BIT */
|
||||
|
||||
#endif /* CONFIG_EVA */
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
#ifdef CONFIG_EVA
|
||||
|
||||
#define __BUILD_EVA_INSN(insn, reg, addr) \
|
||||
|
@ -101,31 +154,27 @@
|
|||
#define user_sd(reg, addr) user_sw(reg, addr)
|
||||
#else
|
||||
|
||||
#define user_cache(op, base) cache op, base
|
||||
#define user_ll(reg, addr) ll reg, addr
|
||||
#define user_sc(reg, addr) sc reg, addr
|
||||
#define user_lw(reg, addr) lw reg, addr
|
||||
#define user_lwl(reg, addr) lwl reg, addr
|
||||
#define user_lwr(reg, addr) lwr reg, addr
|
||||
#define user_lh(reg, addr) lh reg, addr
|
||||
#define user_lb(reg, addr) lb reg, addr
|
||||
#define user_lbu(reg, addr) lbu reg, addr
|
||||
#define user_sw(reg, addr) sw reg, addr
|
||||
#define user_swl(reg, addr) swl reg, addr
|
||||
#define user_swr(reg, addr) swr reg, addr
|
||||
#define user_sh(reg, addr) sh reg, addr
|
||||
#define user_sb(reg, addr) sb reg, addr
|
||||
#define user_cache(op, base) kernel_cache(op, base)
|
||||
#define user_ll(reg, addr) kernel_ll(reg, addr)
|
||||
#define user_sc(reg, addr) kernel_sc(reg, addr)
|
||||
#define user_lw(reg, addr) kernel_lw(reg, addr)
|
||||
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
|
||||
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
|
||||
#define user_lh(reg, addr) kernel_lh(reg, addr)
|
||||
#define user_lb(reg, addr) kernel_lb(reg, addr)
|
||||
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
|
||||
#define user_sw(reg, addr) kernel_sw(reg, addr)
|
||||
#define user_swl(reg, addr) kernel_swl(reg, addr)
|
||||
#define user_swr(reg, addr) kernel_swr(reg, addr)
|
||||
#define user_sh(reg, addr) kernel_sh(reg, addr)
|
||||
#define user_sb(reg, addr) kernel_sb(reg, addr)
|
||||
|
||||
#ifdef CONFIG_32BIT
|
||||
/*
|
||||
* No 'sd' or 'ld' instructions in 32-bit but the code will
|
||||
* do the correct thing
|
||||
*/
|
||||
#define user_sd(reg, addr) user_sw(reg, addr)
|
||||
#define user_ld(reg, addr) user_lw(reg, addr)
|
||||
#define user_sd(reg, addr) kernel_sw(reg, addr)
|
||||
#define user_ld(reg, addr) kernel_lw(reg, addr)
|
||||
#else
|
||||
#define user_sd(reg, addr) sd reg, addr
|
||||
#define user_ld(reg, addr) ld reg, addr
|
||||
#define user_sd(reg, addr) kernel_sd(reg, addr)
|
||||
#define user_ld(reg, addr) kernel_ld(reg, addr)
|
||||
#endif /* CONFIG_32BIT */
|
||||
|
||||
#endif /* CONFIG_EVA */
|
||||
|
|
Loading…
Reference in New Issue
Block a user