KVM: s390: Get rid of ar_t

sparse with __CHECK_ENDIAN__ shows that ar_t was never used properly
across KVM on s390. We can either:
- fix all places to use ar_t consistently, or
- stop treating ar_t as special altogether.

Since ar_t is just used as a register number (no endianness issues
for u8), and all other register numbers are also just plain int
variables, let's just use u8, which matches the __u8 in the userspace
ABI for the memop ioctl.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
commit 27f67f8727
parent d051ae5313
Author: Christian Borntraeger
Date:   2016-12-09 12:44:40 +01:00

4 changed files with 31 additions and 33 deletions
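Why a plain u8 is enough, as a small self-contained sketch: the helper name base_reg_from_ipb below is hypothetical and not part of the patch; it only mirrors the "base2 = ipb >> 28" pattern visible in the kvm-s390.h hunks further down, with uint8_t standing in for the kernel's u8.

#include <assert.h>
#include <stdint.h>

/* An access register number is a 4-bit instruction field, so it always
 * fits in an unsigned byte, and a single byte has no byte order for
 * sparse's __CHECK_ENDIAN__ to complain about. */
static inline uint8_t base_reg_from_ipb(uint32_t ipb)
{
	return (uint8_t)(ipb >> 28);	/* value is always 0..15 */
}

int main(void)
{
	assert(base_reg_from_ipb(0xb0000000u) == 0xb);	/* access register 11 */
	return 0;
}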

diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c

@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
 			  enum gacc_mode mode)
 {
 	union alet alet;
@@ -487,7 +487,7 @@ enum prot_type {
 };
 
 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
-		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
+		     u8 ar, enum gacc_mode mode, enum prot_type prot)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec;
@@ -545,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 }
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 unsigned long ga, ar_t ar, enum gacc_mode mode)
+			 unsigned long ga, u8 ar, enum gacc_mode mode)
 {
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -777,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, enum gacc_mode mode)
 {
@@ -809,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -883,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -916,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;

diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h

@@ -162,11 +162,11 @@ enum gacc_mode {
 };
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 * data will be copied from guest space to kernel space.
 */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);

diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h

@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
-					      ar_t *ar_b1, ar_t *ar_b2)
+					      u8 *ar_b1, u8 *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c

@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	ar_t ar;
+	u8 ar;
 	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_spx++;
 
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stpx++;
 
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stap++;
 
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
-	ar_t ar;
+	u8 ar;
 
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stidp++;
 
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	ASCEBC(mem->vm[0].cpi, 16);
 }
 
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
 				 u8 fc, u8 sel1, u16 sel2)
 {
 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctl++;
 
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctl++;
 
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctlg++;
 
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctg++;
 
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_tprot++;
 
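
For orientation, the resulting caller pattern looks like the sketch below. The function example_handler is illustrative only, assembled from pieces shown in the hunks above (kvm_s390_get_base_disp_s and read_guest); it is not part of the patch, and real handlers additionally inject a program exception on failure.

/* Illustrative only: the access register comes back from the base/disp
 * helper as a plain u8 and is handed straight to the guest accessors. */
static int example_handler(struct kvm_vcpu *vcpu)
{
	u64 operand2, val;
	u8 ar;
	int rc;

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	rc = read_guest(vcpu, operand2, ar, &val, sizeof(val));
	if (rc)
		return rc;	/* real handlers inject a program exception here */

	/* ... use val ... */
	return 0;
}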