KVM: Fix spelling in code comments

Fix spelling and typos (e.g., repeated words) in comments.

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200401140310.29701-1-tabba@google.com
parent ce6f8f02f9
commit 656012c731
@@ -455,9 +455,9 @@ void force_vm_exit(const cpumask_t *mask)
  *
  * The hardware supports a limited set of values with the value zero reserved
  * for the host, so we check if an assigned value belongs to a previous
- * generation, which which requires us to assign a new value. If we're the
- * first to use a VMID for the new generation, we must flush necessary caches
- * and TLBs on all CPUs.
+ * generation, which requires us to assign a new value. If we're the first to
+ * use a VMID for the new generation, we must flush necessary caches and TLBs
+ * on all CPUs.
  */
 static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 {
@@ -267,7 +267,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	/*
 	 * Vector lengths supported by the host can't currently be
 	 * hidden from the guest individually: instead we can only set a
-	 * maxmium via ZCR_EL2.LEN. So, make sure the available vector
+	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
 	 * lengths match the set requested exactly up to the requested
 	 * maximum:
 	 */
@@ -337,7 +337,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
 	unsigned int reg_num;
 
 	unsigned int reqoffset, reqlen; /* User-requested offset and length */
-	unsigned int maxlen; /* Maxmimum permitted length */
+	unsigned int maxlen; /* Maximum permitted length */
 
 	size_t sve_state_size;
@@ -577,7 +577,7 @@ static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
 
 	/*
 	 * The priority value is independent of any of the BPR values, so we
-	 * normalize it using the minumal BPR value. This guarantees that no
+	 * normalize it using the minimal BPR value. This guarantees that no
 	 * matter what the guest does with its BPR, we can always set/get the
 	 * same value of a priority.
 	 */
@@ -131,7 +131,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 	/*
 	 * No valid syndrome? Ask userspace for help if it has
-	 * voluntered to do so, and bail out otherwise.
+	 * volunteered to do so, and bail out otherwise.
 	 */
 	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
 		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
@@ -784,7 +784,7 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	/*
-	 * This assumes that we we have enough space below the idmap
+	 * This assumes that we have enough space below the idmap
 	 * page to allocate our VAs. If not, the check below will
 	 * kick. A potential alternative would be to detect that
 	 * overflow and switch to an allocation above the idmap.
@@ -964,7 +964,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
  * @kvm: The struct kvm pointer
  *
- * Go through the memregions and unmap any reguler RAM
+ * Go through the memregions and unmap any regular RAM
  * backing memory already mapped to the VM.
  */
 void stage2_unmap_vm(struct kvm *kvm)
@@ -2262,7 +2262,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 	/*
 	 * At this point memslot has been committed and there is an
-	 * allocated dirty_bitmap[], dirty pages will be be tracked while the
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
 	 * memory slot is write protected.
 	 */
 	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
@@ -94,7 +94,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 
 	/*
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
-	 * the general puspose registers are undefined upon CPU_ON.
+	 * the general purpose registers are undefined upon CPU_ON.
 	 */
 	reset_state->r0 = smccc_get_arg3(source_vcpu);
 
@@ -265,10 +265,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 	case PSCI_0_2_FN_SYSTEM_OFF:
 		kvm_psci_system_off(vcpu);
 		/*
-		 * We should'nt be going back to guest VCPU after
+		 * We shouldn't be going back to guest VCPU after
 		 * receiving SYSTEM_OFF request.
 		 *
-		 * If user space accidently/deliberately resumes
+		 * If user space accidentally/deliberately resumes
 		 * guest VCPU after SYSTEM_OFF request then guest
 		 * VCPU should see internal failure from PSCI return
 		 * value. To achieve this, we preload r0 (or x0) with
@@ -163,7 +163,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	vl = vcpu->arch.sve_max_vl;
 
 	/*
-	 * Resposibility for these properties is shared between
+	 * Responsibility for these properties is shared between
 	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
 	 * set_sve_vls(). Double-check here just to be sure:
 	 */
@@ -249,7 +249,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  * ioctl or as part of handling a request issued by another VCPU in the PSCI
  * handling code. In the first case, the VCPU will not be loaded, and in the
  * second case the VCPU will be loaded. Because this function operates purely
- * on the memory-backed valus of system registers, we want to do a full put if
+ * on the memory-backed values of system registers, we want to do a full put if
  * we were loaded (handling a request) and load the values back at the end of
  * the function. Otherwise we leave the state alone. In both cases, we
  * disable preemption around the vcpu reset as we would otherwise race with
@@ -357,7 +357,7 @@ void kvm_set_ipa_limit(void)
 	 *
 	 * So clamp the ipa limit further down to limit the number of levels.
 	 * Since we can concatenate upto 16 tables at entry level, we could
-	 * go upto 4bits above the maximum VA addressible with the current
+	 * go upto 4bits above the maximum VA addressable with the current
 	 * number of levels.
 	 */
 	va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
@@ -34,7 +34,7 @@
 #include "trace.h"
 
 /*
- * All of this file is extremly similar to the ARM coproc.c, but the
+ * All of this file is extremely similar to the ARM coproc.c, but the
  * types are different. My gut feeling is that it should be pretty
  * easy to merge, but that would be an ABI breakage -- again. VFP
  * would also need to be abstracted.
@@ -118,8 +118,8 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 	 * entry to the guest but are only restored on vcpu_load.
 	 *
 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
-	 * should never be listed below, because the the MPIDR should only be
-	 * set once, before running the VCPU, and never changed later.
+	 * should never be listed below, because the MPIDR should only be set
+	 * once, before running the VCPU, and never changed later.
 	 */
 	switch (reg) {
 	case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return;
@@ -587,7 +587,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	int ret;
 
 	/*
-	 * The ListRegs field is 5 bits, but there is a architectural
+	 * The ListRegs field is 5 bits, but there is an architectural
 	 * maximum of 16 list registers. Just ignore bit 4...
 	 */
 	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
@@ -119,7 +119,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 
 	/*
 	 * We're using this spinlock to sync access to the coalesced ring.
-	 * The list doesn't need it's own lock since device registration and
+	 * The list doesn't need its own lock since device registration and
 	 * unregistration should only happen when kvm->slots_lock is held.
 	 */
 	spin_lock_init(&kvm->ring_lock);
@@ -116,7 +116,7 @@ irqfd_shutdown(struct work_struct *work)
 	struct kvm *kvm = irqfd->kvm;
 	u64 cnt;
 
-	/* Make sure irqfd has been initalized in assign path. */
+	/* Make sure irqfd has been initialized in assign path. */
 	synchronize_srcu(&kvm->irq_srcu);
 
 	/*
@@ -2799,7 +2799,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
  *
  * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
  * (preempted lock holder), indicated by @in_spin_loop.
- * Set at the beiginning and cleared at the end of interception/PLE handler.
+ * Set at the beginning and cleared at the end of interception/PLE handler.
  *
  * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
  * chance last time (mostly it has become eligible now since we have probably