KVM: use gfn_to_pfn_memslot in kvm_iommu_map_pages
This makes it possible to IOMMU-map a memslot before it is made visible to KVM.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 506f0d6f9c
commit 3ad26d8139
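The point of passing the slot can be seen in a standalone userspace analogy (a sketch, not kernel code; struct memslot, slot_table, find_slot, map_via_lookup and map_via_slot are made-up names): the old gfn_to_pfn() path resolves a gfn through the slot table that is already visible to KVM, so it cannot serve a slot that has not been published yet, while the new gfn_to_pfn_memslot() path takes the slot from the caller and needs no lookup.

/* Userspace analogy only; not part of the patch. */
#include <stdio.h>
#include <stddef.h>

struct memslot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* The "visible" table, analogous to kvm->memslots. */
static struct memslot *slot_table[8];

/* Lookup through the visible table, analogous to the gfn_to_pfn() path. */
static struct memslot *find_slot(unsigned long gfn)
{
	for (size_t i = 0; i < 8; i++) {
		struct memslot *s = slot_table[i];

		if (s && gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
			return s;
	}
	return NULL;
}

/* Old style: only works once the slot is already in slot_table. */
static int map_via_lookup(unsigned long base_gfn, unsigned long npages)
{
	for (unsigned long gfn = base_gfn; gfn < base_gfn + npages; gfn++)
		if (!find_slot(gfn))
			return -1;	/* fails for a not-yet-visible slot */
	return 0;
}

/* New style: the caller hands over the slot, no table lookup needed. */
static int map_via_slot(const struct memslot *slot)
{
	for (unsigned long gfn = slot->base_gfn;
	     gfn < slot->base_gfn + slot->npages; gfn++)
		(void)gfn;	/* would translate gfn via 'slot' directly */
	return 0;
}

int main(void)
{
	struct memslot new_slot = { .base_gfn = 0x1000, .npages = 16 };

	/* Slot not yet published: only the slot-based path succeeds. */
	printf("lookup-based map before publish: %d\n",
	       map_via_lookup(new_slot.base_gfn, new_slot.npages));
	printf("slot-based map before publish:   %d\n",
	       map_via_slot(&new_slot));

	slot_table[0] = &new_slot;	/* now make it visible */
	printf("lookup-based map after publish:  %d\n",
	       map_via_lookup(new_slot.base_gfn, new_slot.npages));
	return 0;
}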
@@ -440,8 +440,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
-int kvm_iommu_map_pages(struct kvm *kvm,
-			gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn = base_gfn;
+	gfn_t gfn = slot->base_gfn;
+	unsigned long npages = slot->npages;
 	pfn_t pfn;
 	int i, r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
 			continue;
 
-		pfn = gfn_to_pfn(kvm, gfn);
+		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
 		r = iommu_map_range(domain,
 				    gfn_to_gpa(gfn),
 				    pfn_to_hpa(pfn),
@@ -69,7 +69,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, base_gfn, i);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
 	return r;
 }
 
@@ -81,8 +81,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	slots = kvm->memslots;
 
 	for (i = 0; i < slots->nmemslots; i++) {
-		r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
-					slots->memslots[i].npages);
+		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
 		if (r)
 			break;
 	}
@@ -684,7 +684,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 #ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
-	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+	r = kvm_iommu_map_pages(kvm, memslot);
 	if (r)
 		goto out;
 #endif