KVM: use the new Intel IOMMU APIs
The Intel IOMMU APIs have been updated; convert KVM to use the new ones.
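For reference, the old-to-new mapping as it appears in the diff below:

    intel_iommu_iova_to_pfn(domain, iova)       ->  intel_iommu_iova_to_phys(domain, iova)
    intel_iommu_page_mapping(domain, ...)       ->  intel_iommu_map_address(domain, ...)
    intel_iommu_domain_alloc(pdev)              ->  intel_iommu_alloc_domain()
    intel_iommu_domain_exit(domain)             ->  intel_iommu_free_domain(domain)
    intel_iommu_context_mapping(domain, pdev)   ->  intel_iommu_attach_device(domain, pdev)
    intel_iommu_detach_dev(domain, bus, devfn)  ->  intel_iommu_detach_device(domain, pdev)
    (new) intel_iommu_unmap_address(domain, iova, size)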
In addition, change kvm_iommu_map_guest() to only create the IOMMU domain, and let the new kvm_assign_device() attach assigned devices to it.
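A minimal sketch of the resulting assignment path, mirroring the kvm_main.c hunk below:

    if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
            /* the VM-wide IOMMU domain is created once, on the
             * first assigned device; later devices reuse it */
            if (!kvm->arch.intel_iommu_domain) {
                    r = kvm_iommu_map_guest(kvm);  /* alloc domain, map memslots */
                    if (r)
                            goto out_list_del;
            }
            /* attach this particular device to the shared domain */
            r = kvm_assign_device(kvm, match);
            if (r)
                    goto out_list_del;
    }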
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eafabd5..c96739b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -330,9 +330,10 @@
#ifdef CONFIG_DMAR
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
unsigned long npages);
-int kvm_iommu_map_guest(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
+int kvm_assign_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_DMAR */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
gfn_t base_gfn,
@@ -341,9 +342,7 @@
return 0;
}
-static inline int kvm_iommu_map_guest(struct kvm *kvm,
- struct kvm_assigned_dev_kernel
- *assigned_dev)
+static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
return -ENODEV;
}
@@ -352,6 +351,12 @@
{
return 0;
}
+
+static inline int kvm_assign_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
+{
+ return 0;
+}
#endif /* CONFIG_DMAR */
static inline void kvm_guest_enter(void)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fc6127c..c92b634 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -503,7 +503,12 @@
list_add(&match->list, &kvm->arch.assigned_dev_head);
if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
- r = kvm_iommu_map_guest(kvm, match);
+ if (!kvm->arch.intel_iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
+ if (r)
+ goto out_list_del;
+ }
+ r = kvm_assign_device(kvm, match);
if (r)
goto out_list_del;
}
diff --git a/virt/kvm/vtd.c b/virt/kvm/vtd.c
index a770874..44bb58a 100644
--- a/virt/kvm/vtd.c
+++ b/virt/kvm/vtd.c
@@ -45,20 +45,18 @@
for (i = 0; i < npages; i++) {
/* check if already mapped */
- pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
- gfn_to_gpa(gfn));
- if (pfn)
+ if (intel_iommu_iova_to_phys(domain,
+ gfn_to_gpa(gfn)))
continue;
pfn = gfn_to_pfn(kvm, gfn);
- r = intel_iommu_page_mapping(domain,
- gfn_to_gpa(gfn),
- pfn_to_hpa(pfn),
- PAGE_SIZE,
- DMA_PTE_READ |
- DMA_PTE_WRITE);
+ r = intel_iommu_map_address(domain,
+ gfn_to_gpa(gfn),
+ pfn_to_hpa(pfn),
+ PAGE_SIZE,
+ DMA_PTE_READ | DMA_PTE_WRITE);
if (r) {
- printk(KERN_ERR "kvm_iommu_map_pages:"
+ printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%lx\n", pfn);
goto unmap_pages;
}
@@ -86,10 +84,40 @@
return r;
}
-int kvm_iommu_map_guest(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
{
struct pci_dev *pdev = NULL;
+ struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+ int r;
+
+ /* check if the IOMMU domain exists and is in use */
+ if (!domain)
+ return 0;
+
+ pdev = assigned_dev->dev;
+ if (pdev == NULL)
+ return -ENODEV;
+
+ r = intel_iommu_attach_device(domain, pdev);
+ if (r) {
+ printk(KERN_ERR "assign device %x:%x.%x failed",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ return r;
+ }
+
+ printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
+ assigned_dev->host_busnr,
+ PCI_SLOT(assigned_dev->host_devfn),
+ PCI_FUNC(assigned_dev->host_devfn));
+
+ return 0;
+}
+
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
int r;
if (!intel_iommu_found()) {
@@ -97,39 +125,14 @@
return -ENODEV;
}
- printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
- assigned_dev->host_busnr,
- PCI_SLOT(assigned_dev->host_devfn),
- PCI_FUNC(assigned_dev->host_devfn));
-
- pdev = assigned_dev->dev;
-
- if (pdev == NULL) {
- if (kvm->arch.intel_iommu_domain) {
- intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
- kvm->arch.intel_iommu_domain = NULL;
- }
- return -ENODEV;
- }
-
- kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
+ kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
if (!kvm->arch.intel_iommu_domain)
- return -ENODEV;
+ return -ENOMEM;
r = kvm_iommu_map_memslots(kvm);
if (r)
goto out_unmap;
- intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
- pdev->bus->number, pdev->devfn);
-
- r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
- pdev);
- if (r) {
- printk(KERN_ERR "Domain context map for %s failed",
- pci_name(pdev));
- goto out_unmap;
- }
return 0;
out_unmap:
@@ -138,19 +141,29 @@
}
static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
+ gfn_t base_gfn, unsigned long npages)
{
gfn_t gfn = base_gfn;
pfn_t pfn;
struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
- int i;
+ unsigned long i;
+ u64 phys;
+
+ /* check if the IOMMU domain exists and is in use */
+ if (!domain)
+ return;
for (i = 0; i < npages; i++) {
- pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
- gfn_to_gpa(gfn));
+ phys = intel_iommu_iova_to_phys(domain,
+ gfn_to_gpa(gfn));
+ pfn = phys >> PAGE_SHIFT;
kvm_release_pfn_clean(pfn);
gfn++;
}
+
+ intel_iommu_unmap_address(domain,
+ gfn_to_gpa(base_gfn),
+ PAGE_SIZE * npages);
}
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@
PCI_FUNC(entry->host_devfn));
/* detach kvm dmar domain */
- intel_iommu_detach_dev(domain, entry->host_busnr,
- entry->host_devfn);
+ intel_iommu_detach_device(domain, entry->dev);
}
kvm_iommu_unmap_memslots(kvm);
- intel_iommu_domain_exit(domain);
+ intel_iommu_free_domain(domain);
return 0;
}