#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/module.h>

#include <asm/gart.h>
#include <asm/calgary.h>

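/*
 * The dma_mapping_ops in use for this platform: whichever IOMMU driver
 * initializes successfully (Calgary, Intel VT-d, GART) installs its ops
 * here, with the no-IOMMU ops installed as the fallback by no_iommu_init().
 */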
const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

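/*
 * CONFIG_IOMMU_DEBUG forces every device through the IOMMU and panics
 * on a mapping overflow instead of failing the DMA mapping.
 */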
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

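/*
 * dma_set_mask - set the DMA addressing limit for a device
 * @dev:  device whose dma_mask is updated
 * @mask: bitmask of the addresses the device can reach
 *
 * Returns -EIO if the device has no dma_mask or the platform cannot
 * honour the requested mask.  Illustrative driver usage (pdev being a
 * hypothetical struct pci_dev):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */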
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

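/*
 * Probe the possible IOMMUs in order: Calgary, Intel VT-d, then GART.
 * Each init installs its dma_ops only if the hardware is present;
 * no_iommu_init() supplies the software fallback when none of them
 * claimed dma_ops.
 */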
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

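/*
 * Called from the machine shutdown path; the GART is the only IOMMU
 * here that needs an explicit quiesce before reboot or kexec.
 */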
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);