Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig            |   13
-rw-r--r--  drivers/iommu/Makefile           |    1
-rw-r--r--  drivers/iommu/amd_iommu.c        |  589
-rw-r--r--  drivers/iommu/amd_iommu_init.c   |   34
-rw-r--r--  drivers/iommu/amd_iommu_proto.h  |    2
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |   11
-rw-r--r--  drivers/iommu/arm-smmu-v3.c      | 2670
-rw-r--r--  drivers/iommu/arm-smmu.c         |   23
-rw-r--r--  drivers/iommu/exynos-iommu.c     |  527
-rw-r--r--  drivers/iommu/iommu.c            |  373
-rw-r--r--  drivers/iommu/iova.c             |    4
-rw-r--r--  drivers/iommu/rockchip-iommu.c   |   27
12 files changed, 3529 insertions(+), 745 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 1ae4e547b419..40f37a2b4a8a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -339,6 +339,7 @@ config SPAPR_TCE_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
+# ARM IOMMU support
config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support"
depends on (ARM64 || ARM) && MMU
@@ -352,4 +353,16 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
+config ARM_SMMU_V3
+ bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64 && PCI
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 080ffab4ed1c..c6dcc513d711 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e1c7e9e51045..c5677ed2cd89 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -64,10 +64,6 @@
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
-/* A list of preallocated protection domains */
-static LIST_HEAD(iommu_pd_list);
-static DEFINE_SPINLOCK(iommu_pd_list_lock);
-
/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);
@@ -119,7 +115,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
-static int __init alloc_passthrough_domain(void);
+static int alloc_passthrough_domain(void);
/****************************************************************************
*
@@ -234,31 +230,38 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
}
/*
- * In this function the list of preallocated protection domains is traversed to
- * find the domain for a specific device
+ * This function actually applies the mapping to the page table of the
+ * dma_ops domain.
*/
-static struct dma_ops_domain *find_protection_domain(u16 devid)
+static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
+ struct unity_map_entry *e)
{
- struct dma_ops_domain *entry, *ret = NULL;
- unsigned long flags;
- u16 alias = amd_iommu_alias_table[devid];
-
- if (list_empty(&iommu_pd_list))
- return NULL;
-
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
+ u64 addr;
- list_for_each_entry(entry, &iommu_pd_list, list) {
- if (entry->target_dev == devid ||
- entry->target_dev == alias) {
- ret = entry;
- break;
- }
+ for (addr = e->address_start; addr < e->address_end;
+ addr += PAGE_SIZE) {
+ if (addr < dma_dom->aperture_size)
+ __set_bit(addr >> PAGE_SHIFT,
+ dma_dom->aperture[0]->bitmap);
}
+}
+
+/*
+ * Inits the unity mappings required for a specific device
+ */
+static void init_unity_mappings_for_device(struct device *dev,
+ struct dma_ops_domain *dma_dom)
+{
+ struct unity_map_entry *e;
+ u16 devid;
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ devid = get_device_id(dev);
- return ret;
+ list_for_each_entry(e, &amd_iommu_unity_map, list) {
+ if (!(devid >= e->devid_start && devid <= e->devid_end))
+ continue;
+ alloc_unity_mapping(dma_dom, e);
+ }
}
/*
@@ -290,11 +293,23 @@ static bool check_device(struct device *dev)
static void init_iommu_group(struct device *dev)
{
+ struct dma_ops_domain *dma_domain;
+ struct iommu_domain *domain;
struct iommu_group *group;
group = iommu_group_get_for_dev(dev);
- if (!IS_ERR(group))
- iommu_group_put(group);
+ if (IS_ERR(group))
+ return;
+
+ domain = iommu_group_default_domain(group);
+ if (!domain)
+ goto out;
+
+ dma_domain = to_pdomain(domain)->priv;
+
+ init_unity_mappings_for_device(dev, dma_domain);
+out:
+ iommu_group_put(group);
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -434,64 +449,15 @@ static void iommu_uninit_device(struct device *dev)
/* Unlink from alias, it may change if another device is re-plugged */
dev_data->alias_data = NULL;
+ /* Remove dma-ops */
+ dev->archdata.dma_ops = NULL;
+
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
*/
}
-void __init amd_iommu_uninit_devices(void)
-{
- struct iommu_dev_data *dev_data, *n;
- struct pci_dev *pdev = NULL;
-
- for_each_pci_dev(pdev) {
-
- if (!check_device(&pdev->dev))
- continue;
-
- iommu_uninit_device(&pdev->dev);
- }
-
- /* Free all of our dev_data structures */
- list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
- free_dev_data(dev_data);
-}
-
-int __init amd_iommu_init_devices(void)
-{
- struct pci_dev *pdev = NULL;
- int ret = 0;
-
- for_each_pci_dev(pdev) {
-
- if (!check_device(&pdev->dev))
- continue;
-
- ret = iommu_init_device(&pdev->dev);
- if (ret == -ENOTSUPP)
- iommu_ignore_device(&pdev->dev);
- else if (ret)
- goto out_free;
- }
-
- /*
- * Initialize IOMMU groups only after iommu_init_device() has
- * had a chance to populate any IVRS defined aliases.
- */
- for_each_pci_dev(pdev) {
- if (check_device(&pdev->dev))
- init_iommu_group(&pdev->dev);
- }
-
- return 0;
-
-out_free:
-
- amd_iommu_uninit_devices();
-
- return ret;
-}
#ifdef CONFIG_AMD_IOMMU_STATS
/*
@@ -1463,94 +1429,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
return unmapped;
}
-/*
- * This function checks if a specific unity mapping entry is needed for
- * this specific IOMMU.
- */
-static int iommu_for_unity_map(struct amd_iommu *iommu,
- struct unity_map_entry *entry)
-{
- u16 bdf, i;
-
- for (i = entry->devid_start; i <= entry->devid_end; ++i) {
- bdf = amd_iommu_alias_table[i];
- if (amd_iommu_rlookup_table[bdf] == iommu)
- return 1;
- }
-
- return 0;
-}
-
-/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
- struct unity_map_entry *e)
-{
- u64 addr;
- int ret;
-
- for (addr = e->address_start; addr < e->address_end;
- addr += PAGE_SIZE) {
- ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
- PAGE_SIZE);
- if (ret)
- return ret;
- /*
- * if unity mapping is in aperture range mark the page
- * as allocated in the aperture
- */
- if (addr < dma_dom->aperture_size)
- __set_bit(addr >> PAGE_SHIFT,
- dma_dom->aperture[0]->bitmap);
- }
-
- return 0;
-}
-
-/*
- * Init the unity mappings for a specific IOMMU in the system
- *
- * Basically iterates over all unity mapping entries and applies them to
- * the default domain DMA of that IOMMU if necessary.
- */
-static int iommu_init_unity_mappings(struct amd_iommu *iommu)
-{
- struct unity_map_entry *entry;
- int ret;
-
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
- if (!iommu_for_unity_map(iommu, entry))
- continue;
- ret = dma_ops_unity_map(iommu->default_dom, entry);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
- u16 devid)
-{
- struct unity_map_entry *e;
- int ret;
-
- list_for_each_entry(e, &amd_iommu_unity_map, list) {
- if (!(devid >= e->devid_start && devid <= e->devid_end))
- continue;
- ret = dma_ops_unity_map(dma_dom, e);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/****************************************************************************
*
* The next functions belong to the address allocator for the dma_ops
@@ -1704,14 +1582,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
int i = start >> APERTURE_RANGE_SHIFT;
- unsigned long boundary_size;
+ unsigned long boundary_size, mask;
unsigned long address = -1;
unsigned long limit;
next_bit >>= PAGE_SHIFT;
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- PAGE_SIZE) >> PAGE_SHIFT;
+ mask = dma_get_seg_boundary(dev);
+
+ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
+ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
for (;i < max_index; ++i) {
unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
@@ -1869,9 +1749,15 @@ static void free_pt_##LVL (unsigned long __pt) \
pt = (u64 *)__pt; \
\
for (i = 0; i < 512; ++i) { \
+ /* PTE present? */ \
if (!IOMMU_PTE_PRESENT(pt[i])) \
continue; \
\
+ /* Large PTE? */ \
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
+ PM_PTE_LEVEL(pt[i]) == 7) \
+ continue; \
+ \
p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
FN(p); \
} \
@@ -2008,7 +1894,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
goto free_dma_dom;
dma_dom->need_flush = false;
- dma_dom->target_dev = 0xffff;
add_domain_to_list(&dma_dom->domain);
@@ -2373,110 +2258,67 @@ static void detach_device(struct device *dev)
dev_data->ats.enabled = false;
}
-/*
- * Find out the protection domain structure for a given PCI device. This
- * will give us the pointer to the page table root for example.
- */
-static struct protection_domain *domain_for_device(struct device *dev)
-{
- struct iommu_dev_data *dev_data;
- struct protection_domain *dom = NULL;
- unsigned long flags;
-
- dev_data = get_dev_data(dev);
-
- if (dev_data->domain)
- return dev_data->domain;
-
- if (dev_data->alias_data != NULL) {
- struct iommu_dev_data *alias_data = dev_data->alias_data;
-
- read_lock_irqsave(&amd_iommu_devtable_lock, flags);
- if (alias_data->domain != NULL) {
- __attach_device(dev_data, alias_data->domain);
- dom = alias_data->domain;
- }
- read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- }
-
- return dom;
-}
-
-static int device_change_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static int amd_iommu_add_device(struct device *dev)
{
- struct dma_ops_domain *dma_domain;
- struct protection_domain *domain;
struct iommu_dev_data *dev_data;
- struct device *dev = data;
+ struct iommu_domain *domain;
struct amd_iommu *iommu;
- unsigned long flags;
u16 devid;
+ int ret;
- if (!check_device(dev))
+ if (!check_device(dev) || get_dev_data(dev))
return 0;
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
- dev_data = get_dev_data(dev);
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
- iommu_init_device(dev);
- init_iommu_group(dev);
+ ret = iommu_init_device(dev);
+ if (ret) {
+ if (ret != -ENOTSUPP)
+ pr_err("Failed to initialize device %s - trying to proceed anyway\n",
+ dev_name(dev));
- /*
- * dev_data is still NULL and
- * got initialized in iommu_init_device
- */
- dev_data = get_dev_data(dev);
+ iommu_ignore_device(dev);
+ dev->archdata.dma_ops = &nommu_dma_ops;
+ goto out;
+ }
+ init_iommu_group(dev);
- if (iommu_pass_through || dev_data->iommu_v2) {
- dev_data->passthrough = true;
- attach_device(dev, pt_domain);
- break;
- }
+ dev_data = get_dev_data(dev);
- domain = domain_for_device(dev);
+ BUG_ON(!dev_data);
- /* allocate a protection domain if a device is added */
- dma_domain = find_protection_domain(devid);
- if (!dma_domain) {
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain)
- goto out;
- dma_domain->target_dev = devid;
-
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
- list_add_tail(&dma_domain->list, &iommu_pd_list);
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
- }
+ if (dev_data->iommu_v2)
+ iommu_request_dm_for_dev(dev);
+ /* Domains are initialized for this device - have a look what we ended up with */
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ dev_data->passthrough = true;
+ dev->archdata.dma_ops = &nommu_dma_ops;
+ } else {
dev->archdata.dma_ops = &amd_iommu_dma_ops;
-
- break;
- case BUS_NOTIFY_REMOVED_DEVICE:
-
- iommu_uninit_device(dev);
-
- default:
- goto out;
}
+out:
iommu_completion_wait(iommu);
-out:
return 0;
}
-static struct notifier_block device_nb = {
- .notifier_call = device_change_notifier,
-};
-
-void amd_iommu_init_notifier(void)
+static void amd_iommu_remove_device(struct device *dev)
{
- bus_register_notifier(&pci_bus_type, &device_nb);
+ struct amd_iommu *iommu;
+ u16 devid;
+
+ if (!check_device(dev))
+ return;
+
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
+
+ iommu_uninit_device(dev);
+ iommu_completion_wait(iommu);
}
/*****************************************************************************
@@ -2495,28 +2337,20 @@ void amd_iommu_init_notifier(void)
static struct protection_domain *get_domain(struct device *dev)
{
struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u16 devid = get_device_id(dev);
+ struct iommu_domain *io_domain;
if (!check_device(dev))
return ERR_PTR(-EINVAL);
- domain = domain_for_device(dev);
- if (domain != NULL && !dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- if (domain != NULL)
- return domain;
+ io_domain = iommu_get_domain_for_dev(dev);
+ if (!io_domain)
+ return NULL;
- /* Device not bound yet - bind it */
- dma_dom = find_protection_domain(devid);
- if (!dma_dom)
- dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
- attach_device(dev, &dma_dom->domain);
- DUMP_printk("Using protection domain %d for device %s\n",
- dma_dom->domain.id, dev_name(dev));
+ domain = to_pdomain(io_domain);
+ if (!dma_ops_domain(domain))
+ return ERR_PTR(-EBUSY);
- return &dma_dom->domain;
+ return domain;
}
static void update_device_table(struct protection_domain *domain)
@@ -3012,54 +2846,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev);
}
-/*
- * The function for pre-allocating protection domains.
- *
- * If the driver core informs the DMA layer if a driver grabs a device
- * we don't need to preallocate the protection domains anymore.
- * For now we have to.
- */
-static void __init prealloc_protection_domains(void)
-{
- struct iommu_dev_data *dev_data;
- struct dma_ops_domain *dma_dom;
- struct pci_dev *dev = NULL;
- u16 devid;
-
- for_each_pci_dev(dev) {
-
- /* Do we handle this device? */
- if (!check_device(&dev->dev))
- continue;
-
- dev_data = get_dev_data(&dev->dev);
- if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
- /* Make sure passthrough domain is allocated */
- alloc_passthrough_domain();
- dev_data->passthrough = true;
- attach_device(&dev->dev, pt_domain);
- pr_info("AMD-Vi: Using passthrough domain for device %s\n",
- dev_name(&dev->dev));
- }
-
- /* Is there already any domain for it? */
- if (domain_for_device(&dev->dev))
- continue;
-
- devid = get_device_id(&dev->dev);
-
- dma_dom = dma_ops_domain_alloc();
- if (!dma_dom)
- continue;
- init_unity_mappings_for_device(dma_dom, devid);
- dma_dom->target_dev = devid;
-
- attach_device(&dev->dev, &dma_dom->domain);
-
- list_add_tail(&dma_dom->list, &iommu_pd_list);
- }
-}
-
static struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
@@ -3070,76 +2856,16 @@ static struct dma_map_ops amd_iommu_dma_ops = {
.dma_supported = amd_iommu_dma_supported,
};
-static unsigned device_dma_ops_init(void)
-{
- struct iommu_dev_data *dev_data;
- struct pci_dev *pdev = NULL;
- unsigned unhandled = 0;
-
- for_each_pci_dev(pdev) {
- if (!check_device(&pdev->dev)) {
-
- iommu_ignore_device(&pdev->dev);
-
- unhandled += 1;
- continue;
- }
-
- dev_data = get_dev_data(&pdev->dev);
-
- if (!dev_data->passthrough)
- pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
- else
- pdev->dev.archdata.dma_ops = &nommu_dma_ops;
- }
-
- return unhandled;
-}
-
-/*
- * The function which clues the AMD IOMMU driver into dma_ops.
- */
-
-void __init amd_iommu_init_api(void)
+int __init amd_iommu_init_api(void)
{
- bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+ return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
}
int __init amd_iommu_init_dma_ops(void)
{
- struct amd_iommu *iommu;
- int ret, unhandled;
-
- /*
- * first allocate a default protection domain for every IOMMU we
- * found in the system. Devices not assigned to any other
- * protection domain will be assigned to the default one.
- */
- for_each_iommu(iommu) {
- iommu->default_dom = dma_ops_domain_alloc();
- if (iommu->default_dom == NULL)
- return -ENOMEM;
- iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
- ret = iommu_init_unity_mappings(iommu);
- if (ret)
- goto free_domains;
- }
-
- /*
- * Pre-allocate the protection domains for each device.
- */
- prealloc_protection_domains();
-
iommu_detected = 1;
swiotlb = 0;
- /* Make the driver finally visible to the drivers */
- unhandled = device_dma_ops_init();
- if (unhandled && max_pfn > MAX_DMA32_PFN) {
- /* There are unhandled devices - initialize swiotlb for them */
- swiotlb = 1;
- }
-
amd_iommu_stats_init();
if (amd_iommu_unmap_flush)
@@ -3148,14 +2874,6 @@ int __init amd_iommu_init_dma_ops(void)
pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
return 0;
-
-free_domains:
-
- for_each_iommu(iommu) {
- dma_ops_domain_free(iommu->default_dom);
- }
-
- return ret;
}
/*****************************************************************************
@@ -3222,7 +2940,7 @@ out_err:
return NULL;
}
-static int __init alloc_passthrough_domain(void)
+static int alloc_passthrough_domain(void)
{
if (pt_domain != NULL)
return 0;
@@ -3240,30 +2958,46 @@ static int __init alloc_passthrough_domain(void)
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
+ struct dma_ops_domain *dma_domain;
- /* We only support unmanaged domains for now */
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
- pdomain = protection_domain_alloc();
- if (!pdomain)
- goto out_free;
+ switch (type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ pdomain = protection_domain_alloc();
+ if (!pdomain)
+ return NULL;
- pdomain->mode = PAGE_MODE_3_LEVEL;
- pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pdomain->pt_root)
- goto out_free;
+ pdomain->mode = PAGE_MODE_3_LEVEL;
+ pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pdomain->pt_root) {
+ protection_domain_free(pdomain);
+ return NULL;
+ }
- pdomain->domain.geometry.aperture_start = 0;
- pdomain->domain.geometry.aperture_end = ~0ULL;
- pdomain->domain.geometry.force_aperture = true;
+ pdomain->domain.geometry.aperture_start = 0;
+ pdomain->domain.geometry.aperture_end = ~0ULL;
+ pdomain->domain.geometry.force_aperture = true;
- return &pdomain->domain;
+ break;
+ case IOMMU_DOMAIN_DMA:
+ dma_domain = dma_ops_domain_alloc();
+ if (!dma_domain) {
+ pr_err("AMD-Vi: Failed to allocate\n");
+ return NULL;
+ }
+ pdomain = &dma_domain->domain;
+ break;
+ case IOMMU_DOMAIN_IDENTITY:
+ pdomain = protection_domain_alloc();
+ if (!pdomain)
+ return NULL;
-out_free:
- protection_domain_free(pdomain);
+ pdomain->mode = PAGE_MODE_NONE;
+ break;
+ default:
+ return NULL;
+ }
- return NULL;
+ return &pdomain->domain;
}
static void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -3413,6 +3147,47 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
+static void amd_iommu_get_dm_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct unity_map_entry *entry;
+ u16 devid;
+
+ devid = get_device_id(dev);
+
+ list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ struct iommu_dm_region *region;
+
+ if (devid < entry->devid_start || devid > entry->devid_end)
+ continue;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ pr_err("Out of memory allocating dm-regions for %s\n",
+ dev_name(dev));
+ return;
+ }
+
+ region->start = entry->address_start;
+ region->length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ region->prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ region->prot |= IOMMU_WRITE;
+
+ list_add_tail(&region->list, head);
+ }
+}
+
+static void amd_iommu_put_dm_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_dm_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -3423,6 +3198,10 @@ static const struct iommu_ops amd_iommu_ops = {
.unmap = amd_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = amd_iommu_iova_to_phys,
+ .add_device = amd_iommu_add_device,
+ .remove_device = amd_iommu_remove_device,
+ .get_dm_regions = amd_iommu_get_dm_regions,
+ .put_dm_regions = amd_iommu_put_dm_regions,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 450ef5001a65..dbac49cea7a1 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -226,6 +226,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(void);
static inline void update_last_devid(u16 devid)
{
@@ -1385,9 +1386,15 @@ static int __init amd_iommu_init_pci(void)
break;
}
- ret = amd_iommu_init_devices();
+ init_device_table_dma();
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
+ ret = amd_iommu_init_api();
- print_iommu_info();
+ if (!ret)
+ print_iommu_info();
return ret;
}
@@ -1825,8 +1832,6 @@ static bool __init check_ioapic_information(void)
static void __init free_dma_resources(void)
{
- amd_iommu_uninit_devices();
-
free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
get_order(MAX_DOMAIN_ID/8));
@@ -2019,27 +2024,10 @@ static bool detect_ivrs(void)
static int amd_iommu_init_dma(void)
{
- struct amd_iommu *iommu;
- int ret;
-
if (iommu_pass_through)
- ret = amd_iommu_init_passthrough();
+ return amd_iommu_init_passthrough();
else
- ret = amd_iommu_init_dma_ops();
-
- if (ret)
- return ret;
-
- init_device_table_dma();
-
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
-
- amd_iommu_init_api();
-
- amd_iommu_init_notifier();
-
- return 0;
+ return amd_iommu_init_dma_ops();
}
/****************************************************************************
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 72b0fd455e24..9ed1c4330551 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -30,7 +30,7 @@ extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
-extern void amd_iommu_init_api(void);
+extern int amd_iommu_init_api(void);
/* Needed for interrupt remapping */
extern int amd_iommu_prepare(void);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 05030e523771..bb56560ac6ca 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -446,8 +446,6 @@ struct aperture_range {
* Data container for a dma_ops specific protection domain
*/
struct dma_ops_domain {
- struct list_head list;
-
/* generic protection domain information */
struct protection_domain domain;
@@ -462,12 +460,6 @@ struct dma_ops_domain {
/* This will be set to true when TLB needs to be flushed */
bool need_flush;
-
- /*
- * if this is a preallocated domain, keep the device for which it was
- * preallocated in this variable
- */
- u16 target_dev;
};
/*
@@ -552,9 +544,6 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
- /* default dma_ops domain for that IOMMU */
- struct dma_ops_domain *default_dom;
-
/* IOMMU sysfs device */
struct device *iommu_dev;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
new file mode 100644
index 000000000000..f14130121298
--- /dev/null
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -0,0 +1,2670 @@
+/*
+ * IOMMU API for ARM architected SMMUv3 implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver is powered by bad coffee and bombay mix.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "io-pgtable.h"
+
+/* MMIO registers */
+#define ARM_SMMU_IDR0 0x0
+#define IDR0_ST_LVL_SHIFT 27
+#define IDR0_ST_LVL_MASK 0x3
+#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
+#define IDR0_STALL_MODEL (3 << 24)
+#define IDR0_TTENDIAN_SHIFT 21
+#define IDR0_TTENDIAN_MASK 0x3
+#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
+#define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
+#define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
+#define IDR0_CD2L (1 << 19)
+#define IDR0_VMID16 (1 << 18)
+#define IDR0_PRI (1 << 16)
+#define IDR0_SEV (1 << 14)
+#define IDR0_MSI (1 << 13)
+#define IDR0_ASID16 (1 << 12)
+#define IDR0_ATS (1 << 10)
+#define IDR0_HYP (1 << 9)
+#define IDR0_COHACC (1 << 4)
+#define IDR0_TTF_SHIFT 2
+#define IDR0_TTF_MASK 0x3
+#define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
+#define IDR0_S1P (1 << 1)
+#define IDR0_S2P (1 << 0)
+
+#define ARM_SMMU_IDR1 0x4
+#define IDR1_TABLES_PRESET (1 << 30)
+#define IDR1_QUEUES_PRESET (1 << 29)
+#define IDR1_REL (1 << 28)
+#define IDR1_CMDQ_SHIFT 21
+#define IDR1_CMDQ_MASK 0x1f
+#define IDR1_EVTQ_SHIFT 16
+#define IDR1_EVTQ_MASK 0x1f
+#define IDR1_PRIQ_SHIFT 11
+#define IDR1_PRIQ_MASK 0x1f
+#define IDR1_SSID_SHIFT 6
+#define IDR1_SSID_MASK 0x1f
+#define IDR1_SID_SHIFT 0
+#define IDR1_SID_MASK 0x3f
+
+#define ARM_SMMU_IDR5 0x14
+#define IDR5_STALL_MAX_SHIFT 16
+#define IDR5_STALL_MAX_MASK 0xffff
+#define IDR5_GRAN64K (1 << 6)
+#define IDR5_GRAN16K (1 << 5)
+#define IDR5_GRAN4K (1 << 4)
+#define IDR5_OAS_SHIFT 0
+#define IDR5_OAS_MASK 0x7
+#define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
+#define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
+#define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
+#define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
+#define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
+#define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)
+
+#define ARM_SMMU_CR0 0x20
+#define CR0_CMDQEN (1 << 3)
+#define CR0_EVTQEN (1 << 2)
+#define CR0_PRIQEN (1 << 1)
+#define CR0_SMMUEN (1 << 0)
+
+#define ARM_SMMU_CR0ACK 0x24
+
+#define ARM_SMMU_CR1 0x28
+#define CR1_SH_NSH 0
+#define CR1_SH_OSH 2
+#define CR1_SH_ISH 3
+#define CR1_CACHE_NC 0
+#define CR1_CACHE_WB 1
+#define CR1_CACHE_WT 2
+#define CR1_TABLE_SH_SHIFT 10
+#define CR1_TABLE_OC_SHIFT 8
+#define CR1_TABLE_IC_SHIFT 6
+#define CR1_QUEUE_SH_SHIFT 4
+#define CR1_QUEUE_OC_SHIFT 2
+#define CR1_QUEUE_IC_SHIFT 0
+
+#define ARM_SMMU_CR2 0x2c
+#define CR2_PTM (1 << 2)
+#define CR2_RECINVSID (1 << 1)
+#define CR2_E2H (1 << 0)
+
+#define ARM_SMMU_IRQ_CTRL 0x50
+#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
+#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
+
+#define ARM_SMMU_IRQ_CTRLACK 0x54
+
+#define ARM_SMMU_GERROR 0x60
+#define GERROR_SFM_ERR (1 << 8)
+#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
+#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
+#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
+#define GERROR_MSI_CMDQ_ABT_ER