author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-15 07:23:49 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-15 07:23:49 +0200
commit     23971bdffff5f7c904131dfb41c186711dc2c418 (patch)
tree       0cd5e379932a72af47dba17f8958908e7dd029cd
parent     c0fa2373f8cfed90437d8d7b17e0b1a84009a10a (diff)
parent     09b5269a1b3d47525d7c25efeb16f5407ef82ea2 (diff)
Merge tag 'iommu-updates-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "This pull-request includes:

   - change in the IOMMU-API to convert the former iommu_domain_has_cap()
     function to just iommu_capable()

   - various fixes in handling RMRR ranges for the VT-d driver (one fix
     requires a device driver core change which was acked by Greg KH)

   - the AMD IOMMU driver now assigns and deassigns complete alias groups
     to fix issues with devices using the wrong PCI request-id

   - MMU-401 support for the ARM SMMU driver

   - multi-master IOMMU group support for the ARM SMMU driver

   - various other small fixes all over the place"

* tag 'iommu-updates-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (41 commits)
  iommu/vt-d: Work around broken RMRR firmware entries
  iommu/vt-d: Store bus information in RMRR PCI device path
  iommu/vt-d: Only remove domain when device is removed
  driver core: Add BUS_NOTIFY_REMOVED_DEVICE event
  iommu/amd: Fix devid mapping for ivrs_ioapic override
  iommu/irq_remapping: Fix the regression of hpet irq remapping
  iommu: Fix bus notifier breakage
  iommu/amd: Split init_iommu_group() from iommu_init_device()
  iommu: Rework iommu_group_get_for_pci_dev()
  iommu: Make of_device_id array const
  amd_iommu: do not dereference a NULL pointer address.
  iommu/omap: Remove omap_iommu unused owner field
  iommu: Remove iommu_domain_has_cap() API function
  IB/usnic: Convert to use new iommu_capable() API function
  vfio: Convert to use new iommu_capable() API function
  kvm: iommu: Convert to use new iommu_capable() API function
  iommu/tegra: Convert to iommu_capable() API function
  iommu/msm: Convert to iommu_capable() API function
  iommu/vt-d: Convert to iommu_capable() API function
  iommu/fsl: Convert to iommu_capable() API function
  ...
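The core API change in this pull shows up repeatedly in the diff below: capability queries move from an allocated domain to the bus. A minimal before/after sketch against the v3.18 include/linux/iommu.h interface (the wrapper functions here are hypothetical; only iommu_domain_has_cap(), iommu_capable() and IOMMU_CAP_CACHE_COHERENCY come from the kernel):

    #include <linux/iommu.h>

    /* Before this merge: a domain had to exist before the capability
     * of its IOMMU could be queried. */
    static bool coherent_old(struct iommu_domain *domain)
    {
            return iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY);
    }

    /* After this merge: the capability is a property of the bus's IOMMU,
     * so it can be checked before any domain is allocated. */
    static bool coherent_new(struct device *dev)
    {
            return iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY);
    }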
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.txt |   1
-rw-r--r--  drivers/base/core.c                                  |   3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c             |   2
-rw-r--r--  drivers/iommu/amd_iommu.c                            | 117
-rw-r--r--  drivers/iommu/amd_iommu_init.c                       |  21
-rw-r--r--  drivers/iommu/amd_iommu_types.h                      |  21
-rw-r--r--  drivers/iommu/arm-smmu.c                             | 220
-rw-r--r--  drivers/iommu/dmar.c                                 |  25
-rw-r--r--  drivers/iommu/exynos-iommu.c                         |  51
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c                      |   5
-rw-r--r--  drivers/iommu/intel-iommu.c                          |  16
-rw-r--r--  drivers/iommu/intel_irq_remapping.c                  |   7
-rw-r--r--  drivers/iommu/iommu.c                                | 208
-rw-r--r--  drivers/iommu/irq_remapping.c                        |  11
-rw-r--r--  drivers/iommu/irq_remapping.h                        |   2
-rw-r--r--  drivers/iommu/msm_iommu.c                            |   7
-rw-r--r--  drivers/iommu/omap-iommu.c                           |  27
-rw-r--r--  drivers/iommu/omap-iommu.h                           |   1
-rw-r--r--  drivers/iommu/tegra-gart.c                           |   9
-rw-r--r--  drivers/iommu/tegra-smmu.c                           |   9
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c                      |   4
-rw-r--r--  include/linux/device.h                               |  11
-rw-r--r--  include/linux/dmar.h                                 |   8
-rw-r--r--  include/linux/iommu.h                                |  25
-rw-r--r--  virt/kvm/iommu.c                                     |   6
25 files changed, 461 insertions, 356 deletions
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 2d0f7cd867ea..06760503a819 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -14,6 +14,7 @@ conditions.
"arm,smmu-v1"
"arm,smmu-v2"
"arm,mmu-400"
+ "arm,mmu-401"
"arm,mmu-500"
depending on the particular implementation and/or the
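A device-tree node using the new compatible string might look like the sketch below; the unit address, interrupt specifiers and mmu-masters values are hypothetical, only the "arm,mmu-401" string comes from this binding change (MMU-401 is an SMMUv1-class implementation, hence the "arm,smmu-v1" fallback):

    smmu@2b400000 {
            compatible = "arm,mmu-401", "arm,smmu-v1";
            reg = <0x2b400000 0x10000>;
            #global-interrupts = <1>;
            interrupts = <0 68 4>,                /* global fault (hypothetical) */
                         <0 69 4>;                /* context fault (hypothetical) */
            mmu-masters = <&dma0 0xd01d 0xd01e>;  /* hypothetical master StreamIDs */
    };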
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 28b808c73e8e..14d162952c3b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1211,6 +1211,9 @@ void device_del(struct device *dev)
*/
if (platform_notify_remove)
platform_notify_remove(dev);
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_REMOVED_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
cleanup_device_parent(dev);
kobject_del(&dev->kobj);
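The hunk above fires the new event after the device is fully torn down, complementing BUS_NOTIFY_DEL_DEVICE, which is sent before removal. A hypothetical subscriber, sketched against the notifier API in include/linux/device.h, shows how the VT-d fix in this series can defer domain teardown to the later event:

    #include <linux/device.h>
    #include <linux/notifier.h>
    #include <linux/pci.h>

    static int example_bus_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
    {
            struct device *dev = data;

            switch (action) {
            case BUS_NOTIFY_DEL_DEVICE:
                    /* Removal is starting; a driver may still be unbinding,
                     * so per-device IOMMU state must stay alive here. */
                    dev_dbg(dev, "going away\n");
                    break;
            case BUS_NOTIFY_REMOVED_DEVICE:
                    /* Device is gone; now it is safe to release its
                     * IOMMU domain and related bookkeeping. */
                    dev_dbg(dev, "removed\n");
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_bus_notify,
    };

    /* Registered once at init time, e.g.:
     * bus_register_notifier(&pci_bus_type, &example_nb); */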
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 801a1d6937e4..417de1f32960 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ecb0109a5360..505a9adac2d5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -88,6 +88,27 @@ int amd_iommu_max_glx_val = -1;
static struct dma_map_ops amd_iommu_dma_ops;
/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+ struct list_head list; /* For domain->dev_list */
+ struct list_head dev_data_list; /* For global dev_data_list */
+ struct list_head alias_list; /* Link alias-groups together */
+ struct iommu_dev_data *alias_data;/* The alias dev_data */
+ struct protection_domain *domain; /* Domain the device is bound to */
+ u16 devid; /* PCI Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ bool passthrough; /* Default for device is pt_domain */
+ struct {
+ bool enabled;
+ int qdep;
+ } ats; /* ATS state */
+ bool pri_tlp; /* PASID TLB required for
+ PPR completions */
+ u32 errata; /* Bitmap for errata to apply */
+};
+
+/*
* general struct to manage commands send to an IOMMU
*/
struct iommu_cmd {
@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
if (!dev_data)
return NULL;
+ INIT_LIST_HEAD(&dev_data->alias_list);
+
dev_data->devid = devid;
- atomic_set(&dev_data->bind, 0);
spin_lock_irqsave(&dev_data_list_lock, flags);
list_add_tail(&dev_data->dev_data_list, &dev_data_list);
@@ -260,17 +282,13 @@ static bool check_device(struct device *dev)
return true;
}
-static int init_iommu_group(struct device *dev)
+static void init_iommu_group(struct device *dev)
{
struct iommu_group *group;
group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- return 0;
+ if (!IS_ERR(group))
+ iommu_group_put(group);
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
u16 alias;
- int ret;
if (dev->archdata.iommu)
return 0;
@@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev)
return -ENOTSUPP;
}
dev_data->alias_data = alias_data;
- }
- ret = init_iommu_group(dev);
- if (ret) {
- free_dev_data(dev_data);
- return ret;
+ /* Add device to the alias_list */
+ list_add(&dev_data->alias_list, &alias_data->alias_list);
}
if (pci_iommuv2_capable(pdev)) {
@@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void)
goto out_free;
}
+ /*
+ * Initialize IOMMU groups only after iommu_init_device() has
+ * had a chance to populate any IVRS defined aliases.
+ */
+ for_each_pci_dev(pdev) {
+ if (check_device(&pdev->dev))
+ init_iommu_group(&pdev->dev);
+ }
+
return 0;
out_free:
@@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom,
count = PAGE_SIZE_PTE_COUNT(page_size);
pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ if (!pte)
+ return -ENOMEM;
+
for (i = 0; i < count; ++i)
if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY;
@@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data)
static int __attach_device(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
+ struct iommu_dev_data *head, *entry;
int ret;
/* lock domain */
spin_lock(&domain->lock);
- if (dev_data->alias_data != NULL) {
- struct iommu_dev_data *alias_data = dev_data->alias_data;
+ head = dev_data;
- /* Some sanity checks */
- ret = -EBUSY;
- if (alias_data->domain != NULL &&
- alias_data->domain != domain)
- goto out_unlock;
+ if (head->alias_data != NULL)
+ head = head->alias_data;
- if (dev_data->domain != NULL &&
- dev_data->domain != domain)
- goto out_unlock;
+ /* Now we have the root of the alias group, if any */
- /* Do real assignment */
- if (alias_data->domain == NULL)
- do_attach(alias_data, domain);
-
- atomic_inc(&alias_data->bind);
- }
+ ret = -EBUSY;
+ if (head->domain != NULL)
+ goto out_unlock;
- if (dev_data->domain == NULL)
- do_attach(dev_data, domain);
+ /* Attach alias group root */
+ do_attach(head, domain);
- atomic_inc(&dev_data->bind);
+ /* Attach other devices in the alias group */
+ list_for_each_entry(entry, &head->alias_list, alias_list)
+ do_attach(entry, domain);
ret = 0;
@@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev,
*/
static void __detach_device(struct iommu_dev_data *dev_data)
{
+ struct iommu_dev_data *head, *entry;
struct protection_domain *domain;
unsigned long flags;
@@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data)
spin_lock_irqsave(&domain->lock, flags);
- if (dev_data->alias_data != NULL) {
- struct iommu_dev_data *alias_data = dev_data->alias_data;
+ head = dev_data;
+ if (head->alias_data != NULL)
+ head = head->alias_data;
- if (atomic_dec_and_test(&alias_data->bind))
- do_detach(alias_data);
- }
+ list_for_each_entry(entry, &head->alias_list, alias_list)
+ do_detach(entry);
- if (atomic_dec_and_test(&dev_data->bind))
- do_detach(dev_data);
+ do_detach(head);
spin_unlock_irqrestore(&domain->lock, flags);
@@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb,
case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev);
+ init_iommu_group(dev);
/*
* dev_data is still NULL and
@@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain)
entry = list_first_entry(&domain->dev_list,
struct iommu_dev_data, list);
__detach_device(entry);
- atomic_set(&entry->bind, 0);
}
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return paddr;
}
-static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
+static bool amd_iommu_capable(enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- return 1;
+ return true;
case IOMMU_CAP_INTR_REMAP:
- return irq_remapping_enabled;
+ return (irq_remapping_enabled == 1);
}
- return 0;
+ return false;
}
static const struct iommu_ops amd_iommu_ops = {
+ .capable = amd_iommu_capable,
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
.attach_dev = amd_iommu_attach_device,
@@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
- .domain_has_cap = amd_iommu_domain_has_cap,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
@@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
return 0;
}
-static int setup_hpet_msi(unsigned int irq, unsigned int id)
+static int alloc_hpet_msi(unsigned int irq, unsigned int id)
{
struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
@@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
.compose_msi_msg = compose_msi_msg,
.msi_alloc_irq = msi_alloc_irq,
.msi_setup_irq = msi_setup_irq,
- .setup_hpet_msi = setup_hpet_msi,
+ .alloc_hpet_msi = alloc_hpet_msi,
};
#endif
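For context on the alias-group rework above: a device behind a PCIe-to-PCI bridge may issue DMA with the bridge's request-id rather than its own, so attaching only the device's devid would leave those aliased transactions untranslated. The aliases come from the PCI core; a hypothetical sketch using the same helper the driver's __last_alias() callback is passed to:

    #include <linux/pci.h>

    /* Callback invoked once per request-id the device may emit. */
    static int show_alias(struct pci_dev *pdev, u16 alias, void *data)
    {
            dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
                     PCI_BUS_NUM(alias), PCI_SLOT(alias), PCI_FUNC(alias));
            return 0;  /* keep walking */
    }

    /* pci_for_each_dma_alias(pdev, show_alias, NULL); */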
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3783e0b44df6..b0522f15730f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid);
}
-static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
+static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+ *devid = entry->devid;
+
return 0;
}
@@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
return -ENOMEM;
entry->id = id;
- entry->devid = devid;
+ entry->devid = *devid;
entry->cmd_line = cmd_line;
list_add_tail(&entry->list, list);
@@ -754,7 +756,7 @@ static int __init add_early_maps(void)
for (i = 0; i < early_ioapic_map_size; ++i) {
ret = add_special_device(IVHD_SPECIAL_IOAPIC,
early_ioapic_map[i].id,
- early_ioapic_map[i].devid,
+ &early_ioapic_map[i].devid,
early_ioapic_map[i].cmd_line);
if (ret)
return ret;
@@ -763,7 +765,7 @@ static int __init add_early_maps(void)
for (i = 0; i < early_hpet_map_size; ++i) {
ret = add_special_device(IVHD_SPECIAL_HPET,
early_hpet_map[i].id,
- early_hpet_map[i].devid,
+ &early_hpet_map[i].devid,
early_hpet_map[i].cmd_line);
if (ret)
return ret;
@@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_SLOT(devid),
PCI_FUNC(devid));
- set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
- ret = add_special_device(type, handle, devid, false);
+ ret = add_special_device(type, handle, &devid, false);
if (ret)
return ret;
+
+ /*
+ * add_special_device might update the devid in case a
+ * command-line override is present. So call
+ * set_dev_entry_from_acpi after add_special_device.
+ */
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
break;
}
default:
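The pointer-based devid parameter above exists so a command-line override can win against the firmware-provided IVRS entry. For reference, the documented override syntax this series fixes (values hypothetical, mapping IOAPIC id 10 and HPET id 0 to PCI device 00:14.0):

    ivrs_ioapic[10]=00:14.0
    ivrs_hpet[0]=00:14.0

With such an override present, add_special_device() now writes the overriding devid back through the pointer, so the subsequent set_dev_entry_from_acpi() call programs the correct device-table entry.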
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 8e43b7cba133..cec51a8ba844 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -418,27 +418,6 @@ struct protection_domain {
};
/*
- * This struct contains device specific data for the IOMMU
- */
-struct iommu_dev_data {
- struct list_head list; /* For domain->dev_list */
- struct list_head dev_data_list; /* For global dev_data_list */
- struct iommu_dev_data *alias_data;/* The alias dev_data */
- struct protection_domain *domain; /* Domain the device is bound to */
- atomic_t bind; /* Domain attach reference count */
- u16 devid; /* PCI Device ID */
- bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Default for device is pt_domain */
- struct {
- bool enabled;
- int qdep;
- } ats; /* ATS state */
- bool pri_tlp; /* PASID TLB required for
- PPR completions */
- u32 errata; /* Bitmap for errata to apply */
-};
-
-/*
* For dynamic growth the aperture size is split into ranges of 128MB of
* DMA address space each. This struct represents one such range.
*/
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a83cc2a2a2ca..60558f794922 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - 4k and 64k pages, with contiguous pte hints.
- * - Up to 42-bit addressing (dependent on VA_BITS)
+ * - Up to 48-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -59,7 +59,7 @@
/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
-#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
+#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
/*
* SMMU global address space with conditional offset to access secure
@@ -224,7 +224,7 @@
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n) ((n) * (smmu)->pagesize)
+#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_RESUME 0x8
@@ -326,6 +326,16 @@
#define FSYNR0_WNR (1 << 4)
+static int force_stage;
+module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_stage,
+ "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+
+enum arm_smmu_arch_version {
+ ARM_SMMU_V1 = 1,
+ ARM_SMMU_V2,
+};
+
struct arm_smmu_smr {
u8 idx;
u16 mask;
@@ -349,7 +359,7 @@ struct arm_smmu_device {
void __iomem *base;
unsigned long size;
- unsigned long pagesize;
+ unsigned long pgshift;
#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
@@ -360,7 +370,7 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
u32 options;
- int version;
+ enum arm_smmu_arch_version version;
u32 num_context_banks;
u32 num_s2_context_banks;
@@ -370,8 +380,9 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
- unsigned long input_size;
+ unsigned long s1_input_size;
unsigned long s1_output_size;
+ unsigned long s2_input_size;
unsigned long s2_output_size;
u32 num_global_irqs;
@@ -426,17 +437,17 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
} while (arm_smmu_options[++i].opt);
}
-static struct device *dev_get_master_dev(struct device *dev)
+static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
while (!pci_is_root_bus(bus))
bus = bus->parent;
- return bus->bridge->parent;
+ return bus->bridge->parent->of_node;
}
- return dev;
+ return dev->of_node;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
@@ -461,15 +472,17 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
}
static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
+find_smmu_master_cfg(struct device *dev)
{
- struct arm_smmu_master *master;
+ struct arm_smmu_master_cfg *cfg = NULL;
+ struct iommu_group *group = iommu_group_get(dev);
- if (dev_is_pci(dev))
- return dev->archdata.iommu;
+ if (group) {
+ cfg = iommu_group_get_iommudata(group);
+ iommu_group_put(group);
+ }
- master = find_smmu_master(smmu, dev->of_node);
- return master ? &master->cfg : NULL;
+ return cfg;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
@@ -545,7 +558,7 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
struct arm_smmu_device *smmu;
struct arm_smmu_master *master = NULL;
- struct device_node *dev_node = dev_get_master_dev(dev)->of_node;
+ struct device_node *dev_node = dev_get_dev_node(dev);
spin_lock(&arm_smmu_devices_lock);
list_for_each_entry(smmu, &arm_smmu_devices, list) {
@@ -729,7 +742,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
/* CBAR */
reg = cfg->cbar;
- if (smmu->version == 1)
+ if (smmu->version == ARM_SMMU_V1)
reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
/*
@@ -744,7 +757,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- if (smmu->version > 1) {
+ if (smmu->version > ARM_SMMU_V1) {
/* CBA2R */
#ifdef CONFIG_64BIT
reg = CBA2R_RW64_64BIT;
@@ -755,7 +768,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
/* TTBCR2 */
- switch (smmu->input_size) {
+ switch (smmu->s1_input_size) {
case 32:
reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
break;
@@ -817,14 +830,14 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
* TTBCR
* We use long descriptor, with inner-shareable WBWA tables in TTBR0.
*/
- if (smmu->version > 1) {
+ if (smmu->version > ARM_SMMU_V1) {
if (PAGE_SIZE == SZ_4K)
reg = TTBCR_TG0_4K;
else
reg = TTBCR_TG0_64K;
if (!stage1) {
- reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+ reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
switch (smmu->s2_output_size) {
case 32:
@@ -847,7 +860,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
break;
}
} else {
- reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
+ reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
}
} else {
reg = 0;
@@ -914,7 +927,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
goto out_unlock;
cfg->cbndx = ret;
- if (smmu->version == 1) {
+ if (smmu->version == ARM_SMMU_V1) {
cfg->irptndx = atomic_inc_return(&smmu->irptndx);
cfg->irptndx %= smmu->num_context_irqs;
} else {
@@ -1151,9 +1164,10 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ /* Devices in an IOMMU group may already be configured */
ret = arm_smmu_master_configure_smrs(smmu, cfg);
if (ret)
- return ret;
+ return ret == -EEXIST ? 0 : ret;
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx, s2cr;
@@ -1174,6 +1188,10 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ /* An IOMMU group is torn down by the first device to be removed */
+ if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+ return;
+
/*
* We *must* clear the S2CR first, because freeing the SMR means
* that it can be re-allocated immediately.
@@ -1195,12 +1213,17 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu, *dom_smmu;
struct arm_smmu_master_cfg *cfg;
- smmu = dev_get_master_dev(dev)->archdata.iommu;
+ smmu = find_smmu_for_device(dev);
if (!smmu) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
return -ENXIO;
}
+ if (dev->archdata.iommu) {
+ dev_err(dev, "already attached to IOMMU domain\n");
+ return -EEXIST;
+ }
+
/*
* Sanity check the domain. We don't support domains across
* different SMMUs.
@@ -1223,11 +1246,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
+ cfg = find_smmu_master_cfg(dev);
if (!cfg)
return -ENODEV;
- return arm_smmu_domain_add_master(smmu_domain, cfg);
+ ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+ if (!ret)
+ dev->archdata.iommu = domain;
+ return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1235,9 +1261,12 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_master_cfg *cfg;
- cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
- if (cfg)
- arm_smmu_domain_remove_master(smmu_domain, cfg);
+ cfg = find_smmu_master_cfg(dev);
+ if (!cfg)
+ return;
+
+ dev->archdata.iommu = NULL;
+ arm_smmu_domain_remove_master(smmu_domain, cfg);
}
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
@@ -1379,6 +1408,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
prot, stage);
phys += next - addr;
+ pfn = __phys_to_pfn(phys);
} while (pmd++, addr = next, addr < end);
return ret;
@@ -1431,9 +1461,11 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
+ input_mask = (1ULL << smmu->s2_input_size) - 1;
output_mask = (1ULL << smmu->s2_output_size) - 1;
} else {
stage = 1;
+ input_mask = (1ULL << smmu->s1_input_size) - 1;
output_mask = (1ULL << smmu->s1_output_size) - 1;
}
@@ -1443,7 +1475,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (size & ~PAGE_MASK)
return -EINVAL;
- input_mask = (1ULL << smmu->input_size) - 1;
if ((phys_addr_t)iova & ~input_mask)
return -ERANGE;
@@ -1526,20 +1557,19 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}
-static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
+static bool arm_smmu_capable(enum iommu_cap cap)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 features = smmu ? smmu->features : 0;
-
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- return features & ARM_SMMU_FEAT_COHERENT_WALK;
+ /*
+ * Return true here as the SMMU can always send out coherent
+ * requests.
+ */
+ return true;
case IOMMU_CAP_INTR_REMAP:
- return 1; /* MSIs are just memory writes */
+ return true; /* MSIs are just memory writes */
default:
- return 0;
+ return false;
}
}
@@ -1549,17 +1579,19 @@ static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
return 0; /* Continue walking */
}
+static void __arm_smmu_release_pci_iommudata(void *data)
+{
+ kfree(data);
+}
+
static int arm_smmu_add_device(struct device *dev)
{
struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg;
struct iommu_group *group;
+ void (*releasefn)(void *) = NULL;
int ret;
- if (dev->archdata.iommu) {
- dev_warn(dev, "IOMMU driver already assigned to device\n");
- return -EINVAL;
- }
-
smmu = find_smmu_for_device(dev);
if (!smmu)
return -ENODEV;
@@ -1571,7 +1603,6 @@ static int arm_smmu_add_device(struct device *dev)
}
if (dev_is_pci(dev)) {
- struct arm_smmu_master_cfg *cfg;
struct pci_dev *pdev = to_pci_dev(dev);
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
@@ -1587,11 +1618,20 @@ static int arm_smmu_add_device(struct device *dev)
*/
pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
&cfg->streamids[0]);
- dev->archdata.iommu = cfg;
+ releasefn = __arm_smmu_release_pci_iommudata;
} else {
- dev->archdata.iommu = smmu;
+ struct arm_smmu_master *master;
+
+ master = find_smmu_master(smmu, dev->of_node);
+ if (!master) {
+ ret = -ENODEV;
+ goto out_put_group;
+ }
+
+ cfg = &master->cfg;
}
+ iommu_group_set_iommudata(group, cfg, releasefn);
ret = iommu_group_add_device(group, dev);
out_put_group:
@@ -1601,14 +1641,11 @@ out_put_group:
static void arm_smmu_remove_device(struct device *dev)
{
- if (dev_is_pci(dev))
- kfree(dev->archdata.iommu);
-
- dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
}
static const struct iommu_ops arm_smmu_ops = {
+ .capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
.attach_dev = arm_smmu_attach_dev,
@@ -1616,7 +1653,6 @@ static const struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.iova_to_phys = arm_smmu_iova_to_phys,
- .domain_has_cap = arm_smmu_domain_has_cap,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
.pgsize_bitmap = (SECTION_SIZE |
@@ -1702,10 +1738,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
u32 id;
dev_notice(smmu->dev, "probing hardware configuration...\n");
-
- /* Primecell ID */
- id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
- smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
/* ID0 */
@@ -1716,6 +1748,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
#endif
+
+ /* Restrict available stages based on module parameter */
+ if (force_stage == 1)
+ id &= ~(ID0_S2TS | ID0_NTS);
+ else if (force_stage == 2)
+ id &= ~(ID0_S1TS | ID0_NTS);
+
if (id & ID0_S1TS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
dev_notice(smmu->dev, "\tstage 1 translation\n");
@@ -1732,8 +1771,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
}
if (!(smmu->features &
- (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
- ARM_SMMU_FEAT_TRANS_NESTED))) {
+ (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
dev_err(smmu->dev, "\tno translation support!\n");
return -ENODEV;
}
@@ -1779,12 +1817,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID1 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
- smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+ smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
size = 1 <<
(((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
- size *= (smmu->pagesize << 1);
+ size *= 2 << smmu->pgshift;
if (smmu->size != size)
dev_warn(smmu->dev,
"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
@@ -1803,28 +1841,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID2 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
size = arm_smmu