From f534d98b9d27053279da015d0187a09e3a090ccd Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 18 Sep 2020 12:18:51 +0200
Subject: iommu/arm-smmu-v3: Add SVA device feature

Implement the IOMMU device feature callbacks to support the SVA feature.
At the moment dev_has_feat() returns false since I/O Page Faults and BTM
aren't yet implemented.

Signed-off-by: Jean-Philippe Brucker
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/20200918101852.582559-12-jean-philippe@linaro.org
Signed-off-by: Will Deacon
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 49 +++++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     | 79 +++++++++++++++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h     | 26 ++++++++
 3 files changed, 154 insertions(+)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index cb94c0924196..9255c9600fb8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,6 +10,8 @@
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
 
+static DEFINE_MUTEX(sva_lock);
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -197,3 +199,50 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 
 	return true;
 }
+
+static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+		return false;
+
+	/* SSID and IOPF support are mandatory for the moment */
+	return master->ssid_bits && arm_smmu_iopf_supported(master);
+}
+
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+	bool enabled;
+
+	mutex_lock(&sva_lock);
+	enabled = master->sva_enabled;
+	mutex_unlock(&sva_lock);
+	return enabled;
+}
+
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+	mutex_lock(&sva_lock);
+	master->sva_enabled = true;
+	mutex_unlock(&sva_lock);
+
+	return 0;
+}
+
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+	mutex_lock(&sva_lock);
+	if (!list_empty(&master->bonds)) {
+		dev_err(master->dev, "cannot disable SVA, device is bound\n");
+		mutex_unlock(&sva_lock);
+		return -EBUSY;
+	}
+	master->sva_enabled = false;
+	mutex_unlock(&sva_lock);
+
+	return 0;
+}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index cc02d6d815e6..e634bbe60573 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2176,6 +2176,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	master = dev_iommu_priv_get(dev);
 	smmu = master->smmu;
 
+	/*
+	 * Checking that SVA is disabled ensures that this device isn't bound to
+	 * any mm, and can be safely detached from its old domain. Bonds cannot
+	 * be removed concurrently since we're holding the group mutex.
+	 */
+	if (arm_smmu_master_sva_enabled(master)) {
+		dev_err(dev, "cannot attach - SVA enabled\n");
+		return -EBUSY;
+	}
+
 	arm_smmu_detach_dev(master);
 
 	mutex_lock(&smmu_domain->init_mutex);
@@ -2323,6 +2333,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 	master->smmu = smmu;
 	master->sids = fwspec->ids;
 	master->num_sids = fwspec->num_ids;
+	INIT_LIST_HEAD(&master->bonds);
 	dev_iommu_priv_set(dev, master);
 
 	/* Check the SIDs are in range of the SMMU and our stream table */
@@ -2375,6 +2386,7 @@ static void arm_smmu_release_device(struct device *dev)
 		return;
 
 	master = dev_iommu_priv_get(dev);
+	WARN_ON(arm_smmu_master_sva_enabled(master));
 	arm_smmu_detach_dev(master);
 	arm_smmu_disable_pasid(master);
 	kfree(master);
@@ -2492,6 +2504,69 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 	iommu_dma_get_resv_regions(dev, head);
 }
 
+static bool arm_smmu_dev_has_feature(struct device *dev,
+				     enum iommu_dev_features feat)
+{
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+	if (!master)
+		return false;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_sva_supported(master);
+	default:
+		return false;
+	}
+}
+
+static bool arm_smmu_dev_feature_enabled(struct device *dev,
+					 enum iommu_dev_features feat)
+{
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+	if (!master)
+		return false;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_sva_enabled(master);
+	default:
+		return false;
+	}
+}
+
+static int arm_smmu_dev_enable_feature(struct device *dev,
+				       enum iommu_dev_features feat)
+{
+	if (!arm_smmu_dev_has_feature(dev, feat))
+		return -ENODEV;
+
+	if (arm_smmu_dev_feature_enabled(dev, feat))
+		return -EBUSY;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
+	default:
+		return -EINVAL;
+	}
+}
+
+static int arm_smmu_dev_disable_feature(struct device *dev,
+					enum iommu_dev_features feat)
+{
+	if (!arm_smmu_dev_feature_enabled(dev, feat))
+		return -EINVAL;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
+	default:
+		return -EINVAL;
+	}
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -2510,6 +2585,10 @@ static struct iommu_ops arm_smmu_ops = {
 	.of_xlate		= arm_smmu_of_xlate,
 	.get_resv_regions	= arm_smmu_get_resv_regions,
 	.put_resv_regions	= generic_iommu_put_resv_regions,
+	.dev_has_feat		= arm_smmu_dev_has_feature,
+	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
+	.dev_enable_feat	= arm_smmu_dev_enable_feature,
+	.dev_disable_feat	= arm_smmu_dev_disable_feature,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 454a935de445..d4b7f40ccb02 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -647,6 +647,8 @@ struct arm_smmu_master {
 	u32				*sids;
 	unsigned int			num_sids;
 	bool				ats_enabled;
+	bool				sva_enabled;
+	struct list_head		bonds;
 	unsigned int			ssid_bits;
 };
 
@@ -688,10 +690,34 @@ bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
 
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
 #else /* CONFIG_ARM_SMMU_V3_SVA */
 static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 {
 	return false;
 }
+
+static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+	return -ENODEV;
+}
+
+static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_ARM_SMMU_V3_SVA */
 #endif /* _ARM_SMMU_V3_H */
--
cgit v1.2.3
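
Editor's note (not part of the patch): the new dev_enable_feat/dev_disable_feat callbacks are reached through the generic iommu_dev_*_feature() wrappers in include/linux/iommu.h. As a rough illustration of the intended usage, below is a minimal sketch of how an SVA-capable endpoint driver might exercise them; my_dev_probe() and my_dev_remove() are hypothetical driver functions, not names from this series.

#include <linux/device.h>
#include <linux/iommu.h>

static int my_dev_probe(struct device *dev)
{
	int ret;

	/* Backed by arm_smmu_dev_has_feature(); false until IOPF/BTM land */
	if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	/* Ends up in arm_smmu_dev_enable_feature() -> arm_smmu_master_enable_sva() */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/* ... bind mms and issue DMA on process virtual addresses ... */
	return 0;
}

static void my_dev_remove(struct device *dev)
{
	/* arm_smmu_master_disable_sva() returns -EBUSY while bonds remain */
	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_SVA))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}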