Diffstat (limited to 'drivers')
 drivers/iommu/Kconfig              |  37
 drivers/iommu/Makefile             |   4
 drivers/iommu/amd_iommu.c          |  38
 drivers/iommu/amd_iommu_debugfs.c  |  33
 drivers/iommu/amd_iommu_init.c     |  57
 drivers/iommu/amd_iommu_proto.h    |   6
 drivers/iommu/amd_iommu_types.h    |  22
 drivers/iommu/arm-smmu-v3.c        |  26
 drivers/iommu/arm-smmu.c           |  17
 drivers/iommu/dmar.c               |   6
 drivers/iommu/exynos-iommu.c       |   1
 drivers/iommu/intel-iommu.c        | 160
 drivers/iommu/intel-pasid.c        | 239
 drivers/iommu/intel-pasid.h        |  39
 drivers/iommu/intel-svm.c          |  79
 drivers/iommu/io-pgtable-arm-v7s.c |   7
 drivers/iommu/io-pgtable-arm.c     |   3
 drivers/iommu/iommu-debugfs.c      |  66
 drivers/iommu/iommu.c              |  44
 drivers/iommu/ipmmu-vmsa.c         |  59
 drivers/iommu/msm_iommu.c          |  17
 drivers/iommu/mtk_iommu.c          |   1
 drivers/iommu/mtk_iommu_v1.c       |   1
 drivers/iommu/omap-iommu.c         |   5
 drivers/iommu/qcom_iommu.c         |   1
 drivers/iommu/rockchip-iommu.c     |   1
 drivers/iommu/tegra-gart.c         |   1
 drivers/iommu/tegra-smmu.c         |   1
 28 files changed, 739 insertions(+), 232 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 689ffe538370..c60395b7470f 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,6 +60,27 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
endmenu
+config IOMMU_DEBUGFS
+ bool "Export IOMMU internals in DebugFS"
+ depends on DEBUG_FS
+ help
+ Allows exposure of IOMMU device internals. This option enables
+ the use of debugfs by IOMMU drivers as required. Devices can,
+ at initialization time, cause the IOMMU code to create a top-level
+ debug/iommu directory, and then populate a subdirectory with
+ entries as required.
+
+config IOMMU_DEFAULT_PASSTHROUGH
+ bool "IOMMU passthrough by default"
+ depends on IOMMU_API
+ help
+ Enable passthrough by default, removing the need to pass
+ iommu.passthrough=on or iommu=pt on the kernel command line. If this
+ is enabled, you can still disable passthrough with iommu.passthrough=off
+ or iommu=nopt, depending on the architecture.
+
+ If unsure, say N here.
+
config IOMMU_IOVA
tristate
@@ -135,6 +156,18 @@ config AMD_IOMMU_V2
hardware. Select this option if you want to use devices that support
the PCI PRI and PASID interface.
+config AMD_IOMMU_DEBUGFS
+ bool "Enable AMD IOMMU internals in DebugFS"
+ depends on AMD_IOMMU && IOMMU_DEBUGFS
+ ---help---
+ !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+ Exposes AMD IOMMU device internals in DebugFS.
+
+ This option is -NOT- intended for production environments, and should
+ not generally be enabled.
+
# Intel IOMMU support
config DMAR_TABLE
bool
@@ -284,8 +317,8 @@ config IPMMU_VMSA
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
- Support for the Renesas VMSA-compatible IPMMU Renesas found in the
- R-Mobile APE6 and R-Car H2/M2 SoCs.
+ Support for the Renesas VMSA-compatible IPMMU found in the R-Mobile
+ APE6, R-Car Gen2, and R-Car Gen3 SoCs.
If unsure, say N.
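To make the new IOMMU_DEBUGFS hook concrete, here is a minimal sketch of a driver claiming its own subdirectory under the shared debugfs root; it assumes the core-created iommu_debugfs_dir dentry that amd_iommu_debugfs.c uses further down, while the "mydrv" names and the fault counter are purely hypothetical:

#include <linux/debugfs.h>
#include <linux/iommu.h>

static struct dentry *mydrv_debugfs;        /* hypothetical driver dir */
static u32 mydrv_fault_count;               /* hypothetical statistic */

static void mydrv_debugfs_setup(void)
{
        /* create debug/iommu/mydrv under the core's top-level dir */
        mydrv_debugfs = debugfs_create_dir("mydrv", iommu_debugfs_dir);
        debugfs_create_u32("fault_count", 0444, mydrv_debugfs,
                           &mydrv_fault_count);
}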
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 1fb695854809..ab5eba6edf82 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
@@ -10,11 +11,12 @@ obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 60b2eab29cd8..4e04fff23977 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1404,6 +1404,8 @@ static u64 *fetch_pte(struct protection_domain *domain,
int level;
u64 *pte;
+ *page_size = 0;
+
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
@@ -1944,12 +1946,6 @@ static int __attach_device(struct iommu_dev_data *dev_data,
{
int ret;
- /*
- * Must be called with IRQs disabled. Warn here to detect early
- * when its not.
- */
- WARN_ON(!irqs_disabled());
-
/* lock domain */
spin_lock(&domain->lock);
@@ -2115,12 +2111,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain;
- /*
- * Must be called with IRQs disabled. Warn here to detect early
- * when its not.
- */
- WARN_ON(!irqs_disabled());
-
domain = dev_data->domain;
spin_lock(&domain->lock);
@@ -2405,9 +2395,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
}
if (amd_iommu_unmap_flush) {
- dma_ops_free_iova(dma_dom, dma_addr, pages);
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
+ dma_ops_free_iova(dma_dom, dma_addr, pages);
} else {
pages = __roundup_pow_of_two(pages);
queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
@@ -3192,7 +3182,6 @@ const struct iommu_ops amd_iommu_ops = {
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = amd_iommu_iova_to_phys,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
@@ -3874,7 +3863,8 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.int_type = delivery_mode;
irte->lo.fields_remap.dm = dest_mode;
irte->hi.fields.vector = vector;
- irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
irte->lo.fields_remap.valid = 1;
}
@@ -3927,7 +3917,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
if (!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
- irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(dest_apicid);
modify_irte_ga(devid, index, irte, NULL);
}
}
@@ -4344,7 +4337,10 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
irte->lo.val = 0;
irte->hi.fields.vector = cfg->vector;
irte->lo.fields_remap.guest_mode = 0;
- irte->lo.fields_remap.destination = cfg->dest_apicid;
+ irte->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+ irte->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
irte->lo.fields_remap.dm = apic->irq_dest_mode;
@@ -4461,8 +4457,12 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
raw_spin_lock_irqsave(&table->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
- if (cpu >= 0)
- ref->lo.fields_vapic.destination = cpu;
+ if (cpu >= 0) {
+ ref->lo.fields_vapic.destination =
+ APICID_TO_IRTE_DEST_LO(cpu);
+ ref->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cpu);
+ }
ref->lo.fields_vapic.is_run = is_run;
barrier();
}
diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c
new file mode 100644
index 000000000000..c6a5c737ef09
--- /dev/null
+++ b/drivers/iommu/amd_iommu_debugfs.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD IOMMU driver
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
+static struct dentry *amd_iommu_debugfs;
+static DEFINE_MUTEX(amd_iommu_debugfs_lock);
+
+#define MAX_NAME_LEN 20
+
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+{
+ char name[MAX_NAME_LEN + 1];
+
+ mutex_lock(&amd_iommu_debugfs_lock);
+ if (!amd_iommu_debugfs)
+ amd_iommu_debugfs = debugfs_create_dir("amd",
+ iommu_debugfs_dir);
+ mutex_unlock(&amd_iommu_debugfs_lock);
+
+ snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+ iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+}
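Usage note (an assumption based on the directory names above): with debugfs mounted at /sys/kernel/debug and CONFIG_AMD_IOMMU_DEBUGFS enabled, each unit should surface as /sys/kernel/debug/iommu/amd/iommuNN (e.g. iommu00), ready for later patches to populate with entries.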
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 904c575d1677..84b3e4445d46 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -153,6 +153,7 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -280,9 +281,9 @@ static void clear_translation_pre_enabled(struct amd_iommu *iommu)
static void init_translation_status(struct amd_iommu *iommu)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
if (ctrl & (1<<CONTROL_IOMMU_EN))
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
@@ -386,30 +387,30 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl |= (1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl |= (1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~(1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~(1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl &= ~CTRL_INV_TO_MASK;
ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
@@ -827,6 +828,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
return ret;
}
+static void iommu_enable_xt(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ /*
+ * XT mode (32-bit APIC destination ID) requires
+ * GA mode (128-bit IRTE support) as a prerequisite.
+ */
+ if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
+ amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_XT_EN);
+#endif /* CONFIG_IRQ_REMAP */
+}
+
static void iommu_enable_gt(struct amd_iommu *iommu)
{
if (!iommu_feature(iommu, FEATURE_GT))
@@ -1507,6 +1521,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
+ amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
case 0x11:
case 0x40:
@@ -1516,6 +1532,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
+ amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
default:
return -EINVAL;
@@ -1832,6 +1850,8 @@ static void print_iommu_info(void)
pr_info("AMD-Vi: Interrupt remapping enabled\n");
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
pr_info("AMD-Vi: virtual APIC enabled\n");
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ pr_info("AMD-Vi: X2APIC enabled\n");
}
}
@@ -2168,6 +2188,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
iommu_enable(iommu);
iommu_flush_all_caches(iommu);
}
@@ -2212,6 +2233,7 @@ static void early_enable_iommus(void)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
iommu_set_device_table(iommu);
iommu_flush_all_caches(iommu);
}
@@ -2691,8 +2713,7 @@ int __init amd_iommu_enable(void)
return ret;
irq_remapping_enabled = 1;
-
- return 0;
+ return amd_iommu_xt_mode;
}
void amd_iommu_disable(void)
@@ -2721,6 +2742,7 @@ int __init amd_iommu_enable_faulting(void)
*/
static int __init amd_iommu_init(void)
{
+ struct amd_iommu *iommu;
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
@@ -2730,14 +2752,15 @@ static int __init amd_iommu_init(void)
disable_iommus();
free_iommu_resources();
} else {
- struct amd_iommu *iommu;
-
uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
}
+ for_each_iommu(iommu)
+ amd_iommu_debugfs_setup(iommu);
+
return ret;
}
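A brief worked example of why the readl/writel pairs above had to become readq/writeq: the new XT enable bit (CONTROL_XT_EN, defined as 0x32 in amd_iommu_types.h below) is bit 50 of the 64-bit control register, which neither a 32-bit MMIO access nor a plain (1 << bit) int shift can reach. A minimal sketch of the corrected bit manipulation:

#include <stdint.h>

#define CONTROL_XT_EN 0x32ULL                   /* bit position 50 */

static inline uint64_t ctrl_enable_bit(uint64_t ctrl, uint8_t bit)
{
        /* 1ULL forces a 64-bit shift; (1 << 50) on a 32-bit int
         * would be undefined behaviour and could never set bit 50.
         */
        return ctrl | (1ULL << bit);
}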
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 640c286a0ab9..a8cd0296fb16 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -33,6 +33,12 @@ extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
/* Needed for interrupt remapping */
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 986cbe0cc189..e2b342e65a7b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -161,6 +161,7 @@
#define CONTROL_GAM_EN 0x19ULL
#define CONTROL_GALOG_EN 0x1CULL
#define CONTROL_GAINT_EN 0x1DULL
+#define CONTROL_XT_EN 0x32ULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -378,9 +379,11 @@
#define IOMMU_CAP_EFR 27
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
+#define IOMMU_FEAT_XTSUP_SHIFT 0
#define IOMMU_FEAT_GASUP_SHIFT 6
/* IOMMU Extended Feature Register (EFR) */
+#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
#define MAX_DOMAIN_ID 65536
@@ -437,7 +440,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
/*
* This struct is used to pass information about
* incoming PPR faults around.
@@ -594,6 +596,11 @@ struct amd_iommu {
u32 flags;
volatile u64 __aligned(8) cmd_sem;
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+ /* DebugFS Info */
+ struct dentry *debugfs;
+#endif
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -810,6 +817,9 @@ union irte {
} fields;
};
+#define APICID_TO_IRTE_DEST_LO(x) (x & 0xffffff)
+#define APICID_TO_IRTE_DEST_HI(x) ((x >> 24) & 0xff)
+
union irte_ga_lo {
u64 val;
@@ -823,8 +833,8 @@ union irte_ga_lo {
dm : 1,
/* ------ */
guest_mode : 1,
- destination : 8,
- rsvd : 48;
+ destination : 24,
+ ga_tag : 32;
} fields_remap;
/* For guest vAPIC */
@@ -837,8 +847,7 @@ union irte_ga_lo {
is_run : 1,
/* ------ */
guest_mode : 1,
- destination : 8,
- rsvd2 : 16,
+ destination : 24,
ga_tag : 32;
} fields_vapic;
};
@@ -849,7 +858,8 @@ union irte_ga_hi {
u64 vector : 8,
rsvd_1 : 4,
ga_root_ptr : 40,
- rsvd_2 : 12;
+ rsvd_2 : 4,
+ destination : 8;
} fields;
};
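A worked example of the widened destination encoding above (the APIC ID value is illustrative): for a 32-bit x2APIC destination of 0x12345678,

        APICID_TO_IRTE_DEST_LO(0x12345678) == 0x345678   /* 24-bit lo field */
        APICID_TO_IRTE_DEST_HI(0x12345678) == 0x12       /*  8-bit hi field */

i.e. lo = x & 0xffffff and hi = (x >> 24) & 0xff, whereas the old 8-bit destination field could only address APIC IDs 0-255.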
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 22bdabd3d8e0..5059d09f3202 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -24,6 +24,7 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -366,7 +367,7 @@
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
-static bool disable_bypass;
+static bool disable_bypass = 1;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -1301,6 +1302,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
/* Sync our overflow flag, as we believe we're up to speed */
q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+ writel(q->cons, q->cons_reg);
return IRQ_HANDLED;
}
@@ -1997,7 +1999,6 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -2211,8 +2212,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
reg &= ~clr;
reg |= set;
writel_relaxed(reg | GBPA_UPDATE, gbpa);
- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
- 1, ARM_SMMU_POLL_TIMEOUT_US);
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
}
static void arm_smmu_free_msis(void *data)
@@ -2392,8 +2397,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
- if (reg & CR0_SMMUEN)
+ if (reg & CR0_SMMUEN) {
+ if (is_kdump_kernel()) {
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ arm_smmu_device_disable(smmu);
+ return -EBUSY;
+ }
+
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ }
ret = arm_smmu_device_disable(smmu);
if (ret)
@@ -2491,10 +2503,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
enables |= CR0_SMMUEN;
} else {
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret) {
- dev_err(smmu->dev, "GBPA not responding to update\n");
+ if (ret)
return ret;
- }
}
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c73cfce1ccc0..fd1b80ef9490 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1562,7 +1562,6 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -2103,12 +2102,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- if (smmu->version == ARM_SMMU_V2 &&
- smmu->num_context_banks != smmu->num_context_irqs) {
- dev_err(dev,
- "found only %d context interrupt(s) but %d required\n",
- smmu->num_context_irqs, smmu->num_context_banks);
- return -ENODEV;
+ if (smmu->version == ARM_SMMU_V2) {
+ if (smmu->num_context_banks > smmu->num_context_irqs) {
+ dev_err(dev,
+ "found only %d context irq(s) but %d required\n",
+ smmu->num_context_irqs, smmu->num_context_banks);
+ return -ENODEV;
+ }
+
+ /* Ignore superfluous interrupts */
+ smmu->num_context_irqs = smmu->num_context_banks;
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 75456b5aa825..d9c748b6f9e4 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1339,8 +1339,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
qi_submit_sync(&desc, iommu);
}
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask)
{
struct qi_desc desc;
@@ -1355,7 +1355,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
qdep = 0;
desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE;
+ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
qi_submit_sync(&desc, iommu);
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b128cb4372d3..1bd0cd7168df 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1332,7 +1332,6 @@ static const struct iommu_ops exynos_iommu_ops = {
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
.add_device = exynos_iommu_add_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6a237d18fabf..5f3f10cf9d9d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -52,6 +52,7 @@
#include <asm/iommu.h>
#include "irq_remapping.h"
+#include "intel-pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -379,60 +380,6 @@ static int hw_pass_through = 1;
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
-
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
-};
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -523,6 +470,27 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
+/*
+ * Iterate over elements in device_domain_list and call the specified
+ * callback @fn against each element. This helper should only be used
+ * in a context where device_domain_lock is already held.
+ */
+int for_each_device_domain(int (*fn)(struct device_domain_info *info,
+ void *data), void *data)
+{
+ int ret = 0;
+ struct device_domain_info *info;
+
+ assert_spin_locked(&device_domain_lock);
+ list_for_each_entry(info, &device_domain_list, global) {
+ ret = fn(info, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
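A hypothetical caller of the new for_each_device_domain() helper (the callback and counter names are illustrative; per the comment above, device_domain_lock must already be held, so a sketch like this only makes sense inside intel-iommu.c):

static int count_pasid_enabled(struct device_domain_info *info, void *data)
{
        int *count = data;

        if (info->pasid_enabled)
                (*count)++;
        return 0;       /* returning non-zero stops the walk early */
}

static int pasid_enabled_devices(void)
{
        unsigned long flags;
        int count = 0;

        spin_lock_irqsave(&device_domain_lock, flags);
        for_each_device_domain(count_pasid_enabled, &count);
        spin_unlock_irqrestore(&device_domain_lock, flags);
        return count;
}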
@@ -629,7 +597,7 @@ static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
domains[did & 0xff] = domain;
}
-static inline void *alloc_pgtable_page(int node)
+void *alloc_pgtable_page(int node)
{
struct page *page;
void *vaddr = NULL;
@@ -640,7 +608,7 @@ static inline void *alloc_pgtable_page(int node)
return vaddr;
}
-static inline void free_pgtable_page(void *vaddr)
+void free_pgtable_page(void *vaddr)
{
free_page((unsigned long)vaddr);
}
@@ -723,7 +691,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
}
/* This function only returns a single iommu in a domain */
-static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
@@ -1501,6 +1469,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
+ /* For IOMMUs that support device IOTLB throttling (DIT), we assign
+ * the PFSID to a VF's invalidation descriptors so that the IOMMU
+ * hardware can gauge queue depth at the PF level. If DIT is not set,
+ * the PFSID field is treated as reserved and must be 0.
+ */
+ if (!ecap_dit(info->iommu->ecap))
+ info->pfsid = 0;
+ else {
+ struct pci_dev *pf_pdev;
+
+ /* pci_physfn() returns pdev itself if the device is not a VF */
+ pf_pdev = pci_physfn(pdev);
+ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ }
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1566,7 +1548,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
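A concrete (illustrative) reading of the pfsid plumbing: PCI_DEVID(bus, devfn) packs the 16-bit source-id as (bus << 8) | devfn, matching the sid computation above, so for a hypothetical VF at 0000:3b:00.2 whose PF is 0000:3b:00.0:

        sid   = PCI_DEVID(0x3b, 0x02) = 0x3b02   /* the VF itself */
        pfsid = PCI_DEVID(0x3b, 0x00) = 0x3b00   /* pci_physfn() -> the PF */

With ecap DIT set, the IOMMU can then account queue depth against the PF; without it the field stays 0 as reserved.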
@@ -1800,7 +1783,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (pasid_enabled(iommu)) {
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
- intel_svm_free_pasid_tables(iommu);
+ intel_svm_exit(iommu);
}
#endif
}
@@ -2495,6 +2478,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
+ info->pasid_table = NULL;
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2552,6 +2536,15 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
+
+ if (dev && dev_is_pci(dev) && info-