Diffstat (limited to 'drivers/iommu')
 drivers/iommu/Kconfig               |  17
 drivers/iommu/Makefile              |   1
 drivers/iommu/amd_iommu.c           |  19
 drivers/iommu/arm-smmu-v3.c         |   3
 drivers/iommu/arm-smmu.c            |   2
 drivers/iommu/dma-iommu.c           |   3
 drivers/iommu/dmar.c                |   2
 drivers/iommu/hyperv-iommu.c        | 196
 drivers/iommu/intel-iommu.c         | 168
 drivers/iommu/intel-pasid.c         |   2
 drivers/iommu/intel-svm.c           |  88
 drivers/iommu/intel_irq_remapping.c |  32
 drivers/iommu/io-pgtable-arm-v7s.c  |   6
 drivers/iommu/io-pgtable-arm.c      |   3
 drivers/iommu/io-pgtable.c          |   5
 drivers/iommu/io-pgtable.h          | 213
 drivers/iommu/iommu-debugfs.c       |  23
 drivers/iommu/iommu.c               |  16
 drivers/iommu/ipmmu-vmsa.c          |   3
 drivers/iommu/irq_remapping.c       |   3
 drivers/iommu/irq_remapping.h       |   1
 drivers/iommu/msm_iommu.c           |  10
 drivers/iommu/mtk_iommu.h           |   3
 drivers/iommu/mtk_iommu_v1.c        |   6
 drivers/iommu/qcom_iommu.c          |   2
 drivers/iommu/tegra-gart.c          | 473
 drivers/iommu/tegra-smmu.c          |   4
27 files changed, 608 insertions(+), 696 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d9a25715650e..6f07f3b21816 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -1,3 +1,7 @@
+# The IOVA library may also be used by non-IOMMU_API users
+config IOMMU_IOVA
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -81,9 +85,6 @@ config IOMMU_DEFAULT_PASSTHROUGH
If unsure, say N here.
-config IOMMU_IOVA
- tristate
-
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
@@ -282,6 +283,7 @@ config ROCKCHIP_IOMMU
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC
+ depends on TEGRA_MC
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
@@ -435,4 +437,13 @@ config QCOM_IOMMU
help
Support for IOMMU on certain Qualcomm SoCs.
+config HYPERV_IOMMU
+ bool "Hyper-V x2APIC IRQ Handling"
+ depends on HYPERV
+ select IOMMU_API
+ default HYPERV
+ help
Stub IOMMU driver to handle IRQs, allowing Hyper-V Linux
guests to run with x2APIC mode enabled.
+
endif # IOMMU_SUPPORT
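
Moving IOMMU_IOVA out of the IOMMU_SUPPORT-only block lets code that is not an IOMMU_API user select the IOVA allocator on its own. A minimal sketch of such a consumer, using only the long-standing <linux/iova.h> API; the function and variable names here are hypothetical and not part of this patch:

    #include <linux/dma-mapping.h>
    #include <linux/iova.h>
    #include <linux/sizes.h>

    static struct iova_domain my_iovad;	/* hypothetical module state */

    static int my_iova_init(void)
    {
            struct iova *iova;

            /* 4 KiB granule; allocations start at PFN 1 */
            init_iova_domain(&my_iovad, SZ_4K, 1);

            /* reserve 16 pages below the 32-bit boundary */
            iova = alloc_iova(&my_iovad, 16,
                              DMA_BIT_MASK(32) >> PAGE_SHIFT, false);
            if (!iova) {
                    put_iova_domain(&my_iovad);
                    return -ENOMEM;
            }

            __free_iova(&my_iovad, iova);
            put_iova_domain(&my_iovad);
            return 0;
    }
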
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index a158a68c8ea8..8c71a15e986b 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c0dc7c33335e..6b0760dafb3e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
static void do_detach(struct iommu_dev_data *dev_data)
{
+ struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
- /* decrease reference counters */
- dev_data->domain->dev_iommu[iommu->index] -= 1;
- dev_data->domain->dev_cnt -= 1;
-
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
+
+ /* Flush IOTLB */
+ domain_flush_tlb_pde(domain);
+
+ /* Wait for the flushes to finish */
+ domain_flush_complete(domain);
+
+ /* decrease reference counters - needs to happen after the flushes */
+ domain->dev_iommu[iommu->index] -= 1;
+ domain->dev_cnt -= 1;
}
/*
@@ -2616,13 +2623,13 @@ out_unmap:
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
- if (--mapped_pages)
+ if (--mapped_pages == 0)
goto out_free_iova;
}
}
out_free_iova:
- free_iova_fast(&dma_dom->iovad, address, npages);
+ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
out_err:
return 0;
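
Two fixes land together in the sg-map error path above: the unwind loop now exits only once every mapped page has been unmapped (the old "if (--mapped_pages)" bailed out while pages were still mapped), and free_iova_fast() is now passed a page frame number rather than a raw bus address. A short sketch of the PFN convention, assuming the alloc_iova_fast()/free_iova_fast() pair from <linux/iova.h>; the names mirror the hunk, but the snippet itself is illustrative:

    unsigned long pfn;
    dma_addr_t address;

    /* allocate npages of IOVA space below dma_mask; returns a PFN */
    pfn = alloc_iova_fast(&dma_dom->iovad, npages,
                          dma_mask >> PAGE_SHIFT, true);
    address = (dma_addr_t)pfn << PAGE_SHIFT;	/* PFN -> bus address */

    /* on failure, undo using the PFN, not the bus address */
    free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
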
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 0d284029dc73..d3880010c6cf 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -18,6 +18,7 @@
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
@@ -32,8 +33,6 @@
#include <linux/amba/bus.h>
-#include "io-pgtable.h"
-
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL GENMASK(28, 27)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index af18a7e7f917..045d93884164 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -39,6 +39,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
@@ -56,7 +57,6 @@
#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
-#include "io-pgtable.h"
#include "arm-smmu-regs.h"
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6b43c1..77aabe637a60 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -289,7 +289,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- unsigned long order, base_pfn, end_pfn;
+ unsigned long order, base_pfn;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
@@ -298,7 +298,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
- end_pfn = (base + size - 1) >> order;
/* Check the domain allows at least some access to the device... */
if (domain->geometry.force_aperture) {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc9f14811e0f..58dc70bffd5b 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
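
Computing the element size from the member itself keeps the allocation size correct even if the array's element type ever changes, since the expression can no longer disagree with the struct definition. A generic illustration of the idiom, with hypothetical types:

    struct pci_path { int bus; int device; int function; };

    struct notify_info {
            int level;
            struct pci_path path[];	/* flexible array member */
    };

    int level = 4;	/* example depth */
    struct notify_info *info;
    size_t size;

    /* sizeof() does not evaluate its operand, so 'info' need not
     * point anywhere yet */
    size = sizeof(*info) + level * sizeof(info->path[0]);
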
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
new file mode 100644
index 000000000000..a386b83e0e34
--- /dev/null
+++ b/drivers/iommu/hyperv-iommu.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V stub IOMMU driver.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+
+#include <asm/apic.h>
+#include <asm/cpu.h>
+#include <asm/hw_irq.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>
+#include <asm/hypervisor.h>
+
+#include "irq_remapping.h"
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*
+ * According to the 82093AA IO-APIC spec, the IO-APIC has a 24-entry
+ * Interrupt Redirection Table. Hyper-V exposes a single IO-APIC, so
+ * define 24 IO-APIC remapping entries.
+ */
+#define IOAPIC_REMAPPING_ENTRY 24
+
+static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
+static struct irq_domain *ioapic_ir_domain;
+
+static int hyperv_ir_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct IO_APIC_route_entry *entry;
+ int ret;
+
+ /* Return an error if the new irq affinity is outside ioapic_max_cpumask. */
+ if (!cpumask_subset(mask, &ioapic_max_cpumask))
+ return -EINVAL;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+
+ entry = data->chip_data;
+ entry->dest = cfg->dest_apicid;
+ entry->vector = cfg->vector;
+ send_cleanup_vector(cfg);
+
+ return 0;
+}
+
+static struct irq_chip hyperv_ir_chip = {
+ .name = "HYPERV-IR",
+ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = hyperv_ir_set_affinity,
+};
+
+static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data;
+ struct irq_desc *desc;
+ int ret = 0;
+
+ if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ if (!irq_data) {
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -EINVAL;
+ }
+
+ irq_data->chip = &hyperv_ir_chip;
+
+ /*
+ * When an IOMMU provides interrupt remapping, changing irq affinity
+ * only requires updating the IRTE in the IOMMU. Hyper-V does not
+ * support interrupt remapping, so changing the affinity of an IO-APIC
+ * interrupt still requires updating the IO-APIC registers. However,
+ * ioapic_configure_entry() ignores cfg->vector and cfg->dest_apicid
+ * when the IO-APIC's parent irq domain is not the vector domain. To
+ * get the vector and dest_apicid into the IO-APIC register, the
+ * IO-APIC entry pointer is saved in chip_data, and
+ * hyperv_irq_remapping_activate()/hyperv_ir_set_affinity() write
+ * vector and dest_apicid directly into the IO-APIC entry.
+ */
+ irq_data->chip_data = info->ioapic_entry;
+
+ /*
+ * Hyper-V IO-APIC irq affinity must stay within ioapic_max_cpumask
+ * because there is no irq remapping support.
+ */
+ desc = irq_data_to_desc(irq_data);
+ cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+
+ return 0;
+}
+
+static void hyperv_irq_remapping_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static int hyperv_irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool reserve)
+{
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
+ struct IO_APIC_route_entry *entry = irq_data->chip_data;
+
+ entry->dest = cfg->dest_apicid;
+ entry->vector = cfg->vector;
+
+ return 0;
+}
+
+static struct irq_domain_ops hyperv_ir_domain_ops = {
+ .alloc = hyperv_irq_remapping_alloc,
+ .free = hyperv_irq_remapping_free,
+ .activate = hyperv_irq_remapping_activate,
+};
+
+static int __init hyperv_prepare_irq_remapping(void)
+{
+ struct fwnode_handle *fn;
+ int i;
+
+ if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
+ !x2apic_supported())
+ return -ENODEV;
+
+ fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
+ if (!fn)
+ return -ENOMEM;
+
+ ioapic_ir_domain =
+ irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
+ 0, IOAPIC_REMAPPING_ENTRY, fn,
+ &hyperv_ir_domain_ops, NULL);
+
+ irq_domain_free_fwnode(fn);
+
+ /*
+ * Hyper-V doesn't provide irq remapping for the IO-APIC, so the
+ * IO-APIC only accepts 8-bit APIC IDs. Each cpu's APIC ID is read
+ * from the ACPI MADT table, and APIC IDs in the MADT on Hyper-V are
+ * sorted in monotonically increasing order. The APIC ID reflects cpu
+ * topology, so there may be APIC ID gaps when the number of cpus in
+ * a socket is not a power of two. Prepare the maximum cpu affinity
+ * for IO-APIC irqs: scan cpus 0-255 and set each cpu whose APIC ID
+ * is below 256 in ioapic_max_cpumask.
+ */
+ for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
+ if (cpu_physical_id(i) < 256)
+ cpumask_set_cpu(i, &ioapic_max_cpumask);
+
+ return 0;
+}
+
+static int __init hyperv_enable_irq_remapping(void)
+{
+ return IRQ_REMAP_X2APIC_MODE;
+}
+
+static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
+{
+ if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC)
+ return ioapic_ir_domain;
+ else
+ return NULL;
+}
+
+struct irq_remap_ops hyperv_irq_remap_ops = {
+ .prepare = hyperv_prepare_irq_remapping,
+ .enable = hyperv_enable_irq_remapping,
+ .get_ir_irq_domain = hyperv_get_ir_irq_domain,
+};
+
+#endif
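
The one-line irq_remapping.h change and the three-line irq_remapping.c change listed in the diffstat are not shown in this section. Presumably they declare hyperv_irq_remap_ops and try it after the Intel and AMD ops in the remap-ops probe; a sketch of the expected hook-up, not the verbatim hunks:

    /* irq_remapping.h */
    extern struct irq_remap_ops hyperv_irq_remap_ops;

    /* irq_remapping.c, in the prepare path that picks remap_ops */
    else if (IS_ENABLED(CONFIG_HYPERV_IOMMU) &&
             hyperv_irq_remap_ops.prepare() == 0)
            remap_ops = &hyperv_irq_remap_ops;
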
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..c968b3c7bae0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -19,6 +19,7 @@
*/
#define pr_fmt(fmt) "DMAR: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/init.h>
#include <linux/bitmap.h>
@@ -342,8 +343,7 @@ static int g_num_of_iommus;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
-static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
+static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
struct device *dev);
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
-static int intel_iommu_sm = 1;
+static int intel_iommu_sm;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str)
} else if (!strncmp(str, "sp_off", 6)) {
pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
- } else if (!strncmp(str, "sm_off", 6)) {
- pr_info("Intel-IOMMU: disable scalable mode support\n");
- intel_iommu_sm = 0;
+ } else if (!strncmp(str, "sm_on", 5)) {
+ pr_info("Intel-IOMMU: scalable mode supported\n");
+ intel_iommu_sm = 1;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -864,7 +864,7 @@ out:
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level)
{
- struct dma_pte *parent, *pte = NULL;
+ struct dma_pte *parent, *pte;
int level = agaw_to_level(domain->agaw);
int offset;
@@ -921,7 +921,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level, int *large_page)
{
- struct dma_pte *parent, *pte = NULL;
+ struct dma_pte *parent, *pte;
int total = agaw_to_level(domain->agaw);
int offset;
@@ -953,7 +953,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- unsigned int large_page = 1;
+ unsigned int large_page;
struct dma_pte *first_pte, *pte;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
@@ -1131,7 +1131,7 @@ static struct page *domain_unmap(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- struct page *freelist = NULL;
+ struct page *freelist;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
@@ -1402,10 +1402,13 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
- if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+ if (info->pri_supported &&
+ (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
+ !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
info->pri_enabled = 1;
#endif
if (!pdev->untrusted && info->ats_supported &&
+ pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
@@ -1762,7 +1765,7 @@ static int domain_attach_iommu(struct dmar_domain *domain,
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num, count = INT_MAX;
+ int num, count;
assert_spin_locked(&device_domain_lock);
assert_spin_locked(&iommu->lock);
@@ -1815,7 +1818,7 @@ static int dmar_init_reserved_ranges(void)
IOVA_PFN(r->start),
IOVA_PFN(r->end));
if (!iova) {
- pr_err("Reserve iova failed\n");
+ pci_err(pdev, "Reserve iova for %pR failed\n", r);
return -ENODEV;
}
}
@@ -1901,11 +1904,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
static void domain_exit(struct dmar_domain *domain)
{
- struct page *freelist = NULL;
-
- /* Domain 0 is reserved, so dont process it */
- if (!domain)
- return;
+ struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
@@ -2057,7 +2056,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
int agaw;
context_set_domain_id(context, did);
- context_set_translation_type(context, translation);
if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
@@ -2087,6 +2085,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
*/
context_set_address_width(context, iommu->msagaw);
}
+
+ context_set_translation_type(context, translation);
}
context_set_fault_enable(context);
@@ -2485,7 +2485,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (!pci_ats_disabled() &&
+ if (!pdev->untrusted &&
+ !pci_ats_disabled() &&
ecap_dev_iotlb_support(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
dmar_find_matched_atsr_unit(pdev))
@@ -2544,9 +2545,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
- pr_err("PASID table allocation for %s failed\n",
- dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "PASID table allocation failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
@@ -2560,16 +2560,15 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
dev, PASID_RID2PASID);
spin_unlock(&iommu->lock);
if (ret) {
- pr_err("Setup RID2PASID for %s failed\n",
- dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "Setup RID2PASID failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
}
if (dev && domain_context_mapping(domain, dev)) {
- pr_err("Domain context map for %s failed\n", dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "Domain context map failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
@@ -2584,7 +2583,7 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
- struct device_domain_info *info = NULL;
+ struct device_domain_info *info;
struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
u16 dma_alias;
@@ -2723,13 +2722,12 @@ static int domain_prepare_identity_map(struct device *dev,
range which is reserved in E820, so which didn't get set
up to start with in si_domain */
if (domain == si_domain && hw_pass_through) {
- pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
+ dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
+ start, end);
return 0;
}
- pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
+ dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
if (end < start) {
WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
@@ -2809,7 +2807,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{
- int nid, ret = 0;
+ int nid, ret;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
@@ -2933,7 +2931,6 @@ static bool device_is_rmrr_locked(struct device *dev)
static int iommu_should_identity_map(struct device *dev, int startup)
{
-
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -3016,8 +3013,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
ret = domain_add_dev_info(si_domain, dev);
if (!ret)
- pr_info("%s identity mapping for device %s\n",
- hw ? "Hardware" : "Software", dev_name(dev));
+ dev_info(dev, "%s identity mapping\n",
+ hw ? "Hardware" : "Software");
else if (ret == -ENODEV)
/* device not associated with an iommu */
ret = 0;
@@ -3529,7 +3526,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- unsigned long iova_pfn = 0;
+ unsigned long iova_pfn;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3550,8 +3547,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
- pr_err("Allocating %ld-page iova for %s failed",
- nrpages, dev_name(dev));
+ dev_err(dev, "Allocating %ld-page iova failed", nrpages);
return 0;
}
@@ -3599,7 +3595,7 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
out:
if (!domain)
- pr_err("Allocating domain for %s failed\n", dev_name(dev));
+ dev_err(dev, "Allocating domain failed\n");
return domain;
@@ -3625,9 +3621,8 @@ static int iommu_no_mapping(struct device *dev)
* 32 bit DMA is removed from si_domain and fall back
* to non-identity mapping.
*/
- dmar_remove_one_dev_info(si_domain, dev);
- pr_info("32bit %s uses non-identity mapping\n",
- dev_name(dev));
+ dmar_remove_one_dev_info(dev);
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
return 0;
}
} else {
@@ -3639,8 +3634,7 @@ static int iommu_no_mapping(struct device *dev)
int ret;
ret = domain_add_dev_info(si_domain, dev);
if (!ret) {
- pr_info("64bit %s uses identity mapping\n",
- dev_name(dev));
+ dev_info(dev, "64bit DMA uses identity mapping\n");
return 1;
}
}
@@ -3649,11 +3643,9 @@ static int iommu_no_mapping(struct device *dev)
return 0;
}
-static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int dir,
- u64 dma_mask)
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
+ size_t size, int dir, u64 dma_mask)
{
- phys_addr_t paddr = page_to_phys(page) + offset;
struct dmar_domain *domain;
phys_addr_t start_paddr;
unsigned long iova_pfn;
@@ -3705,8 +3697,8 @@ static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
error:
if (iova_pfn)
free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
- dev_name(dev), size, (unsigned long long)paddr, dir);
+ dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
+ size, (unsigned long long)paddr, dir);
return DMA_MAPPING_ERROR;
}
@@ -3715,7 +3707,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
+ return __intel_map_single(dev, page_to_phys(page) + offset, size,
+ dir, *dev->dma_mask);
+}
+
+static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3741,8 +3741,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
start_pfn = mm_to_dma_pfn(iova_pfn);
last_pfn = start_pfn + nrpages - 1;
- pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- dev_name(dev), start_pfn, last_pfn);
+ dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
freelist = domain_unmap(domain, start_pfn, last_pfn);
@@ -3806,8 +3805,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
return NULL;
memset(page_address(page), 0, size);
- *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
+ *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+ DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
if (*dma_handle != DMA_MAPPING_ERROR)
return page_address(page);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
@@ -3924,6 +3924,8 @@ static const struct dma_map_ops intel_dma_ops = {
.unmap_sg = intel_unmap_sg,
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
+ .map_resource = intel_map_resource,
+ .unmap_resource = intel_unmap_page,
.dma_supported = dma_direct_supported,
};
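
With .map_resource/.unmap_resource wired into intel_dma_ops, a driver can hand a physical MMIO range (for example, a peer device's BAR) to the generic DMA API and receive an IOVA translated by VT-d. A usage sketch in which dev, mmio_phys, and len stand in for a real driver's values:

    #include <linux/dma-mapping.h>

    dma_addr_t handle;

    handle = dma_map_resource(dev, mmio_phys, len, DMA_FROM_DEVICE, 0);
    if (dma_mapping_error(dev, handle))
            return -EIO;

    /* ... program the DMA engine with 'handle' ... */

    dma_unmap_resource(dev, handle, len, DMA_FROM_DEVICE, 0);
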
@@ -4339,7 +4341,7 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
- int sp, ret = 0;
+ int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
if (g_iommus[iommu->seq_id])
@@ -4503,7 +4505,7 @@ out:
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
- int ret = 0;
+ int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *atsr;
@@ -4520,7 +4522,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
((void *)rmrr) + rmrr->header.length,
rmrr->segment, rmrru->devices,
rmrru->devices_cnt);
- if(ret < 0)
+ if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
@@ -4540,7 +4542,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
atsru->devices_cnt);
if (ret > 0)
break;
- else if(ret < 0)
+ else if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
@@ -4567,16 +4569,19 @@ static int device_notifier(struct notifier_block *nb,
if (iommu_dummy(dev))
return 0;
- if (action != BUS_NOTIFY_REMOVED_DEVICE)
- return 0;
-
- domain = find_domain(dev);
- if (!domain)
- return 0;
+ if (action == BUS_NOTIFY_REMOVED_DEVICE) {
+ domain = find_domain(dev);
+ if (!domain)
+ return 0;
- dmar_remove_one_dev_info(domain, dev);
- if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
- domain_exit(domain);
+ dmar_remove_one_dev_info(dev);
+ if (!domain_type_is_vm_or_si(domain) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ } else if (action == BUS_NOTIFY_ADD_DEVICE) {
+ if (iommu_should_identity_map(dev, 1))
+ domain_add_dev_info(si_domain, dev);
+ }
return 0;
}
@@ -4987,8 +4992,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
free_devinfo_mem(info);
}
-static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
+static void dmar_remove_one_dev_info(struct device *dev)
{
struct device_domain_info *info;
unsigned long flags;
@@ -5077,7 +5081,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(dev);
if (old_domain) {
rcu_read_lock();
- dmar_remove_one_dev_info(old_domain, dev);
+ dmar_remove_one_dev_info(dev);
rcu_read_unlock();
if (!domain_type_is_vm_or_si(old_domain) &&
@@ -5096,9 +5100,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
addr_width = cap_mgaw(iommu->cap);
if (dmar_domain->max_addr > (1LL << addr_width)) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
+ dev_err(dev, "%s: iommu width (%d) is not "
+ "sufficient for the mapped address (%llx)\n",
+ __func__, addr_width, dmar_domain->max