author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 10:50:05 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 10:50:05 -0700
commit    79319a052cb0ae862954fe9f6e606417f1698ddb
tree      8de4379dd3534fd5a92e15a4781d25f759e4f8b7  /drivers/iommu
parent    6496edfce95f943e1da43631c2f437509e56af7f
parent    7f65ef01e131650d455875598099cd06fea6096b
Merge tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "Not much this time, but the changes include:

   - moving domain allocation into the iommu drivers to prepare for the
     introduction of default domains for devices

   - fixing the IO page-table code in the AMD IOMMU driver to correctly
     encode large page sizes

   - extension of the PCI support in the ARM-SMMU driver

   - various fixes and cleanups"

* tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (34 commits)
  iommu/amd: Correctly encode huge pages in iommu page tables
  iommu/amd: Optimize amd_iommu_iova_to_phys for new fetch_pte interface
  iommu/amd: Optimize alloc_new_range for new fetch_pte interface
  iommu/amd: Optimize iommu_unmap_page for new fetch_pte interface
  iommu/amd: Return the pte page-size in fetch_pte
  iommu/amd: Add support for contiguous dma allocator
  iommu/amd: Don't allocate with __GFP_ZERO in alloc_coherent
  iommu/amd: Ignore BUS_NOTIFY_UNBOUND_DRIVER event
  iommu/amd: Use BUS_NOTIFY_REMOVED_DEVICE
  iommu/tegra: smmu: Compute PFN mask at runtime
  iommu/tegra: gart: Set aperture at domain initialization time
  iommu/tegra: Setup aperture
  iommu: Remove domain_init and domain_destroy iommu_ops
  iommu/fsl: Make use of domain_alloc and domain_free
  iommu/rockchip: Make use of domain_alloc and domain_free
  iommu/ipmmu-vmsa: Make use of domain_alloc and domain_free
  iommu/shmobile: Make use of domain_alloc and domain_free
  iommu/msm: Make use of domain_alloc and domain_free
  iommu/tegra-gart: Make use of domain_alloc and domain_free
  iommu/tegra-smmu: Make use of domain_alloc and domain_free
  ...
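The central change in this series is the switch from the domain_init/domain_destroy callbacks (which received a pre-allocated struct iommu_domain and hung driver state off dom->priv) to domain_alloc/domain_free, where each driver embeds the generic struct iommu_domain inside its own domain structure and recovers it with container_of(). The sketch below shows the shape of that conversion using a made-up "foo" driver; it is illustrative only and not code from this merge, though the amd_iommu and arm-smmu hunks that follow apply the same pattern.

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_domain {
	spinlock_t		lock;	/* driver-private state */
	struct iommu_domain	domain;	/* generic handle, embedded instead of dom->priv */
};

static struct foo_domain *to_foo_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct foo_domain, domain);
}

static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct foo_domain *fd;

	/* Only unmanaged domains are supported by the converted drivers so far. */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return NULL;

	spin_lock_init(&fd->lock);

	/* The IOMMU core only ever sees the embedded member. */
	return &fd->domain;
}

static void foo_domain_free(struct iommu_domain *dom)
{
	kfree(to_foo_domain(dom));
}

static const struct iommu_ops foo_iommu_ops = {
	.domain_alloc	= foo_domain_alloc,
	.domain_free	= foo_domain_free,
	/* .attach_dev, .map, .unmap, ... are unchanged by this series */
};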
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c            | 250
-rw-r--r--  drivers/iommu/amd_iommu_types.h      |  13
-rw-r--r--  drivers/iommu/amd_iommu_v2.c         |   2
-rw-r--r--  drivers/iommu/arm-smmu.c             | 171
-rw-r--r--  drivers/iommu/exynos-iommu.c         |  87
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c      |  60
-rw-r--r--  drivers/iommu/fsl_pamu_domain.h      |   2
-rw-r--r--  drivers/iommu/intel-iommu.c          |  61
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  |  12
-rw-r--r--  drivers/iommu/io-pgtable-arm.c       |   5
-rw-r--r--  drivers/iommu/iommu.c                |  26
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c           |  41
-rw-r--r--  drivers/iommu/msm_iommu.c            |  73
-rw-r--r--  drivers/iommu/omap-iommu.c           |  49
-rw-r--r--  drivers/iommu/rockchip-iommu.c       |  40
-rw-r--r--  drivers/iommu/shmobile-iommu.c       |  39
-rw-r--r--  drivers/iommu/tegra-gart.c           |  88
-rw-r--r--  drivers/iommu/tegra-smmu.c           |  59
18 files changed, 604 insertions, 474 deletions
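The huge-page encoding fix in amd_iommu.c depends on fetch_pte() now reporting the page size of the entry it found; the new PTE_LEVEL_PAGE_SIZE() macro added to amd_iommu_types.h maps a page-table level to its default page size as 1ULL << (12 + 9 * level). A tiny stand-alone check of that arithmetic (ordinary user-space C, not part of the patch) is:

#include <stdio.h>

/* Same formula as the PTE_LEVEL_PAGE_SIZE() macro added in this merge. */
static unsigned long long pte_level_page_size(int level)
{
	return 1ULL << (12 + 9 * level);
}

int main(void)
{
	/* level 0 -> 4 KiB, level 1 -> 2 MiB, level 2 -> 1 GiB */
	for (int level = 0; level <= 2; level++)
		printf("level %d: %llu bytes\n", level, pte_level_page_size(level));
	return 0;
}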
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 48882c126245..e43d48956dea 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
+#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -126,6 +127,11 @@ static int __init alloc_passthrough_domain(void);
*
****************************************************************************/
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1321,7 +1327,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long *page_size)
{
int level;
u64 *pte;
@@ -1329,8 +1337,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -1339,19 +1348,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
return NULL;
/* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 0x07) {
- unsigned long pte_mask, __pte;
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- pte_mask = PTE_PAGE_SIZE(*pte);
- pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
- __pte = ((unsigned long)pte) & pte_mask;
-
- return (u64 *)__pte;
- }
+ if (PM_PTE_LEVEL(*pte) == 7 ||
+ PM_PTE_LEVEL(*pte) == 0)
+ break;
/* No level skipping support yet */
if (PM_PTE_LEVEL(*pte) != level)
@@ -1360,8 +1359,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
level -= 1;
/* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ if (PM_PTE_LEVEL(*pte) == 0x07) {
+ unsigned long pte_mask;
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
+ pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+ pte = (u64 *)(((unsigned long)pte) & pte_mask);
}
return pte;
@@ -1383,13 +1395,14 @@ static int iommu_map_page(struct protection_domain *dom,
u64 __pte, *pte;
int i, count;
+ BUG_ON(!IS_ALIGNED(bus_addr, page_size));
+ BUG_ON(!IS_ALIGNED(phys_addr, page_size));
+
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
- bus_addr = PAGE_ALIGN(bus_addr);
- phys_addr = PAGE_ALIGN(phys_addr);
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ count = PAGE_SIZE_PTE_COUNT(page_size);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
if (!pte)
return -ENOMEM;
@@ -1398,7 +1411,7 @@ static int iommu_map_page(struct protection_domain *dom,
if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY;
- if (page_size > PAGE_SIZE) {
+ if (count > 1) {
__pte = PAGE_SIZE_PTE(phys_addr, page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
@@ -1421,7 +1434,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long page_size)
{
- unsigned long long unmap_size, unmapped;
+ unsigned long long unmapped;
+ unsigned long unmap_size;
u64 *pte;
BUG_ON(!is_power_of_2(page_size));
@@ -1430,28 +1444,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
while (unmapped < page_size) {
- pte = fetch_pte(dom, bus_addr);
-
- if (!pte) {
- /*
- * No PTE for this address
- * move forward in 4kb steps
- */
- unmap_size = PAGE_SIZE;
- } else if (PM_PTE_LEVEL(*pte) == 0) {
- /* 4kb PTE found for this address */
- unmap_size = PAGE_SIZE;
- *pte = 0ULL;
- } else {
- int count, i;
-
- /* Large PTE found which maps this address */
- unmap_size = PTE_PAGE_SIZE(*pte);
-
- /* Only unmap from the first pte in the page */
- if ((unmap_size - 1) & bus_addr)
- break;
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ pte = fetch_pte(dom, bus_addr, &unmap_size);
+
+ if (pte) {
+ int i, count;
+
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
}
@@ -1599,7 +1597,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
{
int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
struct amd_iommu *iommu;
- unsigned long i, old_size;
+ unsigned long i, old_size, pte_pgsize;
#ifdef CONFIG_IOMMU_STRESS
populate = false;
@@ -1672,12 +1670,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
*/
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
- i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i);
+ i += pte_pgsize) {
+ u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
+ dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
+ pte_pgsize >> 12);
}
update_domain(&dma_dom->domain);
@@ -2422,16 +2421,6 @@ static int device_change_notifier(struct notifier_block *nb,
dev_data = get_dev_data(dev);
switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
-
- domain = domain_for_device(dev);
-
- if (!domain)
- goto out;
- if (dev_data->passthrough)
- break;
- detach_device(dev);
- break;
case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev);
@@ -2467,7 +2456,7 @@ static int device_change_notifier(struct notifier_block *nb,
dev->archdata.dma_ops = &amd_iommu_dma_ops;
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
iommu_uninit_device(dev);
@@ -2923,38 +2912,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
- unsigned long flags;
- void *virt_addr;
- struct protection_domain *domain;
- phys_addr_t paddr;
u64 dma_mask = dev->coherent_dma_mask;
+ struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_alloc_coherent);
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL) {
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- *dma_addr = __pa(virt_addr);
- return virt_addr;
+ page = alloc_pages(flag, get_order(size));
+ *dma_addr = page_to_phys(page);
+ return page_address(page);
} else if (IS_ERR(domain))
return NULL;
+ size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- if (!virt_addr)
- return NULL;
+ page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
+ if (!page) {
+ if (!(flag & __GFP_WAIT))
+ return NULL;
- paddr = virt_to_phys(virt_addr);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+ }
if (!dma_mask)
dma_mask = *dev->dma_mask;
spin_lock_irqsave(&domain->lock, flags);
- *dma_addr = __map_single(dev, domain->priv, paddr,
+ *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == DMA_ERROR_CODE) {
@@ -2966,11 +2959,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
- return virt_addr;
+ return page_address(page);
out_free:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
return NULL;
}
@@ -2982,11 +2976,15 @@ static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
- unsigned long flags;
struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_free_coherent);
+ page = virt_to_page(virt_addr);
+ size = PAGE_ALIGN(size);
+
domain = get_domain(dev);
if (IS_ERR(domain))
goto free_mem;
@@ -3000,7 +2998,8 @@ static void free_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
free_mem:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
}
/*
@@ -3236,42 +3235,45 @@ static int __init alloc_passthrough_domain(void)
return 0;
}
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
- struct protection_domain *domain;
+ struct protection_domain *pdomain;
- domain = protection_domain_alloc();
- if (!domain)
- goto out_free;
+ /* We only support unmanaged domains for now */
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
- domain->mode = PAGE_MODE_3_LEVEL;
- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!domain->pt_root)
+ pdomain = protection_domain_alloc();
+ if (!pdomain)
goto out_free;
- domain->iommu_domain = dom;
-
- dom->priv = domain;
+ pdomain->mode = PAGE_MODE_3_LEVEL;
+ pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pdomain->pt_root)
+ goto out_free;
- dom->geometry.aperture_start = 0;
- dom->geometry.aperture_end = ~0ULL;
- dom->geometry.force_aperture = true;
+ pdomain->domain.geometry.aperture_start = 0;
+ pdomain->domain.geometry.aperture_end = ~0ULL;
+ pdomain->domain.geometry.force_aperture = true;
- return 0;
+ return &pdomain->domain;
out_free:
- protection_domain_free(domain);
+ protection_domain_free(pdomain);
- return -ENOMEM;
+ return NULL;
}
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+static void amd_iommu_domain_free(struct iommu_domain *dom)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain;
- if (!domain)
+ if (!dom)
return;
+ domain = to_pdomain(dom);
+
if (domain->dev_cnt > 0)
cleanup_domain(domain);
@@ -3284,8 +3286,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_gcr3_table(domain);
protection_domain_free(domain);
-
- dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3313,7 +3313,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int ret;
@@ -3340,7 +3340,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
int ret;
@@ -3362,7 +3362,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t page_size)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
@@ -3380,28 +3380,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
- struct protection_domain *domain = dom->priv;
- unsigned long offset_mask;
- phys_addr_t paddr;
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
if (domain->mode == PAGE_MODE_NONE)
return iova;
- pte = fetch_pte(domain, iova);
+ pte = fetch_pte(domain, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
- if (PM_PTE_LEVEL(*pte) == 0)
- offset_mask = PAGE_SIZE - 1;
- else
- offset_mask = PTE_PAGE_SIZE(*pte) - 1;
-
- __pte = *pte & PM_ADDR_MASK;
- paddr = (__pte & ~offset_mask) | (iova & offset_mask);
+ offset_mask = pte_pgsize - 1;
+ __pte = *pte & PM_ADDR_MASK;
- return paddr;
+ return (__pte & ~offset_mask) | (iova & offset_mask);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@@ -3420,8 +3414,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
- .domain_init = amd_iommu_domain_init,
- .domain_destroy = amd_iommu_domain_destroy,
+ .domain_alloc = amd_iommu_domain_alloc,
+ .domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
@@ -3483,7 +3477,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
@@ -3504,7 +3498,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map);
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int levels, ret;
@@ -3616,7 +3610,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
u64 address)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3638,7 +3632,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3718,7 +3712,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
unsigned long cr3)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3732,7 +3726,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3765,17 +3759,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
- struct protection_domain *domain;
+ struct protection_domain *pdomain;
- domain = get_domain(&pdev->dev);
- if (IS_ERR(domain))
+ pdomain = get_domain(&pdev->dev);
+ if (IS_ERR(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
- if (!(domain->flags & PD_IOMMUV2_MASK))
+ if (!(pdomain->flags & PD_IOMMUV2_MASK))
return NULL;
- return domain->iommu_domain;
+ return &pdomain->domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c4fffb710c58..05030e523771 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -282,6 +282,12 @@
#define PTE_PAGE_SIZE(pte) \
(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level) \
+ (1ULL << (12 + (9 * (level))))
+
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
@@ -400,6 +406,8 @@ struct iommu_domain;
struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
+ struct iommu_domain domain; /* generic domain handle used by
+ iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
@@ -411,10 +419,7 @@ struct protection_domain {
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
- struct iommu_domain *iommu_domain; /* Pointer to generic
- domain structure */
-
+ void *priv; /* private data */
};
/*
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6d5a5c44453b..a1cbba9056fd 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -417,7 +417,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
dev_state = pasid_state->device_state;
run_inv_ctx_cb = !pasid_state->invalid;
- if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
+ if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
unbind_pasid(pasid_state);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a3adde6519f0..9f7e1d34a32b 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -343,6 +343,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
+ struct iommu_domain domain;
};
static struct iommu_ops arm_smmu_ops;
@@ -360,6 +361,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -645,7 +651,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
u32 fsr, far, fsynr, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
@@ -730,6 +736,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ if (smmu->version > ARM_SMMU_V1) {
+ /*
+ * CBA2R.
+ * *Must* be initialised before CBAR thanks to VMID16
+ * architectural oversight affected some implementations.
+ */
+#ifdef CONFIG_64BIT
+ reg = CBA2R_RW64_64BIT;
+#else
+ reg = CBA2R_RW64_32BIT;
+#endif
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+ }
+
/* CBAR */
reg = cfg->cbar;
if (smmu->version == ARM_SMMU_V1)
@@ -747,16 +767,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- if (smmu->version > ARM_SMMU_V1) {
- /* CBA2R */
-#ifdef CONFIG_64BIT
- reg = CBA2R_RW64_64BIT;
-#else
- reg = CBA2R_RW64_32BIT;
-#endif
- writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
- }
-
/* TTBRs */
if (stage1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -836,7 +846,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
mutex_lock(&smmu_domain->init_mutex);
@@ -958,7 +968,7 @@ out_unlock:
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *cb_base;
@@ -985,10 +995,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
-static int arm_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -996,17 +1008,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return -ENOMEM;
+ return NULL;
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
- domain->priv = smmu_domain;
- return 0;
+
+ return &smmu_domain->domain;
}
-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+static void arm_smmu_domain_free(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
/*
* Free the domain resources. We assume that all devices have
@@ -1143,7 +1155,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
@@ -1187,7 +1199,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master_cfg *cfg;
cfg = find_smmu_master_cfg(dev);
@@ -1203,7 +1215,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1220,7 +1232,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
{
size_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1235,7 +1247,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -1281,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
{
phys_addr_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1329,61 +1341,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
kfree(data);
}
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ int i, ret;
+ u16 sid;
struct iommu_group *group;
- void (*releasefn)(void *) = NULL;
- int ret;
-
- smmu = find_smmu_for_device(dev);
- if (!smmu)
- return -ENODEV;
+ struct arm_smmu_master_cfg *cfg;
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
+ group = iommu_group_get_for_dev(&pdev->dev);
+ if (IS_ERR(group))
return PTR_ERR(group);
- }
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
+ cfg = iommu_group_get_iommudata(group);
+ if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
ret = -ENOMEM;
goto out_put_group;
}
- cfg->num_streamids = 1;
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
- &cfg->streamids[0]);
- releasefn = __arm_smmu_release_pci_iommudata;
- } else {
- struct arm_smmu_master *master;
-
- master = find_smmu_master(smmu, dev->of_node);
- if (!master) {
- ret = -ENODEV;
- goto out_put_group;
- }
+ iommu_group_set_iommudata(group, cfg,
+ __arm_smmu_release_pci_iommudata);
+ }
- cfg = &master->cfg;
+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+ ret = -ENOSPC;
+ goto out_put_group;
}
- iommu_group_set_iommudata(group, cfg, releasefn);
- ret = iommu_group_add_device(group, dev);
+ /*
+ * Assume Stream ID == Requester ID for now.
+ * We need a way to describe the ID mappings in FDT.
+ */
+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+ for (i = 0; i < cfg->num_streamids; ++i)
+ if (cfg->streamids[i] == sid)
+ break;
+
+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+ if (i == cfg->num_streamids)
+ cfg->streamids[cfg->num_streamids++] = sid;
+ return 0;
out_put_group:
iommu_group_put(group);
return ret;
}
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+ struct iommu_group *group;
+ struct arm_smmu_master *master;
+ struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+ if (!smmu)
+ return -ENODEV;
+
+ master = find_smmu_master(smmu, dev->of_node);
+ if (!master)
+ return -ENODEV;
+
+ /* No automatic group creation for platform devices */
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_set_iommudata(group, &master->cfg, NULL);
+ return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+ return arm_smmu_add_platform_device(dev);
+}
+
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
@@ -1392,7 +1426,7 @@ static void arm_smmu_remove_device(struct device *dev)
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
switch (attr) {
case DOMAIN_ATTR_NESTING:
@@ -1407,7 +1441,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
@@ -1435,8 +1469,8 @@ out_unlock:
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
+ .domain_alloc = arm_smmu_domain_alloc,
+ .domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,