author    Robin Murphy <robin.murphy@arm.com>    2020-09-03 12:34:04 +0100
committer Joerg Roedel <jroedel@suse.de>         2020-09-04 11:55:07 +0200
commit    4604393ca0c6e4a73c04798c3f1d9878e70b7251
tree      02fd445b4e5f95fc4ef3a7c78f6b1942f28af34c /drivers/iommu/dma-iommu.c
parent    aae4c8e27bd7567132bb931488e2faf1a57c66e9
iommu/dma: Remove broken huge page handling
The attempt to handle huge page allocations was originally added since the
comments around stripping __GFP_COMP in other implementations were
nonsensical, and we naively assumed that split_huge_page() could simply be
called equivalently to split_page(). It turns out that this doesn't actually
work correctly, so just get rid of it - there's little point going to the
effort of allocating huge pages if we're only going to split them anyway.

Reported-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/e287dbe69aa0933abafd97c80631940fd188ddd1.1599132844.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
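For readers who want the upshot without walking the hunks below: after this
patch, __iommu_dma_alloc_pages() never asks the page allocator for compound
pages and simply splits whatever higher-order block it gets. A minimal sketch
of that strategy follows; the helper name demo_alloc_split() is illustrative
only and is not part of the kernel source or of this commit.

/*
 * Illustrative sketch only -- not from the patch. It mirrors the post-patch
 * allocation strategy: request plain (non-compound) pages, then split any
 * higher-order allocation into order-0 pages up front.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *demo_alloc_split(int nid, gfp_t gfp, unsigned int order)
{
	struct page *page;

	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
	gfp &= ~__GFP_COMP;	/* never request compound/huge pages */

	page = alloc_pages_node(nid, gfp, order);
	if (!page)
		return NULL;
	if (order)
		split_page(page, order);	/* 2^order independent order-0 pages */
	return page;
}

Each of the resulting order-0 pages can then be tracked and freed on its own,
which is exactly why there is little point in allocating huge pages only to
break them apart.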
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3afd076e9f36..79b4d1073dcb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -524,6 +524,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 
 	/* IOMMU can map any pages, so himem can also be used here */
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+	/* It makes no sense to muck about with huge pages */
+	gfp &= ~__GFP_COMP;
+
 	while (count) {
 		struct page *page = NULL;
 		unsigned int order_size;
@@ -544,15 +547,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 			page = alloc_pages_node(nid, alloc_flags, order);
 			if (!page)
 				continue;
-			if (!order)
-				break;
-			if (!PageCompound(page)) {
+			if (order)
 				split_page(page, order);
-				break;
-			} else if (!split_huge_page(page)) {
-				break;
-			}
-			__free_pages(page, order);
+			break;
 		}
 		if (!page) {
 			__iommu_dma_free_pages(pages, i);