From 4ac1c68e360653cad34488a5a1915d55f7c3fa86 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 19 May 2018 09:17:01 +0200
Subject: nds32: consolidate DMA cache maintenance routines

Make sure all other DMA methods call nds32_dma_sync_single_for_{device,cpu}
to perform cache maintenance, and remove the consistent_sync helper that
implemented both with entirely separate code based on an argument.  Also
make sure these helpers handle highmem properly, for which code is copied
and pasted from mips.

Signed-off-by: Christoph Hellwig
Acked-by: Greentime Hu
Tested-by: Greentime Hu
---
 arch/nds32/kernel/dma.c | 187 ++++++++++++++++++++++++------------------------
 1 file changed, 93 insertions(+), 94 deletions(-)

diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index d291800fc621..e0c94a2889c5 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -22,11 +22,6 @@
 static pte_t *consistent_pte;
 static DEFINE_RAW_SPINLOCK(consistent_lock);
 
-enum master_type {
-	FOR_CPU = 0,
-	FOR_DEVICE = 1,
-};
-
 /*
  * VM region handling support.
  *
@@ -333,106 +328,105 @@ static int __init consistent_init(void)
 }
 
 core_initcall(consistent_init);
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
-static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
-	return page_to_phys(page) + offset;
-}
-
-static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
-}
-/*
- * Make an area consistent for devices.
- */
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
+static inline void cache_op(phys_addr_t paddr, size_t size,
+		void (*fn)(unsigned long start, unsigned long end))
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned offset = paddr & ~PAGE_MASK;
+	size_t left = size;
+	unsigned long start;
 
-	if (master_type == FOR_CPU) {
-		switch (direction) {
-		case DMA_TO_DEVICE:
-			break;
-		case DMA_FROM_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_inval_range(start, end);
-			break;
-		default:
-			BUG();
-		}
-	} else {
-		/* FOR_DEVICE */
-		switch (direction) {
-		case DMA_FROM_DEVICE:
-			break;
-		case DMA_TO_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_wb_range(start, end);
-			break;
-		default:
-			BUG();
-		}
-	}
-}
-
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-			    int nents, enum dma_data_direction dir,
-			    unsigned long attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		void *virt;
-		unsigned long pfn;
-		struct page *page = sg_page(sg);
+	do {
+		size_t len = left;
 
-		sg->dma_address = sg_phys(sg);
-		pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
-		page = pfn_to_page(pfn);
 		if (PageHighMem(page)) {
-			virt = kmap_atomic(page);
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
-			kunmap_atomic(virt);
+			void *addr;
+
+			if (offset + len > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset >> PAGE_SHIFT;
+					offset &= ~PAGE_MASK;
+				}
+				len = PAGE_SIZE - offset;
+			}
+
+			addr = kmap_atomic(page);
+			start = (unsigned long)(addr + offset);
+			fn(start, start + len);
+			kunmap_atomic(addr);
 		} else {
-			if (sg->offset > PAGE_SIZE)
-				panic("sg->offset:%08x > PAGE_SIZE\n",
-				      sg->offset);
-			virt = page_address(page) + sg->offset;
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
+			start = (unsigned long)phys_to_virt(paddr);
+			fn(start, start + size);
 		}
-	}
-	return nents;
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
 }
 
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			       int nhwentries, enum dma_data_direction dir,
-			       unsigned long attrs)
+static void
+nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+				 size_t size, enum dma_data_direction dir)
 {
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		break;
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(handle, size, cpu_dma_wb_range);
+		break;
+	default:
+		BUG();
+	}
 }
 
 static void
 nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
 			      size_t size, enum dma_data_direction dir)
 {
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(handle, size, cpu_dma_inval_range);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir,
+				     unsigned long attrs)
+{
+	dma_addr_t dma_addr = page_to_phys(page) + offset;
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		nds32_dma_sync_single_for_device(dev, dma_addr, size, dir);
+	return dma_addr;
+}
+
+static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
+				 size_t size, enum dma_data_direction dir,
+				 unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
 }
 
 static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir)
+nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction dir)
 {
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
+	int i;
+
+	for (i = 0; i < nents; i++, sg++) {
+		nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
+				sg->length, dir);
+	}
 }
 
 static void
@@ -442,23 +436,28 @@ nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_CPU);
+		nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+				sg->length, dir);
 	}
 }
 
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
+static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction dir,
+			    unsigned long attrs)
 {
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_DEVICE);
+		nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
+				sg->length, dir);
 	}
+	return nents;
+}
+
+static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+			       int nhwentries, enum dma_data_direction dir,
+			       unsigned long attrs)
+{
 }
 
 struct dma_map_ops nds32_dma_ops = {
-- cgit v1.2.3
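The cache_op() helper added above walks a physical range page by page so that each highmem page can be mapped with kmap_atomic() just long enough to run the cache routine over it; lowmem ranges stay in the kernel's linear mapping and are handled in a single call. The chunking logic is easy to check outside the kernel. The following is a minimal standalone sketch of it; PAGE_SHIFT, walk(), and the sample address are illustrative stand-ins rather than kernel code, and the sketch clamps every segment to a page where the kernel only needs to do so for highmem:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void walk(unsigned long paddr, size_t size)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;	/* first page frame */
	unsigned long offset = paddr & ~PAGE_MASK;	/* offset within it */
	size_t left = size;

	do {
		size_t len = left;

		/* Clamp the segment so it never crosses a page boundary,
		 * mirroring the highmem branch of cache_op(). */
		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* cache_op() kmap_atomic()s the page and runs
		 * cpu_dma_wb_range()/cpu_dma_inval_range() here. */
		printf("pfn %lu: offset %lu, len %zu\n", pfn, offset, len);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* A 10000-byte buffer starting 512 bytes into page frame 42. */
	walk((42UL << PAGE_SHIFT) + 512, 10000);
	return 0;
}

For that input the program prints a 3584-byte head segment, one full 4096-byte page, and a 2320-byte tail, which is exactly the sequence of ranges cache_op() would hand to the cache routine for a highmem buffer.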
From f860122c512215df598fc5b5cfe36e35335f6c6f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 19 May 2018 09:22:05 +0200
Subject: nds32: implement the unmap_sg DMA operation

This matches the implementation of the more commonly used unmap_single
routines and the sync_sg_for_cpu method, which should provide equivalent
cache maintenance.

Signed-off-by: Christoph Hellwig
Acked-by: Greentime Hu
Tested-by: Greentime Hu
---
 arch/nds32/kernel/dma.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index e0c94a2889c5..b9973317c734 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -458,6 +458,12 @@ static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 			       int nhwentries, enum dma_data_direction dir,
 			       unsigned long attrs)
 {
+	int i;
+
+	for (i = 0; i < nhwentries; i++, sg++) {
+		nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+				sg->length, dir);
+	}
 }
 
 struct dma_map_ops nds32_dma_ops = {
-- cgit v1.2.3
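The empty unmap_sg from the first patch was not actually equivalent: on a non-coherent CPU, unmap time is when a DMA_FROM_DEVICE or DMA_BIDIRECTIONAL buffer must have its stale cache lines invalidated before the CPU reads what the device wrote, exactly as nds32_dma_unmap_page already did. The direction-to-operation policy the two sync helpers implement can be tabulated with a small standalone program; this is a sketch that mirrors the two switch statements from the first patch (enum values follow include/linux/dma-direction.h, and the BUG() default cases are omitted):

#include <stdio.h>

/* Values follow include/linux/dma-direction.h. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Same policy as nds32_dma_sync_single_for_device(). */
static const char *sync_for_device(enum dma_data_direction dir)
{
	return dir == DMA_FROM_DEVICE ? "-" : "writeback (cpu_dma_wb_range)";
}

/* Same policy as nds32_dma_sync_single_for_cpu(). */
static const char *sync_for_cpu(enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ? "-" : "invalidate (cpu_dma_inval_range)";
}

int main(void)
{
	static const char * const names[] = {
		"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE",
	};
	int dir;

	for (dir = DMA_BIDIRECTIONAL; dir <= DMA_FROM_DEVICE; dir++)
		printf("%-18s  map/for_device: %-28s  unmap/for_cpu: %s\n",
		       names[dir], sync_for_device(dir), sync_for_cpu(dir));
	return 0;
}

Note the asymmetry: a bidirectional buffer needs writeback before the device sees it and invalidation before the CPU reads it back, while each one-way direction needs exactly one of the two.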
From 267d2e18a281c965229a3c3ccce51c471f9c4f8a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 28 May 2018 09:55:35 +0200
Subject: nds32: use generic dma_noncoherent_ops

Switch to the generic noncoherent direct mapping implementation.

Signed-off-by: Christoph Hellwig
Acked-by: Greentime Hu
Tested-by: Greentime Hu
---
 arch/nds32/Kconfig                   |   3 +
 arch/nds32/include/asm/Kbuild        |   1 +
 arch/nds32/include/asm/dma-mapping.h |  14 ----
 arch/nds32/kernel/dma.c              | 113 ++++------------------------------
 4 files changed, 15 insertions(+), 116 deletions(-)
 delete mode 100644 arch/nds32/include/asm/dma-mapping.h

diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..67d0ac0a989c 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -5,10 +5,13 @@
 config NDS32
 	def_bool y
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_WANT_FRAME_POINTERS if FTRACE
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select DMA_NONCOHERENT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS

diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..b3e951f805f8 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h

diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
deleted file mode 100644
index 2dd47d245c25..000000000000
--- a/arch/nds32/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#ifndef ASMNDS32_DMA_MAPPING_H
-#define ASMNDS32_DMA_MAPPING_H
-
-extern struct dma_map_ops nds32_dma_ops;
-
-static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &nds32_dma_ops;
-}
-
-#endif

diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index b9973317c734..d0dbd4fe9645 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -3,17 +3,14 @@
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/export.h>
 #include <linux/string.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/dma-mapping.h>
 #include <asm/proc-fns.h>
 
 /*
@@ -119,10 +116,8 @@ out:
 	return c;
 }
 
-/* FIXME: attrs is not used. */
-static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t * handle, gfp_t gfp,
-				      unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct arch_vm_region *c;
@@ -227,8 +222,8 @@ no_page:
 	return NULL;
 }
 
-static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			   dma_addr_t handle, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, unsigned long attrs)
 {
 	struct arch_vm_region *c;
 	unsigned long flags, addr;
@@ -365,118 +360,32 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_FROM_DEVICE:
 		break;
 	case DMA_TO_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cache_op(handle, size, cpu_dma_wb_range);
+		cache_op(paddr, size, cpu_dma_wb_range);
 		break;
 	default:
 		BUG();
 	}
 }
 
-static void
-nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-			      size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		break;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cache_op(handle, size, cpu_dma_inval_range);
+		cache_op(paddr, size, cpu_dma_inval_range);
 		break;
 	default:
 		BUG();
 	}
 }
-
-static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	dma_addr_t dma_addr = page_to_phys(page) + offset;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		nds32_dma_sync_single_for_device(dev, dma_addr, size, dir);
-	return dma_addr;
-}
-
-static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
-}
-
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
-				sg->length, dir);
-	}
-}
-
-static void
-nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-			  enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
-				sg->length, dir);
-	}
-}
-
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-			    int nents, enum dma_data_direction dir,
-			    unsigned long attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
-				sg->length, dir);
-	}
-	return nents;
-}
-
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			       int nhwentries, enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
-				sg->length, dir);
-	}
-}
-
-struct dma_map_ops nds32_dma_ops = {
-	.alloc = nds32_dma_alloc_coherent,
-	.free = nds32_dma_free,
-	.map_page = nds32_dma_map_page,
-	.unmap_page = nds32_dma_unmap_page,
-	.map_sg = nds32_dma_map_sg,
-	.unmap_sg = nds32_dma_unmap_sg,
-	.sync_single_for_device = nds32_dma_sync_single_for_device,
-	.sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = nds32_dma_sync_sg_for_device,
-};
-
-EXPORT_SYMBOL(nds32_dma_ops);
-- cgit v1.2.3
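After this final patch nds32 no longer provides a struct dma_map_ops of its own: the generic-y dma-mapping.h line pulls in the asm-generic header, which in this era of the tree returns &dma_noncoherent_ops whenever CONFIG_DMA_NONCOHERENT_OPS is selected, and the generic implementation calls back into the two arch_sync_dma_* hooks kept above. That inversion of control can be sketched as a runnable userspace program; mock_map() and mock_unmap() are paraphrased stand-ins for what the generic layer does around the hooks (they are not the kernel/dma sources), while the direction policy inside the hooks is copied from the patches above:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long phys_addr_t;
typedef unsigned long dma_addr_t;

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* The two hooks nds32 still implements after this series; the real
 * ones call cache_op() with cpu_dma_wb_range()/cpu_dma_inval_range(). */
static void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
				     enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		printf("writeback  %#lx+%zu\n", paddr, size);
}

static void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		printf("invalidate %#lx+%zu\n", paddr, size);
}

/* Stand-ins for the generic noncoherent map/unmap paths, which decide
 * when the arch hooks run (DMA_ATTR_SKIP_CPU_SYNC handling omitted). */
static dma_addr_t mock_map(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	arch_sync_dma_for_device(paddr, size, dir);
	return paddr;		/* no IOMMU: bus address == physical */
}

static void mock_unmap(dma_addr_t handle, size_t size,
		       enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(handle, size, dir);
}

int main(void)
{
	dma_addr_t handle = mock_map(0x80100200UL, 4096, DMA_FROM_DEVICE);

	/* ...the device would DMA its data into the buffer here... */
	mock_unmap(handle, 4096, DMA_FROM_DEVICE);
	return 0;
}

For this DMA_FROM_DEVICE round trip only the invalidate at unmap time fires: nothing is written back at map time because the device, not the CPU, produces the data.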