From a98d9ae937d256ed679a935fc82d9deaa710d98e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 30 Apr 2019 06:51:50 -0400
Subject: arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable

DMA allocations that can't sleep may return non-remapped addresses, but
we do not properly handle them in the mmap and get_sgtable methods.
Resolve non-vmalloc addresses using virt_to_page to handle this corner
case.

Cc:
Acked-by: Catalin Marinas
Reviewed-by: Robin Murphy
Signed-off-by: Christoph Hellwig
Signed-off-by: Will Deacon
---
 arch/arm64/mm/dma-mapping.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 78c0a72f822c..674860e3e478 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		struct page *page = virt_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-- 
cgit v1.2.3

From 0e4add4ae79e4466ae2a7e7772dfbe39fb28787a Mon Sep 17 00:00:00 2001
From: Hillf Danton
Date: Tue, 14 May 2019 16:34:19 +0800
Subject: arm64: assembler: Update comment above cond_yield_neon() macro

Since commit 7faa313f05ca ("arm64: preempt: Fix big-endian when checking
preempt count in assembly"), both the preempt count and the
'need_resched' flag are checked as part of a single 64-bit load in
cond_yield_neon(), so update the stale comment to reflect reality.

Cc: Ard Biesheuvel
Cc: Dave Martin
Cc: Will Deacon
Cc: Peter Zijlstra
Signed-off-by: Hillf Danton
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 039fbd822ec6..92b6b7cf67dd 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -718,12 +718,11 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
  * the output section, any use of such directives is undefined.
  *
  * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1, in which case disabling
- *   preemption once will make the task preemptible. If this is not the case,
- *   yielding is pointless.
- * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
- *   kernel mode NEON (which will trigger a reschedule), and branch to the
- *   yield fixup code.
+ * - Check whether the preempt count is exactly 1 and a reschedule is also
+ *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
+ *   trigger a reschedule. If it is not the case, yielding is pointless.
+ * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
+ *   code.
  *
  * This macro sequence may clobber all CPU state that is not guaranteed by the
  * AAPCS to be preserved across an ordinary function call.
-- 
cgit v1.2.3

From 84c187afa210fcd5e34a7506c6e0dc2d69bc6641 Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Tue, 7 May 2019 13:52:28 -0700
Subject: arm64: don't trash config with compat symbol if COMPAT is disabled

ARCH_WANT_COMPAT_IPC_PARSE_VERSION is selected unconditionally, which
makes little sense if the kernel is compiled without COMPAT support.
Fix it.

This patch makes no functional changes since all existing code that is
guarded with ARCH_WANT_COMPAT_IPC_PARSE_VERSION is also guarded with
COMPAT.

Signed-off-by: Yury Norov
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1c0cb5131c2a..a4c34934bc52 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -65,7 +65,7 @@ config ARM64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
-	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARM_AMBA
-- 
cgit v1.2.3

From 48caebf7e1313eb9f0a06fe59a07ac05b38a5806 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 14 May 2019 12:25:28 +0100
Subject: arm64: Print physical address of page table base in show_pte()

When dumping the page table in response to an unexpected kernel page
fault, we print the virtual (hashed) address of the page table base, but
display physical addresses for everything else. Make the page table
dumping code in show_pte() consistent by printing the page table base
pointer as a physical address.

Reported-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/mm/fault.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0cb0e09995e1..dda234bcc020 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -171,9 +171,10 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
+		 mm == &init_mm ? VA_BITS : (int)vabits_user,
+		 (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
-- 
cgit v1.2.3

From 7ba36eccb3f83983a651efd570b4f933ecad1b5c Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 14 May 2019 14:30:06 +0530
Subject: arm64/mm: Inhibit huge-vmap with ptdump

The arm64 ptdump code can race with concurrent modification of the
kernel page tables. At the time this was added, this was sound as:

* Modifications to leaf entries could result in stale information being
  logged, but would not result in a functional problem.

* Boot time modifications to non-leaf entries (e.g. freeing of initmem)
  were performed when the ptdump code cannot be invoked.

* At runtime, modifications to non-leaf entries only occurred in the
  vmalloc region, and these were strictly additive, as intermediate
  entries were never freed.

However, since commit:

  commit 324420bf91f6 ("arm64: add support for ioremap() block mappings")

... it has been possible to create huge mappings in the vmalloc area at
runtime. As part of this, existing intermediate levels of table may be
removed and freed.

It's possible for the ptdump code to race with this, and continue to
walk tables which have been freed (and potentially poisoned or
reallocated). As a result of this, the ptdump code may dereference bogus
addresses, which could be fatal.

Since huge-vmap is a TLB and memory optimization, we can disable it when
the runtime ptdump code is in use to avoid this problem.

Cc: Catalin Marinas
Fixes: 324420bf91f60582 ("arm64: add support for ioremap() block mappings")
Acked-by: Ard Biesheuvel
Signed-off-by: Mark Rutland
Signed-off-by: Anshuman Khandual
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ef82312860ac..37a902cad918 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -955,13 +955,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 
 int __init arch_ioremap_pud_supported(void)
 {
-	/* only 4k granule supports level 1 block mappings */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+	/*
+	 * Only 4k granule supports level 1 block mappings.
+	 * SW table walks can't handle removal of intermediate entries.
+	 */
+	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return 1;
+	/* See arch_ioremap_pud_supported() */
+	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
-- 
cgit v1.2.3

From 7a0a93c51799edc45ee57c6cc1679aa94f1e03d5 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Wed, 15 May 2019 12:48:24 -0700
Subject: arm64: vdso: Explicitly add build-id option

Commit 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link
VDSO") switched to invoking $(LD) directly, so the --build-id option now
needs to be passed explicitly, similar to x86. Add this option.

Fixes: 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link VDSO")
Reviewed-by: Masahiro Yamada
Signed-off-by: Laura Abbott
[will: drop redundant use of 'call ld-option' as requested by Masahiro]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/vdso/Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 744b9dbaba03..fa230ff09aa1 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
 targets := $(obj-vdso) vdso.so vdso.so.dbg
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
-	$(call ld-option, --hash-style=sysv) -n -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+	--build-id -n -T
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
-- 
cgit v1.2.3
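
As an aside on the cond_yield_neon() comment change above: the single
64-bit load it refers to can be illustrated with a small userspace
sketch. This is a simplified model of the little-endian arm64
thread_info layout, not the kernel code itself; the union and function
names below are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Simplified little-endian layout: the low word is the preempt count,
 * the high word is cleared (0) when a reschedule is needed. */
union preempt_state {
	uint64_t val;
	struct {
		uint32_t count;		/* preempt disable depth */
		uint32_t need_resched;	/* 0 => reschedule needed */
	};
};

#define PREEMPT_DISABLE_OFFSET	1ULL

/* One 64-bit compare checks both fields at once: the count must be
 * exactly 1 (only our own preempt_disable() is held) and the
 * need_resched word must be 0. */
static int should_yield(union preempt_state s)
{
	return s.val == PREEMPT_DISABLE_OFFSET;
}

int main(void)
{
	union preempt_state s = { .val = 0 };

	s.count = 1;
	s.need_resched = 0;	/* reschedule pending */
	printf("yield: %d\n", should_yield(s));	/* prints 1 */

	s.need_resched = 1;	/* nothing pending */
	printf("yield: %d\n", should_yield(s));	/* prints 0 */
	return 0;
}

Folding both words into one comparison is what made the separate
TIF_NEED_RESCHED test in the old comment redundant.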
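
Similarly, the huge-vmap patch leans on IS_ENABLED() collapsing to a
compile-time 0 or 1. The following self-contained program re-implements
that preprocessor trick along the lines of include/linux/kconfig.h
(simplified: the =m module case is ignored) to show how the patched
arch_ioremap_pud_supported() logic evaluates:

#include <stdio.h>

#define CONFIG_ARM64_4K_PAGES 1
/* CONFIG_ARM64_PTDUMP_DEBUGFS deliberately left undefined */

/* If 'option' is defined to 1, the token paste yields
 * __ARG_PLACEHOLDER_1, which expands to "0," and shifts a 1 into the
 * second argument slot; otherwise the second argument stays 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)		___is_defined(x)
#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)	__is_defined(option)

static int arch_ioremap_pud_supported(void)
{
	/* Same shape as the patched helper: 4k granule required, and
	 * runtime ptdump must be configured out. */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int main(void)
{
	printf("pud block mappings supported: %d\n",
	       arch_ioremap_pud_supported());	/* prints 1 */
	return 0;
}

Because both operands are integer constant expressions, the compiler can
discard the disabled branch entirely while still type-checking it.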