Diffstat (limited to 'arch/metag/mm')
-rw-r--r--  arch/metag/mm/Kconfig       | 147
-rw-r--r--  arch/metag/mm/Makefile      |  20
-rw-r--r--  arch/metag/mm/cache.c       | 521
-rw-r--r--  arch/metag/mm/extable.c     |  15
-rw-r--r--  arch/metag/mm/fault.c       | 247
-rw-r--r--  arch/metag/mm/highmem.c     | 122
-rw-r--r--  arch/metag/mm/hugetlbpage.c | 251
-rw-r--r--  arch/metag/mm/init.c        | 408
-rw-r--r--  arch/metag/mm/ioremap.c     |  90
-rw-r--r--  arch/metag/mm/l2cache.c     | 193
-rw-r--r--  arch/metag/mm/maccess.c     |  69
-rw-r--r--  arch/metag/mm/mmu-meta1.c   | 157
-rw-r--r--  arch/metag/mm/mmu-meta2.c   | 208
-rw-r--r--  arch/metag/mm/numa.c        |  82
14 files changed, 0 insertions, 2530 deletions
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
deleted file mode 100644
index 9d4b2c67dcc1..000000000000
--- a/arch/metag/mm/Kconfig
+++ /dev/null
@@ -1,147 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menu "Memory management options"
-
-config PAGE_OFFSET
- hex "Kernel page offset address"
- default "0x40000000"
- help
- This option allows you to set the virtual address at which the
- kernel will be mapped to.
-endmenu
-
-config KERNEL_4M_PAGES
- bool "Map kernel with 4MB pages"
- depends on METAG_META21_MMU
- default y
- help
- Map the kernel with large pages to reduce TLB pressure.
-
-choice
- prompt "User page size"
- default PAGE_SIZE_4K
-
-config PAGE_SIZE_4K
- bool "4kB"
- help
- This is the default page size used by all Meta cores.
-
-config PAGE_SIZE_8K
- bool "8kB"
- depends on METAG_META21_MMU
- help
- This enables 8kB pages as supported by Meta 2.x and later MMUs.
-
-config PAGE_SIZE_16K
- bool "16kB"
- depends on METAG_META21_MMU
- help
- This enables 16kB pages as supported by Meta 2.x and later MMUs.
-
-endchoice
-
-config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
- select ARCH_WANT_NUMA_VARIABLE_LOCALITY
- help
- Some Meta systems have MMU-mappable on-chip memories with
- lower latencies than main memory. This enables support for
- these blocks by binding them to nodes and allowing
- memory policies to be used for prioritizing and controlling
- allocation behaviour.
-
-config FORCE_MAX_ZONEORDER
- int "Maximum zone order"
- range 10 32
- default "10"
- help
- The kernel memory allocator divides physically contiguous memory
- blocks into "zones", where each zone is a power of two number of
- pages. This option selects the largest power of two that the kernel
- keeps in the memory allocator. If you need to allocate very large
- blocks of physically contiguous memory, then you may need to
- increase this value.
-
- This config option is actually maximum order plus one. For example,
- a value of 11 means that the largest free memory block is 2^10 pages.
-
- The page size is not necessarily 4KB. Keep this in mind
- when choosing a value for this option.
-
-config METAG_L2C
- bool "Level 2 Cache Support"
- depends on METAG_META21
- help
- Press y here to enable support for the Meta Level 2 (L2) cache. This
- will enable the cache at start up if it hasn't already been enabled
- by the bootloader.
-
- If the bootloader enables the L2 you must press y here to ensure the
- kernel takes the appropriate actions to keep the cache coherent.
-
-config NODES_SHIFT
- int
- default "1"
- depends on NEED_MULTIPLE_NODES
-
-config ARCH_FLATMEM_ENABLE
- def_bool y
- depends on !NUMA
-
-config ARCH_SPARSEMEM_ENABLE
- def_bool y
- select SPARSEMEM_STATIC
-
-config ARCH_SPARSEMEM_DEFAULT
- def_bool y
-
-config ARCH_SELECT_MEMORY_MODEL
- def_bool y
-
-config SYS_SUPPORTS_HUGETLBFS
- def_bool y
- depends on METAG_META21_MMU
-
-choice
- prompt "HugeTLB page size"
- depends on METAG_META21_MMU && HUGETLB_PAGE
- default HUGETLB_PAGE_SIZE_1M
-
-config HUGETLB_PAGE_SIZE_8K
- bool "8kB"
- depends on PAGE_SIZE_4K
-
-config HUGETLB_PAGE_SIZE_16K
- bool "16kB"
- depends on PAGE_SIZE_4K || PAGE_SIZE_8K
-
-config HUGETLB_PAGE_SIZE_32K
- bool "32kB"
-
-config HUGETLB_PAGE_SIZE_64K
- bool "64kB"
-
-config HUGETLB_PAGE_SIZE_128K
- bool "128kB"
-
-config HUGETLB_PAGE_SIZE_256K
- bool "256kB"
-
-config HUGETLB_PAGE_SIZE_512K
- bool "512kB"
-
-config HUGETLB_PAGE_SIZE_1M
- bool "1MB"
-
-config HUGETLB_PAGE_SIZE_2M
- bool "2MB"
-
-config HUGETLB_PAGE_SIZE_4M
- bool "4MB"
-
-endchoice
-
-config METAG_COREMEM
- bool
- default y if SUSPEND
-
-source "mm/Kconfig"
diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile
deleted file mode 100644
index 0c7c91ba9fb9..000000000000
--- a/arch/metag/mm/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux Meta-specific parts of the memory manager.
-#
-
-obj-y += cache.o
-obj-y += extable.o
-obj-y += fault.o
-obj-y += init.o
-obj-y += ioremap.o
-obj-y += maccess.o
-
-mmu-y := mmu-meta1.o
-mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o
-obj-y += $(mmu-y)
-
-obj-$(CONFIG_HIGHMEM) += highmem.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_METAG_L2C) += l2cache.o
-obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
deleted file mode 100644
index a62285284ab8..000000000000
--- a/arch/metag/mm/cache.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * arch/metag/mm/cache.c
- *
- * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * Cache control code
- */
-
-#include <linux/export.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-#include <asm/core_reg.h>
-#include <asm/global_lock.h>
-#include <asm/metag_isa.h>
-#include <asm/metag_mem.h>
-#include <asm/metag_regs.h>
-
-#define DEFAULT_CACHE_WAYS_LOG2 2
-
-/*
- * Size of a set in the caches. Initialised for default 16K stride, adjusted
- * according to values passed through TBI global heap segment via LDLK (on ATP)
- * or config registers (on HTP/MTP)
- */
-static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2;
-static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2;
-/*
- * The number of sets in the caches. Initialised for HTP/ATP, adjusted
- * according to NOMMU setting in config registers
- */
-static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
-static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
-
-#ifndef CONFIG_METAG_META12
-/**
- * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache
- */
-static volatile u32 lnkget_testdata[16] __initdata __aligned(64);
-
-#define LNKGET_CONSTANT 0xdeadbeef
-
-static void __init metag_lnkget_probe(void)
-{
- int temp;
- long flags;
-
- /*
- * It's conceivable the user has configured a globally coherent cache
- * shared with non-Linux hardware threads, so use LOCK2 to prevent them
- * from executing and causing cache eviction during the test.
- */
- __global_lock2(flags);
-
- /* read a value to bring it into the cache */
- (void)lnkget_testdata[0];
- lnkget_testdata[0] = 0;
-
- /* lnkget/lnkset it to modify it */
- asm volatile(
- "1: LNKGETD %0, [%1]\n"
- " LNKSETD [%1], %2\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT)
- : "cc");
-
- /* re-read it to see if the cached value changed */
- temp = lnkget_testdata[0];
-
- __global_unlock2(flags);
-
- /* flush the cache line to fix any incoherency */
- __builtin_dcache_flush((void *)&lnkget_testdata[0]);
-
-#if defined(CONFIG_METAG_LNKGET_AROUND_CACHE)
- /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */
- if (temp == LNKGET_CONSTANT)
- pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n");
-#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary
- * because the kernel is configured to use LNKGET/SET for atomicity
- */
- WARN(temp != LNKGET_CONSTANT,
- "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
- "Expect kernel failure as it's used for atomicity primitives\n");
-#elif defined(CONFIG_SMP)
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the
- * gateway page won't flush and userland could break.
- */
- WARN(temp != LNKGET_CONSTANT,
- "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
- "Expect userland failure as it's used for user gateway page\n");
-#else
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it
- * doesn't actually matter as it doesn't have any effect on !SMP &&
- * !ATOMICITY_LNKGET.
- */
- if (temp != LNKGET_CONSTANT)
- pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n");
-#endif
-}
-#endif /* !CONFIG_METAG_META12 */
-
-/**
- * metag_cache_probe() - Probe L1 cache configuration.
- *
- * Probe the L1 cache configuration to aid the L1 physical cache flushing
- * functions.
- */
-void __init metag_cache_probe(void)
-{
-#ifndef CONFIG_METAG_META12
- int coreid = metag_in32(METAC_CORE_ID);
- int config = metag_in32(METAC_CORE_CONFIG2);
- int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
-
- if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
- cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
- icache_sets_log2 = 1;
- dcache_sets_log2 = 1;
- }
-
- /* For normal size caches, the smallest size is 4Kb.
- For small caches, the smallest size is 64b */
- icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
- ? 6 : 12;
- icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
- >> METAC_CORE_C2ICSZ_S;
- icache_set_shift -= icache_sets_log2;
-
- dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
- ? 6 : 12;
- dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
- >> METAC_CORECFG2_DCSZ_S;
- dcache_set_shift -= dcache_sets_log2;
-
- metag_lnkget_probe();
-#else
- /* Extract cache sizes from global heap segment */
- unsigned long val, u;
- int width, shift, addend;
- PTBISEG seg;
-
- seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
- TBID_SEGSCOPE_GLOBAL,
- TBID_SEGTYPE_HEAP));
- if (seg != NULL) {
- val = seg->Data[1];
-
- /* Work out width of I-cache size bit-field */
- u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
- >> METAG_TBI_ICACHE_SIZE_S;
- width = 0;
- while (u & 1) {
- width++;
- u >>= 1;
- }
- /* Extract sign-extended size addend value */
- shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
- addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
- << shift)
- >> (shift + METAG_TBI_ICACHE_SIZE_S);
- /* Now calculate I-cache set size */
- icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2)
- + addend;
-
- /* Similarly for D-cache */
- u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
- >> METAG_TBI_DCACHE_SIZE_S;
- width = 0;
- while (u & 1) {
- width++;
- u >>= 1;
- }
- shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
- addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
- << shift)
- >> (shift + METAG_TBI_DCACHE_SIZE_S);
- dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2)
- + addend;
- }
-#endif
-}
-
-static void metag_phys_data_cache_flush(const void *start)
-{
- unsigned long flush0, flush1, flush2, flush3;
- int loops, step;
- int thread;
- int part, offset;
- int set_shift;
-
- /* Use a sequence of writes to flush the cache region requested */
- thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
- >> TXENABLE_THREAD_S;
-
- /* Cache is broken into sets which lie in contiguous RAMs */
- set_shift = dcache_set_shift;
-
- /* Move to the base of the physical cache flush region */
- flush0 = LINSYSCFLUSH_DCACHE_LINE;
- step = 64;
-
- /* Get partition data for this thread */
- part = metag_in32(SYSC_DCPART0 +
- (SYSC_xCPARTn_STRIDE * thread));
-
- if ((int)start < 0)
- /* Access Global vs Local partition */
- part >>= SYSC_xCPARTG_AND_S
- - SYSC_xCPARTL_AND_S;
-
- /* Extract offset and move SetOff */
- offset = (part & SYSC_xCPARTL_OR_BITS)
- >> SYSC_xCPARTL_OR_S;
- flush0 += (offset << (set_shift - 4));
-
- /* Shrink size */
- part = (part & SYSC_xCPARTL_AND_BITS)
- >> SYSC_xCPARTL_AND_S;
- loops = ((part + 1) << (set_shift - 4));
-
- /* Reduce loops by step of cache line size */
- loops /= step;
-
- flush1 = flush0 + (1 << set_shift);
- flush2 = flush0 + (2 << set_shift);
- flush3 = flush0 + (3 << set_shift);
-
- if (dcache_sets_log2 == 1) {
- flush2 = flush1;
- flush3 = flush1 + step;
- flush1 = flush0 + step;
- step <<= 1;
- loops >>= 1;
- }
-
- /* Clear loops ways in cache */
- while (loops-- != 0) {
- /* Clear the ways. */
-#if 0
- /*
- * GCC doesn't generate very good code for this so we
- * provide inline assembly instead.
- */
- metag_out8(0, flush0);
- metag_out8(0, flush1);
- metag_out8(0, flush2);
- metag_out8(0, flush3);
-
- flush0 += step;
- flush1 += step;
- flush2 += step;
- flush3 += step;
-#else
- asm volatile (
- "SETB\t[%0+%4++],%5\n"
- "SETB\t[%1+%4++],%5\n"
- "SETB\t[%2+%4++],%5\n"
- "SETB\t[%3+%4++],%5\n"
- : "+e" (flush0),
- "+e" (flush1),
- "+e" (flush2),
- "+e" (flush3)
- : "e" (step), "a" (0));
-#endif
- }
-}
-
-void metag_data_cache_flush_all(const void *start)
-{
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
- /* No need to flush the data cache it's not actually enabled */
- return;
-
- metag_phys_data_cache_flush(start);
-}
-
-void metag_data_cache_flush(const void *start, int bytes)
-{
- unsigned long flush0;
- int loops, step;
-
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
- /* No need to flush the data cache it's not actually enabled */
- return;
-
- if (bytes >= 4096) {
- metag_phys_data_cache_flush(start);
- return;
- }
-
- /* Use linear cache flush mechanism on META IP */
- flush0 = (int)start;
- loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
- (DCACHE_LINE_BYTES - 1);
- loops >>= DCACHE_LINE_S;
-
-#define PRIM_FLUSH(addr, offset) do { \
- int __addr = ((int) (addr)) + ((offset) * 64); \
- __builtin_dcache_flush((void *)(__addr)); \
- } while (0)
-
-#define LOOP_INC (4*64)
-
- do {
- /* By default stop */
- step = 0;
-
- switch (loops) {
- /* Drop Thru Cases! */
- default:
- PRIM_FLUSH(flush0, 3);
- loops -= 4;
- step = 1;
- case 3:
- PRIM_FLUSH(flush0, 2);
- case 2:
- PRIM_FLUSH(flush0, 1);
- case 1:
- PRIM_FLUSH(flush0, 0);
- flush0 += LOOP_INC;
- case 0:
- break;
- }
- } while (step);
-}
-EXPORT_SYMBOL(metag_data_cache_flush);
-
-static void metag_phys_code_cache_flush(const void *start, int bytes)
-{
- unsigned long flush0, flush1, flush2, flush3, end_set;
- int loops, step;
- int thread;
- int set_shift, set_size;
- int part, offset;
-
- /* Use a sequence of writes to flush the cache region requested */
- thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
- >> TXENABLE_THREAD_S;
- set_shift = icache_set_shift;
-
- /* Move to the base of the physical cache flush region */
- flush0 = LINSYSCFLUSH_ICACHE_LINE;
- step = 64;
-
- /* Get partition code for this thread */
- part = metag_in32(SYSC_ICPART0 +
- (SYSC_xCPARTn_STRIDE * thread));
-
- if ((int)start < 0)
- /* Access Global vs Local partition */
- part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
-
- /* Extract offset and move SetOff */
- offset = (part & SYSC_xCPARTL_OR_BITS)
- >> SYSC_xCPARTL_OR_S;
- flush0 += (offset << (set_shift - 4));
-
- /* Shrink size */
- part = (part & SYSC_xCPARTL_AND_BITS)
- >> SYSC_xCPARTL_AND_S;
- loops = ((part + 1) << (set_shift - 4));
-
- /* Where does the Set end? */
- end_set = flush0 + loops;
- set_size = loops;
-
-#ifdef CONFIG_METAG_META12
- if ((bytes < 4096) && (bytes < loops)) {
- /* Unreachable on HTP/MTP */
- /* Only target the sets that could be relavent */
- flush0 += (loops - step) & ((int) start);
- loops = (((int) start) & (step-1)) + bytes + step - 1;
- }
-#endif
-
- /* Reduce loops by step of cache line size */
- loops /= step;
-
- flush1 = flush0 + (1<<set_shift);
- flush2 = flush0 + (2<<set_shift);
- flush3 = flush0 + (3<<set_shift);
-
- if (icache_sets_log2 == 1) {
- flush2 = flush1;
- flush3 = flush1 + step;
- flush1 = flush0 + step;
-#if 0
- /* flush0 will stop one line early in this case
- * (flush1 will do the final line).
- * However we don't correct end_set here at the moment
- * because it will never wrap on HTP/MTP
- */
- end_set -= step;
-#endif
- step <<= 1;
- loops >>= 1;
- }
-
- /* Clear loops ways in cache */
- while (loops-- != 0) {
-#if 0
- /*
- * GCC doesn't generate very good code for this so we
- * provide inline assembly instead.
- */
- /* Clear the ways */
- metag_out8(0, flush0);
- metag_out8(0, flush1);
- metag_out8(0, flush2);
- metag_out8(0, flush3);
-
- flush0 += step;
- flush1 += step;
- flush2 += step;
- flush3 += step;
-#else
- asm volatile (
- "SETB\t[%0+%4++],%5\n"
- "SETB\t[%1+%4++],%5\n"
- "SETB\t[%2+%4++],%5\n"
- "SETB\t[%3+%4++],%5\n"
- : "+e" (flush0),
- "+e" (flush1),
- "+e" (flush2),
- "+e" (flush3)
- : "e" (step), "a" (0));
-#endif
-
- if (flush0 == end_set) {
- /* Wrap within Set 0 */
- flush0 -= set_size;
- flush1 -= set_size;
- flush2 -= set_size;
- flush3 -= set_size;
- }
- }
-}
-
-void metag_code_cache_flush_all(const void *start)
-{
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
- /* No need to flush the code cache it's not actually enabled */
- return;
-
- metag_phys_code_cache_flush(start, 4096);
-}
-EXPORT_SYMBOL(metag_code_cache_flush_all);
-
-void metag_code_cache_flush(const void *start, int bytes)
-{
-#ifndef CONFIG_METAG_META12
- void *flush;
- int loops, step;
-#endif /* !CONFIG_METAG_META12 */
-
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
- /* No need to flush the code cache it's not actually enabled */
- return;
-
-#ifdef CONFIG_METAG_META12
- /* CACHEWD isn't available on Meta1, so always do full cache flush */
- metag_phys_code_cache_flush(start, bytes);
-
-#else /* CONFIG_METAG_META12 */
- /* If large size do full physical cache flush */
- if (bytes >= 4096) {
- metag_phys_code_cache_flush(start, bytes);
- return;
- }
-
- /* Use linear cache flush mechanism on META IP */
- flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
- loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
- (ICACHE_LINE_BYTES-1);
- loops >>= ICACHE_LINE_S;
-
-#define PRIM_IFLUSH(addr, offset) \
- __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
-
-#define LOOP_INC (4*64)
-
- do {
- /* By default stop */
- step = 0;
-
- switch (loops) {
- /* Drop Thru Cases! */
- default:
- PRIM_IFLUSH(flush, 3);
- loops -= 4;
- step = 1;
- case 3:
- PRIM_IFLUSH(flush, 2);
- case 2:
- PRIM_IFLUSH(flush, 1);
- case 1:
- PRIM_IFLUSH(flush, 0);
- flush += LOOP_INC;
- case 0:
- break;
- }
- } while (step);
-#endif /* !CONFIG_METAG_META12 */
-}
-EXPORT_SYMBOL(metag_code_cache_flush);
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
deleted file mode 100644
index 9b92d3ad7f9c..000000000000
--- a/arch/metag/mm/extable.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
- unsigned long pc = instruction_pointer(regs);
-
- fixup = search_exception_tables(pc);
- if (fixup)
- regs->ctx.CurrPC = fixup->fixup;
-
- return fixup != NULL;
-}
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
deleted file mode 100644
index de54fe686080..000000000000
--- a/arch/metag/mm/fault.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Meta page fault handling.
- *
- * Copyright (C) 2005-2012 Imagination Technologies Ltd.
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/sched/debug.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
-
-#include <asm/tlbflush.h>
-#include <asm/mmu.h>
-#include <asm/traps.h>
-
-/* Clear any pending catch buffer state. */
-static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
- unsigned int trapno)
-{
- PTBICTXEXTCB0 cbuf = regs->extcb0;
-
- switch (trapno) {
- /* Instruction fetch faults leave no catch buffer state. */
- case TBIXXF_SIGNUM_IGF:
- case TBIXXF_SIGNUM_IPF:
- return;
- default:
- if (cbuf[0].CBAddr == addr) {
- cbuf[0].CBAddr = 0;
- cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;
-
- /* And, as this is the ONLY catch entry, we
- * need to clear the cbuf bit from the context!
- */
- regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
- TBICTX_XCBF_BIT);
-
- return;
- }
- pr_err("Failed to clear cbuf entry!\n");
- }
-}
-
-int show_unhandled_signals = 1;
-
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned int write_access, unsigned int trapno)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma, *prev_vma;
- siginfo_t info;
- int fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-
- tsk = current;
-
- if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = ((pgd_t *)mmu_get_base()) + offset;
- pgd_k = swapper_pg_dir + offset;
-
- /* This will never happen with the folded page table. */
- if (!pgd_present(*pgd)) {
- if (!pgd_present(*pgd_k))
- goto bad_area_nosemaphore;
- set_pgd(pgd, *pgd_k);
- return 0;
- }
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- goto bad_area_nosemaphore;
- set_pud(pud, *pud_k);
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- goto bad_area_nosemaphore;
- set_pmd(pmd, *pmd_k);
-
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto bad_area_nosemaphore;
-
- /* May only be needed on Chorus2 */
- flush_tlb_all();
- return 0;
- }
-
- mm = tsk->mm;
-
- if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
- flags |= FAULT_FLAG_USER;
-retry:
- down_read(&mm->mmap_sem);
-
- vma = find_vma_prev(mm, address, &prev_vma);
-
- if (!vma || address < vma->vm_start)
- goto check_expansion;
-
-good_area:
- if (write_access) {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- flags |= FAULT_FLAG_WRITE;
- } else {
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- }
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(vma, address, flags);
-
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
- return 0;
-
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
-
- /*
- * No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
-
- goto retry;
- }
- }
-
- up_read(&mm->mmap_sem);
- return 0;
-
-check_expansion:
- vma = prev_vma;
- if (vma && (expand_stack(vma, address) == 0))
- goto good_area;
-
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- if (user_mode(regs)) {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (__force void __user *)address;
- info.si_trapno = trapno;
-
- if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
- printk_ratelimit()) {
- printk("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), address,
- regs->ctx.CurrPC, regs->ctx.AX[0].U0,
- write_access, trapno, trap_name(trapno));
- print_vma_addr(" in ", regs->ctx.CurrPC);
- print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
- printk("\n");
- show_regs(regs);
- }
- force_sig_info(SIGSEGV, &info, tsk);
- return 1;
- }
- goto no_context;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (__force void __user *)address;
- info.si_trapno = trapno;
- force_sig_info(SIGBUS, &info, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- goto no_context;
-
- return 1;
-
- /*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (user_mode(regs)) {
- pagefault_out_of_memory();
- return 1;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs)) {
- clear_cbuf_entry(regs, address, trapno);
- return 1;
- }
-
- die("Oops", regs, (write_access << 15) | trapno, address);
- do_exit(SIGKILL);
-}
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
deleted file mode 100644
index 83527fc7c8a7..000000000000
--- a/arch/metag/mm/highmem.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static pte_t *kmap_pte;
-
-unsigned long highstart_pfn, highend_pfn;
-
-void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
- int type;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int idx, type;
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
- int type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
-
- return (void *)vaddr;
-}
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-}
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
deleted file mode 100644
index 012ee4c80dc7..000000000000
--- a/arch/metag/mm/hugetlbpage.c
+++ /dev/null
@@ -1,251 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/metag/mm/hugetlbpage.c
- *
- * METAG HugeTLB page support.
- *
- * Cloned from SuperH
- *
- * Cloned from sparc64 by Paul Mundt.
- *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/sysctl.h>
-
-#include <asm/mman.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-int prepare_hugepage_range(struct file *file, unsigned long addr,
- unsigned long len)
-{
- struct mm_struct *mm = current->mm;
- struct hstate *h = hstate_file(file);
- struct vm_area_struct *vma;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
-	if (addr &