Diffstat (limited to 'mm/nommu.c')
 -rw-r--r--	mm/nommu.c	24
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 24f9f5f39145..2d131b97a851 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
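A note on the new include (not part of the patch text, and hedged): with the 4.11-era sched.h split, mm refcounting helpers such as mmget_not_zero() and mmput() are declared in <linux/sched/mm.h> rather than being pulled in via <linux/sched.h>, so a file calling them needs the explicit include. A minimal sketch, with put_mm_ref() as a hypothetical wrapper purely for illustration:

#include <linux/sched/mm.h>

/* hypothetical helper: shows where mmput() now comes from */
static void put_mm_ref(struct mm_struct *mm)
{
	mmput(mm);	/* declared in <linux/sched/mm.h> after the split */
}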
@@ -517,7 +518,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/*
- * initialise the VMA and region record slabs
+ * initialise the percpu counter for VM and region record slabs
*/
void __init mmap_init(void)
{
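For context on the reworded comment (a sketch from memory, not quoted from this tree): the nommu mmap_init() initialises both the vm_committed_as percpu counter used for overcommit accounting and the slab cache backing struct vm_region, roughly:

void __init mmap_init(void)
{
	int ret;

	/* percpu counter behind the overcommit accounting */
	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	/* slab cache for struct vm_region records */
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}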
@@ -757,7 +758,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) {
/* if the vma is cached, invalidate the entire cache */
- if (curr->vmacache[i] == vma) {
+ if (curr->vmacache.vmas[i] == vma) {
vmacache_invalidate(mm);
break;
}
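The vmacache hunk tracks the per-task cache being wrapped in its own struct. A sketch of the layout this patch assumes (field names and types as in the 4.11-era <linux/mm_types_task.h>, quoted from memory):

struct vmacache {
	u32 seqnum;					/* invalidation sequence number */
	struct vm_area_struct *vmas[VMACACHE_SIZE];	/* cached VMA pointers */
};

/* task_struct now embeds the cache as a single member: */
struct vmacache vmacache;

so the lookup changes from curr->vmacache[i] to curr->vmacache.vmas[i], as above.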
@@ -1084,7 +1085,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
{
int ret;
- ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+ ret = call_mmap(vma->vm_file, vma);
if (ret == 0) {
vma->vm_region->vm_top = vma->vm_region->vm_end;
return 0;
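call_mmap() is a thin wrapper around the ->mmap file operation; a hedged sketch of the helper as added to <linux/fs.h> in this era:

static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
{
	return file->f_op->mmap(file, vma);
}

Using the wrapper keeps the open-coded f_op dereference in one place; the same substitution appears again in do_mmap_private() below.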
@@ -1115,7 +1116,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
* - VM_MAYSHARE will be set if it may attempt to share
*/
if (capabilities & NOMMU_MAP_DIRECT) {
- ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+ ret = call_mmap(vma->vm_file, vma);
if (ret == 0) {
/* shouldn't return success if we're not sharing */
BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
@@ -1191,7 +1192,7 @@ error_free:
enomem:
pr_err("Allocation of length %lu from process %d (%s) failed\n",
len, current->pid, current->comm);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;
}
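show_free_areas() gained a nodemask argument around this time; a hedged sketch of the signature as declared in <linux/mm.h>:

void show_free_areas(unsigned int flags, nodemask_t *nodemask);

/* before: show_free_areas(0); */
show_free_areas(0, NULL);	/* NULL nodemask: no node filtering, dump everything */

Passing NULL preserves the old behaviour, so the three converted call sites in this file are functionally unchanged.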
@@ -1205,7 +1206,8 @@ unsigned long do_mmap(struct file *file,
unsigned long flags,
vm_flags_t vm_flags,
unsigned long pgoff,
- unsigned long *populate)
+ unsigned long *populate,
+ struct list_head *uf)
{
struct vm_area_struct *vma;
struct vm_region *region;
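The new uf parameter threads through a list head on which unmap events for userfaultfd-registered ranges are collected and delivered after the mmap semaphore is dropped. A hedged sketch of the calling pattern, modeled on vm_mmap_pgoff() rather than taken from this patch:

	LIST_HEAD(uf);
	unsigned long populate = 0;
	unsigned long ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
	up_write(&mm->mmap_sem);
	/* deliver UFFD_EVENT_UNMAP messages once the lock is released */
	userfaultfd_unmap_complete(mm, &uf);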
@@ -1412,13 +1414,13 @@ error_getting_vma:
kmem_cache_free(vm_region_jar, region);
pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;
error_getting_region:
pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;
}
@@ -1577,7 +1579,7 @@ static int shrink_vma(struct mm_struct *mm,
* - under NOMMU conditions the chunk to be unmapped must be backed by a single
* VMA, though it need not cover the whole VMA
*/
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
struct vm_area_struct *vma;
unsigned long end;
@@ -1643,7 +1645,7 @@ int vm_munmap(unsigned long addr, size_t len)
int ret;
down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len);
+ ret = do_munmap(mm, addr, len, NULL);
up_write(&mm->mmap_sem);
return ret;
}
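Passing NULL here simply opts out of collecting unmap events. For contrast, a hedged sketch of a caller that does want userfaultfd notifications (pattern modeled on the MMU-side vm_munmap()):

	LIST_HEAD(uf);
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, start, len, &uf);
	up_write(&mm->mmap_sem);
	/* notify userfaultfd readers after dropping mmap_sem */
	userfaultfd_unmap_complete(mm, &uf);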
@@ -1794,7 +1796,7 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
{
BUG();
return 0;
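The prototype change follows ->fault() losing its vma argument; the VMA now travels inside struct vm_fault. A hedged sketch of the 4.11-era callback and how a handler reaches the VMA (example_fault() is hypothetical):

struct vm_operations_struct {
	/* ... */
	int (*fault)(struct vm_fault *vmf);
	/* ... */
};

int example_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* vma carried in vmf */

	/* ... handle the fault against vma ... */
	return VM_FAULT_SIGBUS;
}

The nommu filemap_fault() keeps its BUG() body; only the prototype is adapted.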