author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-02 14:05:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-02 14:05:27 -0700
commit	cfa3b8068b09f25037146bfd5eed041b78878bee (patch)
tree	8053e072074c05cb1fe176181cef3fdeedbaa24b
parent	194098915ac74daddca9d6ed46fd11be57f45e16 (diff)
parent	f07e2f6be37a750737b93f5635485171ad459eb9 (diff)
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
 "This series adds a selftest for hmm_range_fault() and several of the
  DEVICE_PRIVATE migration related actions, and another simplification
  for hmm_range_fault()'s API.

   - Simplify hmm_range_fault() with a simpler return code, no
     HMM_PFN_SPECIAL, and no customizable output PFN format

   - Add a selftest for hmm_range_fault() and DEVICE_PRIVATE related
     functionality"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  MAINTAINERS: add HMM selftests
  mm/hmm/test: add selftests for HMM
  mm/hmm/test: add selftest driver for HMM
  mm/hmm: remove the customizable pfn format from hmm_range_fault
  mm/hmm: remove HMM_PFN_SPECIAL
  drm/amdgpu: remove dead code after hmm_range_fault()
  mm/hmm: make hmm_range_fault return 0 or -1
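
For orientation, the simplified API boils down to filling a struct hmm_range
and checking a plain errno. A minimal sketch, assuming an already-registered
mmu_interval_notifier and a driver-invented my_mirror_fault() helper (neither
is part of this merge)::

    #include <linux/hmm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/sched/mm.h>
    #include <linux/mm.h>

    /* Hypothetical helper: make npages starting at addr readable. */
    static int my_mirror_fault(struct mmu_interval_notifier *notifier,
                               unsigned long addr, unsigned long npages)
    {
            unsigned long *hmm_pfns;
            struct hmm_range range = {
                    .notifier = notifier,
                    .start = addr,
                    .end = addr + (npages << PAGE_SHIFT),
                    .default_flags = HMM_PFN_REQ_FAULT, /* fault all, read */
                    .pfn_flags_mask = 0,
            };
            struct mm_struct *mm = notifier->mm;
            int ret;

            hmm_pfns = kvmalloc_array(npages, sizeof(*hmm_pfns), GFP_KERNEL);
            if (!hmm_pfns)
                    return -ENOMEM;
            range.hmm_pfns = hmm_pfns;

            if (!mmget_not_zero(mm)) {
                    kvfree(hmm_pfns);
                    return -EFAULT;
            }

            range.notifier_seq = mmu_interval_read_begin(notifier);
            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(&range); /* now 0 or -ERRNO, not a count */
            up_read(&mm->mmap_sem);

            /*
             * -EBUSY means a collision with invalidation: retry, as the
             * drivers below do. hmm_pfns[] is only trustworthy after
             * mmu_interval_read_retry() under the driver lock.
             */
            mmput(mm);
            kvfree(hmm_pfns);
            return ret;
    }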
-rw-r--r--	Documentation/vm/hmm.rst	30
-rw-r--r--	MAINTAINERS	2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c	56
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.c	27
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.h	3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_svm.c	94
-rw-r--r--	include/linux/hmm.h	111
-rw-r--r--	lib/Kconfig.debug	13
-rw-r--r--	lib/Makefile	1
-rw-r--r--	lib/test_hmm.c	1164
-rw-r--r--	lib/test_hmm_uapi.h	59
-rw-r--r--	mm/hmm.c	185
-rw-r--r--	tools/testing/selftests/vm/.gitignore	1
-rw-r--r--	tools/testing/selftests/vm/Makefile	3
-rw-r--r--	tools/testing/selftests/vm/config	2
-rw-r--r--	tools/testing/selftests/vm/hmm-tests.c	1359
-rwxr-xr-x	tools/testing/selftests/vm/run_vmtests	16
-rwxr-xr-x	tools/testing/selftests/vm/test_hmm.sh	97
18 files changed, 2934 insertions, 289 deletions
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 4e3e9362afeb..561969754bc0 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -161,7 +161,7 @@ device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
- long hmm_range_fault(struct hmm_range *range);
+ int hmm_range_fault(struct hmm_range *range);
It will trigger a page fault on missing or read-only entries if write access is
requested (see below). Page faults use the generic mm page fault code path just
@@ -184,10 +184,7 @@ The usage pattern is::
range.notifier = &interval_sub;
range.start = ...;
range.end = ...;
- range.pfns = ...;
- range.flags = ...;
- range.values = ...;
- range.pfn_shift = ...;
+ range.hmm_pfns = ...;
if (!mmget_not_zero(interval_sub->notifier.mm))
return -EFAULT;
@@ -229,15 +226,10 @@ The hmm_range struct has 2 fields, default_flags and pfn_flags_mask, that specif
fault or snapshot policy for the whole range instead of having to set them
for each entry in the pfns array.
-For instance, if the device flags for range.flags are::
+For instance if the device driver wants pages for a range with at least read
+permission, it sets::
- range.flags[HMM_PFN_VALID] = (1 << 63);
- range.flags[HMM_PFN_WRITE] = (1 << 62);
-
-and the device driver wants pages for a range with at least read permission,
-it sets::
-
- range->default_flags = (1 << 63);
+ range->default_flags = HMM_PFN_REQ_FAULT;
range->pfn_flags_mask = 0;
and calls hmm_range_fault() as described above. This will fill fault all pages
@@ -246,18 +238,18 @@ in the range with at least read permission.
Now let's say the driver wants to do the same except for one page in the range for
which it wants to have write permission. Now driver set::
- range->default_flags = (1 << 63);
- range->pfn_flags_mask = (1 << 62);
- range->pfns[index_of_write] = (1 << 62);
+ range->default_flags = HMM_PFN_REQ_FAULT;
+ range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
+ range->pfns[index_of_write] = HMM_PFN_REQ_WRITE;
With this, HMM will fault in all pages with at least read (i.e., valid) and for the
address == range->start + (index_of_write << PAGE_SHIFT) it will fault with
write permission i.e., if the CPU pte does not have write permission set then HMM
will call handle_mm_fault().
-Note that HMM will populate the pfns array with write permission for any page
-that is mapped with CPU write permission no matter what values are set
-in default_flags or pfn_flags_mask.
+After hmm_range_fault completes the flag bits are set to the current state of
+the page tables, ie HMM_PFN_VALID | HMM_PFN_WRITE will be set if the page is
+writable.
Represent and manage device memory from core kernel point of view
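
Putting the two halves of that documentation example together, a hedged
fragment in the new notation (index_of_write and the device-side mapping step
are assumed, not part of the patch)::

    #include <linux/hmm.h>

    /* Sketch: fault a range readable, with one page also writable. */
    static int fault_one_writable(struct hmm_range *range,
                                  unsigned long index_of_write)
    {
            int ret;

            range->default_flags = HMM_PFN_REQ_FAULT;
            range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
            range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;

            ret = hmm_range_fault(range);
            if (ret)
                    return ret;

            /* On success the flag bits mirror the CPU page tables. */
            if (range->hmm_pfns[index_of_write] & HMM_PFN_WRITE) {
                    struct page *page =
                            hmm_pfn_to_page(range->hmm_pfns[index_of_write]);

                    /* ... program a writable device PTE for page ... */
                    (void)page;
            }
            return 0;
    }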
diff --git a/MAINTAINERS b/MAINTAINERS
index 0e04d5321caf..ef7414d7de3f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7768,7 +7768,9 @@ L: linux-mm@kvack.org
S: Maintained
F: Documentation/vm/hmm.rst
F: include/linux/hmm*
+F: lib/test_hmm*
F: mm/hmm*
+F: tools/testing/selftests/vm/*hmm*
HOST AP DRIVER
M: Jouni Malinen <j@w1.fi>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6309ff72bd78..b1c62da527c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -766,18 +766,6 @@ struct amdgpu_ttm_tt {
};
#ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -816,18 +804,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out;
}
range->notifier = &bo->notifier;
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
range->start = bo->notifier.interval_tree.start;
range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= range->flags[HMM_PFN_WRITE];
+ range->default_flags |= HMM_PFN_REQ_WRITE;
- range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
- GFP_KERNEL);
- if (unlikely(!range->pfns)) {
+ range->hmm_pfns = kvmalloc_array(ttm->num_pages,
+ sizeof(*range->hmm_pfns), GFP_KERNEL);
+ if (unlikely(!range->hmm_pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
@@ -852,27 +837,23 @@ retry:
down_read(&mm->mmap_sem);
r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
- if (unlikely(r <= 0)) {
+ if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
*/
- if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ if (r == -EBUSY && !time_after(jiffies, timeout))
goto retry;
goto out_free_pfns;
}
- for (i = 0; i < ttm->num_pages; i++) {
- /* FIXME: The pages cannot be touched outside the notifier_lock */
- pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
- if (unlikely(!pages[i])) {
- pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, range->pfns[i]);
- r = -ENOMEM;
-
- goto out_free_pfns;
- }
- }
+ /*
+ * Due to default_flags, all pages are HMM_PFN_VALID or
+ * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+ * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */
+ for (i = 0; i < ttm->num_pages; i++)
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
gtt->range = range;
mmput(mm);
@@ -882,7 +863,7 @@ retry:
out_unlock:
up_read(&mm->mmap_sem);
out_free_pfns:
- kvfree(range->pfns);
+ kvfree(range->hmm_pfns);
out_free_ranges:
kfree(range);
out:
@@ -907,7 +888,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->pfns,
+ WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
"No user pages to check\n");
if (gtt->range) {
@@ -917,7 +898,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
*/
r = mmu_interval_read_retry(gtt->range->notifier,
gtt->range->notifier_seq);
- kvfree(gtt->range->pfns);
+ kvfree(gtt->range->hmm_pfns);
kfree(gtt->range);
gtt->range = NULL;
}
@@ -1008,8 +989,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->pages[i] !=
- hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[i]))
+ hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
break;
}
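
The retry structure above generalizes to other drivers; a hedged sketch with
the amdgpu specifics stripped out (range setup and surrounding locking are
assumed to be done by the caller)::

    #include <linux/hmm.h>
    #include <linux/jiffies.h>

    /* Sketch: call hmm_range_fault() until success, hard error or timeout. */
    static int fault_with_timeout(struct hmm_range *range, struct mm_struct *mm)
    {
            unsigned long timeout =
                    jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
            int r;

    retry:
            range->notifier_seq = mmu_interval_read_begin(range->notifier);
            down_read(&mm->mmap_sem);
            r = hmm_range_fault(range);
            up_read(&mm->mmap_sem);
            if (r) {
                    /* Only -EBUSY (a racing invalidation) is retryable. */
                    if (r == -EBUSY && !time_after(jiffies, timeout))
                            goto retry;
                    return r;
            }
            /*
             * Still speculative: revalidate with mmu_interval_read_retry()
             * under the driver lock before touching the pages.
             */
            return 0;
    }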
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be3..3364904eccff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -85,7 +85,7 @@ static inline struct nouveau_dmem *page_to_dmem(struct page *page)
return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
{
struct nouveau_dmem_chunk *chunk = page->zone_device_data;
unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
@@ -671,28 +671,3 @@ out_free_src:
out:
return ret;
}
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range)
-{
- unsigned long i, npages;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
- uint64_t addr;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (!is_device_private_page(page))
- continue;
-
- addr = nouveau_dmem_page_addr(page);
- range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
- range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
- range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 92394be5d649..db3b59b210af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -37,9 +37,8 @@ int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
+unsigned long nouveau_dmem_page_addr(struct page *page);
-void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..407e34a5c0ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -369,19 +369,6 @@ out_free:
return ret;
}
-static const u64
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
- [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
-};
-
-static const u64
-nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
- [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
- [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
-};
-
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -519,9 +506,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
+static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range, u64 *ioctl_addr)
+{
+ unsigned long i, npages;
+
+ /*
+ * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+ * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
+ *
+ * This is all just encoding the internal hmm representation into a
+ * different nouveau internal representation.
+ */
+ npages = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
+ ioctl_addr[i] = 0;
+ continue;
+ }
+
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (is_device_private_page(page))
+ ioctl_addr[i] = nouveau_dmem_page_addr(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_VRAM;
+ else
+ ioctl_addr[i] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_HOST;
+ if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+ ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+ }
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -530,26 +553,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifier->notifier,
.start = notifier->notifier.interval_tree.start,
.end = notifier->notifier.interval_tree.last + 1,
- .pfns = pfns,
- .flags = nouveau_svm_pfn_flags,
- .values = nouveau_svm_pfn_values,
- .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+ .hmm_pfns = hmm_pfns,
};
struct mm_struct *mm = notifier->notifier.mm;
- long ret;
+ int ret;
while (true) {
if (time_after(jiffies, timeout))
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- range.default_flags = 0;
- range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
- if (ret <= 0) {
- if (ret == 0 || ret == -EBUSY)
+ if (ret) {
+ /*
+ * FIXME: the input PFN_REQ flags are destroyed on
+ * -EBUSY, we need to regenerate them, also for the
+ * other continue below
+ */
+ if (ret == -EBUSY)
continue;
return ret;
}
@@ -563,7 +587,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
break;
}
- nouveau_dmem_convert_pfn(drm, &range);
+ nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -590,6 +614,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
+ unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -705,12 +730,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
* access flags.
*XXX: atomic?
*/
- if (buffer->fault[fn]->access != 0 /* READ. */ &&
- buffer->fault[fn]->access != 3 /* PREFETCH. */) {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_W;
- } else {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+ switch (buffer->fault[fn]->access) {
+ case 0: /* READ. */
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
+ break;
+ case 3: /* PREFETCH. */
+ hmm_pfns[pi++] = 0;
+ break;
+ default:
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
+ HMM_PFN_REQ_WRITE;
+ break;
}
args.i.p.size = pi << PAGE_SHIFT;
@@ -738,7 +768,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
fill = (buffer->fault[fn ]->addr -
buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
while (--fill)
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+ hmm_pfns[pi++] = 0;
}
SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -754,7 +784,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
ret = nouveau_range_fault(
svmm, svm->drm, &args,
sizeof(args.i) + pi * sizeof(args.phys[0]),
- args.phys, &notifier);
+ hmm_pfns, args.phys, &notifier);
mmu_interval_notifier_remove(&notifier.notifier);
}
mmput(mm);
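
Why .pfn_flags_mask is set to HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE above:
hmm_range_fault() derives each entry's fault request roughly as below (a
paraphrase of the mm/hmm.c logic, not a copy), so with default_flags == 0 and
a full mask the per-fault values nouveau_svm_fault() wrote into hmm_pfns[]
are used verbatim::

    /* Sketch of the per-entry request computation inside hmm_range_fault(). */
    static unsigned long effective_req_flags(const struct hmm_range *range,
                                             unsigned long input_entry)
    {
            /*
             * Keep only the bits the driver lets through, then OR in the
             * range-wide defaults.
             */
            return (input_entry & range->pfn_flags_mask) | range->default_flags;
    }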
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 7475051100c7..e912b9dc4633 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -19,51 +19,47 @@
#include <linux/mmu_notifier.h>
/*
- * hmm_pfn_flag_e - HMM flag enums
+ * On output:
+ * 0 - The page is faultable and a future call with
+ * HMM_PFN_REQ_FAULT could succeed.
+ * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
+ * least readable. If dev_private_owner is !NULL then this could
+ * point at a DEVICE_PRIVATE page.
+ * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
+ * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
+ * fail. ie poisoned memory, special pages, no vma, etc
*
- * Flags:
- * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
- * HMM_PFN_WRITE: CPU page table has write permission set
- *
- * The driver provides a flags array for mapping page protections to device
- * PTE bits. If the driver valid bit for an entry is bit 3,
- * i.e., (entry & (1 << 3)), then the driver must provide
- * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
- * Same logic apply to all flags. This is the same idea as vm_page_prot in vma
- * except that this is per device driver rather than per architecture.
+ * On input:
+ * 0 - Return the current state of the page, do not fault it.
+ * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
+ * will fail
+ * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
+ * will fail. Must be combined with HMM_PFN_REQ_FAULT.
*/
-enum hmm_pfn_flag_e {
- HMM_PFN_VALID = 0,
- HMM_PFN_WRITE,
- HMM_PFN_FLAG_MAX
+enum hmm_pfn_flags {
+ /* Output flags */
+ HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
+ HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
+ HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+
+ /* Input flags */
+ HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
+ HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
+
+ HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
};
/*
- * hmm_pfn_value_e - HMM pfn special value
- *
- * Flags:
- * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
- * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
- * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
- * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
- * be mirrored by a device, because the entry will never have HMM_PFN_VALID
- * set and the pfn value is undefined.
+ * hmm_pfn_to_page() - return struct page pointed to by a device entry
*
- * Driver provides values for none entry, error entry, and special entry.
- * Driver can alias (i.e., use same value) error and special, but
- * it should not alias none with error or special.
- *
- * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
- * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous,
- * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
- * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
+ * This must be called under the caller 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
*/
-enum hmm_pfn_value_e {
- HMM_PFN_ERROR,
- HMM_PFN_NONE,
- HMM_PFN_SPECIAL,
- HMM_PFN_VALUE_MAX
-};
+static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
+{
+ return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
+}
/*
* struct hmm_range - track invalidation lock on virtual address range
@@ -72,12 +68,9 @@ enum hmm_pfn_value_e {
* @notifier_seq: result of mmu_interval_read_begin()
* @start: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
- * @pfns: array of pfns (big enough for the range)
- * @flags: pfn flags to match device driver page table
- * @values: pfn value for some special case (none, special, error, ...)
+ * @hmm_pfns: array of pfns (big enough for the range)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
* @dev_private_owner: owner of device private pages
*/
struct hmm_range {
@@ -85,42 +78,16 @@ struct hmm_range {
unsigned long notifier_seq;
unsigned long start;
unsigned long end;
- uint64_t *pfns;
- const uint64_t *flags;
- const uint64_t *values;
- uint64_t default_flags;
- uint64_t pfn_flags_mask;
- uint8_t pfn_shift;
+ unsigned long *hmm_pfns;
+ unsigned long default_flags;
+ unsigned long pfn_flags_mask;
void *dev_private_owner;
};
/*
- * hmm_device_entry_to_page() - return struct page pointed to by a device entry
- * @range: range use to decode device entry value
- * @entry: device entry value to get corresponding struct page from
- * Return: struct page pointer if entry is a valid, NULL otherwise
- *
- * If the device entry is valid (ie valid flag set) then return the struct page
- * matching the entry value. Otherwise return NULL.
- */
-static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
- uint64_t entry)
-{
- if (entry == range->values[HMM_PFN_NONE])
- return NULL;
- if (entry == range->values[HMM_PFN_ERROR])
- return NULL;
- if (entry == range->values[HMM_PFN_SPECIAL])
- return NULL;
- if (!(entry & range->flags[HMM_PFN_VALID]))
- return NULL;
- return pfn_to_page(entry >> range->pfn_shift);
-}
-
-/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-long hmm_range_fault(struct hmm_range *range);
+int hmm_range_fault(struct hmm_range *range);
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
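
A hedged sketch of the locking contract in the hmm_pfn_to_page() comment
above: dereference results only after mmu_interval_read_retry() succeeds,
under the same lock the invalidate callback takes (driver_lock and
program_device_pte() are invented names)::

    #include <linux/hmm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>

    static void program_device_pte(unsigned long idx, struct page *page);

    /* Sketch: consume hmm_range_fault() output safely. */
    static int consume_range(struct hmm_range *range, struct mutex *driver_lock)
    {
            unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

            mutex_lock(driver_lock);
            if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
                    /* Raced with an invalidation: start over. */
                    mutex_unlock(driver_lock);
                    return -EBUSY;
            }
            for (i = 0; i < npages; i++) {
                    if (!(range->hmm_pfns[i] & HMM_PFN_VALID))
                            continue;
                    /*
                     * Safe while holding the lock the notifier's
                     * invalidate callback also takes.
                     */
                    program_device_pte(i, hmm_pfn_to_page(range->hmm_pfns[i]));
            }
            mutex_unlock(driver_lock);
            return 0;
    }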
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b3b05adab575..0217ed126f77 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2218,6 +2218,19 @@ config TEST_MEMINIT
If unsure, say N.
+config TEST_HMM
+ tristate "Test HMM (Heterogeneous Memory Management)"
+ depends on TRANSPARENT_HUGEPAGE
+ depends on DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ This is a pseudo device driver solely for testing HMM.
+ Say M here if you want to build the HMM test module.
+ Doing so will allow you to run tools/testing/selftest/vm/hmm-tests.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index cd548bfa8df9..5adf8949a757 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
+obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
new file mode 100644
index 000000000000..5c1858e325ba
--- /dev/null
+++ b/lib/test_hmm.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management)
+ * mirror and zone device private memory migration APIs of the kernel.
+ * Userspace programs can register with the driver to mirror their own address
+ * space and can use the device to read/write any valid virtual address.
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/hmm.h>
+#include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/sched/mm.h>
+#include <linux/platform_device.h>
+
+#include "test_hmm_uapi.h"
+
+#define DMIRROR_NDEVICES 2
+#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
+#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
+#define DEVMEM_CHUNKS_RESERVE 16
+
+static const struct dev_pagemap_ops dmirror_devmem_ops;
+static const struct mmu_interval_notifier_ops dmirror_min_ops;
+static dev_t dmirror_dev;
+static struct page *dmirror_zero_page;
+
+struct dmirror_device;
+
+struct dmirror_bounce {
+ void *ptr;
+ unsigned long size;
+ unsigned long addr;
+ unsigned long cpages;
+};
+
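+/*
+ * XArray pointer tag (used with xa_tag_pointer() in dmirror_do_fault()
+ * below): marks page table entries mirrored with write permission.
+ */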
+#define DPT_XA_TAG_WRITE 3UL
+
+/*
+ * Data structure to track address ranges and register for mmu interval
+ * notifier updates.
+ */
+struct dmirror_interval {
+ struct mmu_interval_notifier notifier;
+ struct dmirror *dmirror;
+};
+
+/*
+ * Data attached to the open device file.
+ * Note that it might be shared after a fork().
+ */
+struct dmirror {
+ struct dmirror_device *mdevice;
+ struct xarray pt;
+ struct mmu_interval_notifier notifier;
+ struct mutex mutex;
+};
+
+/*
+ * ZONE_DEVICE pages for migration and simulating device memory.
+ */
+struct dmirror_chunk {
+ struct dev_pagemap pagemap;
+ struct dmirror_device *mdevice;
+};
+
+/*
+ * Per device data.
+ */
+struct dmirror_device {
+ struct cdev cdevice;
+ struct hmm_devmem *devmem;
+
+ unsigned int devmem_capacity;
+ unsigned int devmem_count;
+ struct dmirror_chunk **devmem_chunks;
+ struct mutex devmem_lock; /* protects the above */
+
+ unsigned long calloc;
+ unsigned long cfree;
+ struct page *free_pages;
+ spinlock_t lock; /* protects the above */
+};
+
+static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];
+
+static int dmirror_bounce_init(struct dmirror_bounce *bounce,
+ unsigned long addr,
+ unsigned long size)
+{
+ bounce->addr = addr;
+ bounce->size = size;
+ bounce->cpages = 0;
+ bounce->ptr = vmalloc(size);
+ if (!bounce->ptr)
+ return -ENOMEM;
+ return 0;
+}
+
+static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
+{
+ vfree(bounce->ptr);
+}
+
+static int dmirror_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct dmirror *dmirror;
+ int ret;
+
+ /* Mirror this process address space */
+ dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
+ if (dmirror == NULL)
+ return -ENOMEM;
+
+ dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
+ mutex_init(&dmirror->mutex);
+ xa_init(&dmirror->pt);
+
+ ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
+ 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);
+ if (ret) {
+ kfree(dmirror);
+ return ret;
+ }
+
+ filp->private_data = dmirror;
+ return 0;
+}
+
+static int dmirror_fops_release(struct inode *inode, struct file *filp)
+{
+ struct dmirror *dmirror = filp->private_data;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+}
+
+static struct dmirror_device *dmirror_page_to_device(struct page *page)
+
+{
+ return container_of(page->pgmap, struct dmirror_chunk,
+ pagemap)->mdevice;
+}
+
+static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
+{
+ unsigned long *pfns = range->hmm_pfns;
+ unsigned long pfn;
+
+ for (pfn = (range->start >> PAGE_SHIFT);
+ pfn < (range->end >> PAGE_SHIFT);
+ pfn++, pfns++) {
+ struct page *page;
+ void *entry;
+
+ /*
+ * Since we asked for hmm_range_fault() to populate pages,
+ * it shouldn't return an error entry on success.
+ */
+ WARN_ON(*pfns & HMM_PFN_ERROR);
+ WARN_ON(!(*pfns & HMM_PFN_VALID));
+
+ page = hmm_pfn_to_page(*pfns);
+ WARN_ON(!page);
+
+ entry = page;
+ if (*pfns & HMM_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
+ return -EFAULT;
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry))
+ return xa_err(entry);
+ }
+
+ return 0;
+}
+
+static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+ void *entry;
+
+ /*
+ * The XArray doesn't hold references to pages since it relies on
+ * the mmu notifier to clear page pointers when they become stale.
+ * Therefore, it is OK to just clear the entry.
+ */
+ xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT)
+ xa_erase(&dmirror->pt, pfn);
+}
+
+static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ mmu_interval_set_seq(mni, cur_seq);
+ dmirror_do_update(dmirror, range->start, range->end);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_min_ops = {
+ .invalidate = dmirror_interval_invalidate,
+};
+
+static int dmirror_range_fault(struct dmirror *dmirror,
+ struct hmm_range *range)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ int ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+ down_read(&mm->mmap_sem);
+ ret = hmm_range_fault(range);
+ up_read(&mm->mmap_sem);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ ret = dmirror_do_fault(dmirror, range);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ return ret;
+}
+
+static int dmirror_fault(struct dmirror *dmirror, unsigned long sta