author      Linus Torvalds <torvalds@linux-foundation.org>   2020-01-29 19:56:50 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>   2020-01-29 19:56:50 -0800
commit      39bed42de2e7d74686a2d5a45638d6a5d7e7d473 (patch)
tree        524e1686fe0f34c74bb87512e95282b35bef2a8e
parent      83fa805bcbfc53ae82eedd65132794ae324798e5 (diff)
parent      5292e24a6acf5694e0a32c31e3321964176bc17e (diff)
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull mmu_notifier updates from Jason Gunthorpe:
 "This small series revises the names in mmu_notifier to make the code
  clearer and more readable"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  mm/mmu_notifiers: Use 'interval_sub' as the variable for mmu_interval_notifier
  mm/mmu_notifiers: Use 'subscription' as the variable name for mmu_notifier
  mm/mmu_notifier: Rename struct mmu_notifier_mm to mmu_notifier_subscriptions
-rw-r--r--   Documentation/vm/hmm.rst        20
-rw-r--r--   include/linux/mm_types.h         2
-rw-r--r--   include/linux/mmu_notifier.h    86
-rw-r--r--   kernel/fork.c                    4
-rw-r--r--   mm/debug.c                       4
-rw-r--r--   mm/mmu_notifier.c              585
6 files changed, 375 insertions, 326 deletions
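[Editorial sketch, not part of the commit: a minimal driver-side example of the interval-notifier pattern using the renamed variables, assembled from the hmm.rst usage pattern quoted in the diff below. The driver_lock, driver_ops, driver_invalidate and driver_mirror_range names are hypothetical placeholders, and the HMM_RANGE_SNAPSHOT flag is taken verbatim from the hmm.rst snippet in this tree; this is only an assumed illustration of the API, not code from this series.]

    #include <linux/hmm.h>
    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>
    #include <linux/sched/mm.h>

    /* Hypothetical lock serializing the driver's device page-table updates. */
    static DEFINE_MUTEX(driver_lock);

    static bool driver_invalidate(struct mmu_interval_notifier *interval_sub,
                                  const struct mmu_notifier_range *range,
                                  unsigned long cur_seq)
    {
            if (!mmu_notifier_range_blockable(range))
                    return false;
            mutex_lock(&driver_lock);
            /* Record the sequence, then stop device access to the range. */
            mmu_interval_set_seq(interval_sub, cur_seq);
            /* ... unmap or write-protect the device mappings covering range ... */
            mutex_unlock(&driver_lock);
            return true;
    }

    static const struct mmu_interval_notifier_ops driver_ops = {
            .invalidate = driver_invalidate,
    };

    static int driver_mirror_range(struct mmu_interval_notifier *interval_sub,
                                   struct mm_struct *mm, struct hmm_range *range)
    {
            int ret;

            /* New argument order: subscription, mm, start, length, ops. */
            ret = mmu_interval_notifier_insert(interval_sub, mm, range->start,
                                               range->end - range->start,
                                               &driver_ops);
            if (ret)
                    return ret;

            range->notifier = interval_sub;
            if (!mmget_not_zero(mm))
                    return -EFAULT;

    again:
            /* Collision-retry: sample the sequence before faulting pages in. */
            range->notifier_seq = mmu_interval_read_begin(interval_sub);
            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(range, HMM_RANGE_SNAPSHOT);
            up_read(&mm->mmap_sem);
            if (ret < 0) {
                    mmput(mm);
                    return ret;
            }

            mutex_lock(&driver_lock);
            if (mmu_interval_read_retry(interval_sub, range->notifier_seq)) {
                    /* An invalidation collided with this section; retry. */
                    mutex_unlock(&driver_lock);
                    goto again;
            }
            /* ... program the device page tables from range->pfns ... */
            mutex_unlock(&driver_lock);
            mmput(mm);
            /* Teardown later uses mmu_interval_notifier_remove(interval_sub). */
            return 0;
    }
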
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 893a8ba0e9fe..95fec5968362 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -149,14 +149,14 @@ CPU page table into a device page table; HMM helps keep both synchronized. A
device driver that wants to mirror a process address space must start with the
registration of a mmu_interval_notifier::
- mni->ops = &driver_ops;
- int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
- unsigned long start, unsigned long length,
- struct mm_struct *mm);
+ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
-During the driver_ops->invalidate() callback the device driver must perform
-the update action to the range (mark range read only, or fully unmap,
-etc.). The device must complete the update before the driver callback returns.
+During the ops->invalidate() callback the device driver must perform the
+update action to the range (mark range read only, or fully unmap, etc.). The
+device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
@@ -183,7 +183,7 @@ The usage pattern is::
struct hmm_range range;
...
- range.notifier = &mni;
+ range.notifier = &interval_sub;
range.start = ...;
range.end = ...;
range.pfns = ...;
@@ -191,11 +191,11 @@ The usage pattern is::
range.values = ...;
range.pfn_shift = ...;
- if (!mmget_not_zero(mni->notifier.mm))
+ if (!mmget_not_zero(interval_sub->notifier.mm))
return -EFAULT;
again:
- range.notifier_seq = mmu_interval_read_begin(&mni);
+ range.notifier_seq = mmu_interval_read_begin(&interval_sub);
down_read(&mm->mmap_sem);
ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
if (ret) {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 270aa8fd2800..e87bb864bdb2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -490,7 +490,7 @@ struct mm_struct {
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 9e6caa8ecd19..736f6918335e 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -8,7 +8,7 @@
#include <linux/srcu.h>
#include <linux/interval_tree.h>
-struct mmu_notifier_mm;
+struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
* through the gart alias address, so leading to memory
* corruption.
*/
- void (*release)(struct mmu_notifier *mn,
+ void (*release)(struct mmu_notifier *subscription,
struct mm_struct *mm);
/*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
- int (*clear_flush_young)(struct mmu_notifier *mn,
+ int (*clear_flush_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
- int (*clear_young)(struct mmu_notifier *mn,
+ int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -106,7 +106,7 @@ struct mmu_notifier_ops {
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
- int (*test_young)(struct mmu_notifier *mn,
+ int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address);
@@ -114,7 +114,7 @@ struct mmu_notifier_ops {
* change_pte is called in cases that pte mapping to page is changed:
* for example, when ksm remaps pte to point to a new shared page.
*/
- void (*change_pte)(struct mmu_notifier *mn,
+ void (*change_pte)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address,
pte_t pte);
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
* invalidate_range_end.
*
*/
- int (*invalidate_range_start)(struct mmu_notifier *mn,
+ int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
- void (*invalidate_range_end)(struct mmu_notifier *mn,
+ void (*invalidate_range_end)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
/*
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
*/
- void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ void (*invalidate_range)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
* and cannot sleep.
*/
struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
- void (*free_notifier)(struct mmu_notifier *mn);
+ void (*free_notifier)(struct mmu_notifier *subscription);
};
/*
@@ -235,7 +237,7 @@ struct mmu_notifier {
* was required but mmu_notifier_range_blockable(range) is false.
*/
struct mmu_interval_notifier_ops {
- bool (*invalidate)(struct mmu_interval_notifier *mni,
+ bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
};
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
static inline int mm_has_notifiers(struct mm_struct *mm)
{
- return unlikely(mm->mmu_notifier_mm);
+ return unlikely(mm->notifier_subscriptions);
}
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
up_write(&mm->mmap_sem);
return ret;
}
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
struct mm_struct *mm);
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
struct mm_struct *mm, unsigned long start,
unsigned long length,
const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
- struct mmu_interval_notifier *mni, struct mm_struct *mm,
+ struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
unsigned long start, unsigned long length,
const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
/**
* mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
* @cur_seq - The cur_seq passed to the invalidate() callback
*
* This must be called unconditionally from the invalidate callback of a
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
* If the caller does not call mmu_interval_read_begin() or
* mmu_interval_read_retry() then this call is not required.
*/
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
- unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+ unsigned long cur_seq)
{
- WRITE_ONCE(mni->invalidate_seq, cur_seq);
+ WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
/**
* mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
* seq: The return of the paired mmu_interval_read_begin()
*
* This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
* Returns true if an invalidation collided with this critical section, and
* the caller should retry.
*/
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
- unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
{
- return mni->invalidate_seq != seq;
+ return interval_sub->invalidate_seq != seq;
}
/**
* mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
* seq: The return of the matching mmu_interval_read_begin()
*
* This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
* This call can be used as part of loops and other expensive operations to
* expedite a retry.
*/
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
- unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
{
/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
- return READ_ONCE(mni->invalidate_seq) != seq;
+ return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
__mmu_notifier_invalidate_range(mm, start, end);
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
- mm->mmu_notifier_mm = NULL;
+ mm->notifier_subscriptions = NULL;
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_mm_destroy(mm);
+ __mmu_notifier_subscriptions_destroy(mm);
}
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
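[Editorial sketch, not part of the commit: the classic hlist-based mmu_notifier side keeps the same entry points, only the parameter naming convention changes to 'subscription'. A minimal, assumed example with hypothetical driver_* callbacks, matching the ops signatures shown in the hunks above:]

    #include <linux/mmu_notifier.h>

    static void driver_release(struct mmu_notifier *subscription,
                               struct mm_struct *mm)
    {
            /* Flush all secondary mappings; no new sptes may be established. */
    }

    static int driver_invalidate_range_start(struct mmu_notifier *subscription,
                                             const struct mmu_notifier_range *range)
    {
            /* Stop secondary accesses to [range->start, range->end). */
            return 0;
    }

    static void driver_invalidate_range_end(struct mmu_notifier *subscription,
                                            const struct mmu_notifier_range *range)
    {
            /* Secondary mappings may be re-established after this point. */
    }

    static const struct mmu_notifier_ops driver_notifier_ops = {
            .release                = driver_release,
            .invalidate_range_start = driver_invalidate_range_start,
            .invalidate_range_end   = driver_invalidate_range_end,
    };

    static struct mmu_notifier driver_subscription = {
            .ops = &driver_notifier_ops,
    };

    /* Registration itself is unchanged:
     *      mmu_notifier_register(&driver_subscription, mm);
     */
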
diff --git a/kernel/fork.c b/kernel/fork.c
index ef82feb4bddc..60a1295f4384 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -692,7 +692,7 @@ void __mmdrop(struct mm_struct *mm)
WARN_ON_ONCE(mm == current->active_mm);
mm_free_pgd(mm);
destroy_context(mm);
- mmu_notifier_mm_destroy(mm);
+ mmu_notifier_subscriptions_destroy(mm);
check_mm(mm);
put_user_ns(mm->user_ns);
free_mm(mm);
@@ -1025,7 +1025,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_aio(mm);
mm_init_owner(mm, p);
RCU_INIT_POINTER(mm->exe_file, NULL);
- mmu_notifier_mm_init(mm);
+ mmu_notifier_subscriptions_init(mm);
init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
mm->pmd_huge_pte = NULL;
diff --git a/mm/debug.c b/mm/debug.c
index 0461df1207cb..74ee73cf7079 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -153,7 +153,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
- "mmu_notifier_mm %px\n"
+ "notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
@@ -185,7 +185,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
- mm->mmu_notifier_mm,
+ mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f76ea05b1cb0..ef3973a5d34a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -29,12 +29,12 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
#endif
/*
- * The mmu notifier_mm structure is allocated and installed in
- * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * The mmu_notifier_subscriptions structure is allocated and installed in
+ * mm->notifier_subscriptions inside the mm_take_all_locks() protected
* critical section and it's released only when mm_count reaches zero
* in mmdrop().
*/
-struct mmu_notifier_mm {
+struct mmu_notifier_subscriptions {
/* all mmu notifiers registered in this mm are queued in this list */
struct hlist_head list;
bool has_itree;
@@ -65,80 +65,81 @@ struct mmu_notifier_mm {
*
* The write side has two states, fully excluded:
* - mm->active_invalidate_ranges != 0
- * - mnn->invalidate_seq & 1 == True (odd)
+ * - subscriptions->invalidate_seq & 1 == True (odd)
* - some range on the mm_struct is being invalidated
* - the itree is not allowed to change
*
* And partially excluded:
* - mm->active_invalidate_ranges != 0
- * - mnn->invalidate_seq & 1 == False (even)
+ * - subscriptions->invalidate_seq & 1 == False (even)
* - some range on the mm_struct is being invalidated
* - the itree is allowed to change
*
- * Operations on mmu_notifier_mm->invalidate_seq (under spinlock):
+ * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
* seq |= 1 # Begin writing
* seq++ # Release the writing state
* seq & 1 # True if a writer exists
*
* The later state avoids some expensive work on inv_end in the common case of
- * no mni monitoring the VA.
+ * no mmu_interval_notifier monitoring the VA.
*/
-static bool mn_itree_is_invalidating(struct mmu_notifier_mm *mmn_mm)
+static bool
+mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
- lockdep_assert_held(&mmn_mm->lock);
- return mmn_mm->invalidate_seq & 1;
+ lockdep_assert_held(&subscriptions->lock);
+ return subscriptions->invalidate_seq & 1;
}
static struct mmu_interval_notifier *
-mn_itree_inv_start_range(struct mmu_notifier_mm *mmn_mm,
+mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
const struct mmu_notifier_range *range,
unsigned long *seq)
{
struct interval_tree_node *node;
struct mmu_interval_notifier *res = NULL;
- spin_lock(&mmn_mm->lock);
- mmn_mm->active_invalidate_ranges++;
- node = interval_tree_iter_first(&mmn_mm->itree, range->start,
+ spin_lock(&subscriptions->lock);
+ subscriptions->active_invalidate_ranges++;
+ node = interval_tree_iter_first(&subscriptions->itree, range->start,
range->end - 1);
if (node) {
- mmn_mm->invalidate_seq |= 1;
+ subscriptions->invalidate_seq |= 1;
res = container_of(node, struct mmu_interval_notifier,
interval_tree);
}
- *seq = mmn_mm->invalidate_seq;
- spin_unlock(&mmn_mm->lock);
+ *seq = subscriptions->invalidate_seq;
+ spin_unlock(&subscriptions->lock);
return res;
}
static struct mmu_interval_notifier *
-mn_itree_inv_next(struct mmu_interval_notifier *mni,
+mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
const struct mmu_notifier_range *range)
{
struct interval_tree_node *node;
- node = interval_tree_iter_next(&mni->interval_tree, range->start,
- range->end - 1);
+ node = interval_tree_iter_next(&interval_sub->interval_tree,
+ range->start, range->end - 1);
if (!node)
return NULL;
return container_of(node, struct mmu_interval_notifier, interval_tree);
}
-static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
+static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
- struct mmu_interval_notifier *mni;
+ struct mmu_interval_notifier *interval_sub;
struct hlist_node *next;
- spin_lock(&mmn_mm->lock);
- if (--mmn_mm->active_invalidate_ranges ||
- !mn_itree_is_invalidating(mmn_mm)) {
- spin_unlock(&mmn_mm->lock);
+ spin_lock(&subscriptions->lock);
+ if (--subscriptions->active_invalidate_ranges ||
+ !mn_itree_is_invalidating(subscriptions)) {
+ spin_unlock(&subscriptions->lock);
return;
}
/* Make invalidate_seq even */
- mmn_mm->invalidate_seq++;
+ subscriptions->invalidate_seq++;
/*
* The inv_end incorporates a deferred mechanism like rtnl_unlock().
@@ -146,30 +147,31 @@ static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
* they are progressed. This arrangement for tree updates is used to
* avoid using a blocking lock during invalidate_range_start.
*/
- hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
+ hlist_for_each_entry_safe(interval_sub, next,
+ &subscriptions->deferred_list,
deferred_item) {
- if (RB_EMPTY_NODE(&mni->interval_tree.rb))
- interval_tree_insert(&mni->interval_tree,
- &mmn_mm->itree);
+ if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
+ interval_tree_insert(&interval_sub->interval_tree,
+ &subscriptions->itree);
else
- interval_tree_remove(&mni->interval_tree,
- &mmn_mm->itree);
- hlist_del(&mni->deferred_item);
+ interval_tree_remove(&interval_sub->interval_tree,
+ &subscriptions->itree);
+ hlist_del(&interval_sub->deferred_item);
}
- spin_unlock(&mmn_mm->lock);
+ spin_unlock(&subscriptions->lock);
- wake_up_all(&mmn_mm->wq);
+ wake_up_all(&subscriptions->wq);
}
/**
* mmu_interval_read_begin - Begin a read side critical section against a VA
* range
- * mni: The range to use
+ * interval_sub: The interval subscription
*
* mmu_interval_read_begin()/mmu_interval_read_retry() implement a
- * collision-retry scheme similar to seqcount for the VA range under mni. If
- * the mm invokes invalidation during the critical section then
- * mmu_interval_read_retry() will return true.
+ * collision-retry scheme similar to seqcount for the VA range under
+ * subscription. If the mm invokes invalidation during the critical section
+ * then mmu_interval_read_retry() will return true.
*
* This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
* require a blocking context. The critical region formed by this can sleep,
@@ -180,68 +182,71 @@ static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
*
* The return value should be passed to mmu_interval_read_retry().
*/
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
- struct mmu_notifier_mm *mmn_mm = mni->mm->mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *subscriptions =
+ interval_sub->mm->notifier_subscriptions;
unsigned long seq;
bool is_invalidating;
/*
- * If the mni has a different seq value under the user_lock than we
- * started with then it has collided.
+ * If the subscription has a different seq value under the user_lock
+ * than we started with then it has collided.
*
- * If the mni currently has the same seq value as the mmn_mm seq, then
- * it is currently between invalidate_start/end and is colliding.
+ * If the subscription currently has the same seq value as the
+ * subscriptions seq, then it is currently between
+ * invalidate_start/end and is colliding.
*
* The locking looks broadly like this:
* mn_tree_invalidate_start(): mmu_interval_read_begin():
* spin_lock
- * seq = READ_ONCE(mni->invalidate_seq);
- * seq == mmn_mm->invalidate_seq
+ * seq = READ_ONCE(interval_sub->invalidate_seq);
+ * seq == subs->invalidate_seq
* spin_unlock
* spin_lock
- * seq = ++mmn_mm->invalidate_seq
+ * seq = ++subscriptions->invalidate_seq
* spin_unlock
* op->invalidate_range():
* user_lock
* mmu_interval_set_seq()
- * mni->invalidate_seq = seq
+ * interval_sub->invalidate_seq = seq
* user_unlock
*
* [Required: mmu_interval_read_retry() == true]
*
* mn_itree_inv_end():
* spin_lock
- * seq = ++mmn_mm->invalidate_seq
+ * seq = ++subscriptions->invalidate_seq
* spin_unlock
*
* user_lock
* mmu_interval_read_retry():
- * mni->invalidate_seq != seq
+ * interval_sub->invalidate_seq != seq
* user_unlock
*
* Barriers are not needed here as any races here are closed by an
* eventual mmu_interval_read_retry(), which provides a barrier via the
* user_lock.
*/
- spin_lock(&mmn_mm->lock);
+ spin_lock(&subscriptions->lock);
/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
- seq = READ_ONCE(mni->invalidate_seq);
- is_invalidating = seq == mmn_mm->invalidate_seq;
- spin_unlock(&mmn_mm->lock);
+ seq = READ_ONCE(interval_sub->invalidate_seq);
+ is_invalidating = seq == subscriptions->invalidate_seq;
+ spin_unlock(&subscriptions->lock);
/*
- * mni->invalidate_seq must always be set to an odd value via
+ * interval_sub->invalidate_seq must always be set to an odd value via
* mmu_interval_set_seq() using the provided cur_seq from
* mn_itree_inv_start_range(). This ensures that if seq does wrap we
* will always clear the below sleep in some reasonable time as
- * mmn_mm->invalidate_seq is even in the idle state.
+ * subscriptions->invalidate_seq is even in the idle state.
*/
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
if (is_invalidating)
- wait_event(mmn_mm->wq,
- READ_ONCE(mmn_mm->invalidate_seq) != seq);
+ wait_event(subscriptions->wq,
+ READ_ONCE(subscriptions->invalidate_seq) != seq);
/*
* Notice that mmu_interval_read_retry() can already be true at this
@@ -253,7 +258,7 @@ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
-static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
+static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
struct mm_struct *mm)
{
struct mmu_notifier_range range = {
@@ -263,17 +268,20 @@ static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
.start = 0,
.end = ULONG_MAX,
};
- struct mmu_interval_notifier *mni;
+ struct mmu_interval_notifier *interval_sub;
unsigned long cur_seq;
bool ret;
- for (mni = mn_itree_inv_start_range(mmn_mm, &range, &cur_seq); mni;
- mni = mn_itree_inv_next(mni, &range)) {
- ret = mni->ops->invalidate(mni, &range, cur_seq);
+ for (interval_sub =
+ mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
+ interval_sub;
+ interval_sub = mn_itree_inv_next(interval_sub, &range)) {
+ ret = interval_sub->ops->invalidate(interval_sub, &range,
+ cur_seq);
WARN_ON(!ret);
}
- mn_itree_inv_end(mmn_mm);
+ mn_itree_inv_end(subscriptions);
}
/*
@@ -283,15 +291,15 @@ static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
* in parallel despite there being no task using this mm any more,
* through the vmas outside of the exit_mmap context, such as with
* vmtruncate. This serializes against mmu_notifier_unregister with
- * the mmu_notifier_mm->lock in addition to SRCU and it serializes
- * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * the notifier_subscriptions->lock in addition to SRCU and it serializes
+ * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
* can't go away from under us as exit_mmap holds an mm_count pin
* itself.
*/
-static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
+static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
struct mm_struct *mm)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int id;
/*
@@ -299,29 +307,29 @@ static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
* ->release returns.
*/
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist)
+ hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist)
/*
* If ->release runs before mmu_notifier_unregister it must be
* handled, as it's the only way for the driver to flush all
* existing sptes and stop the driver from establishing any more
* sptes before all the pages in the mm are freed.
*/
- if (mn->ops->release)
- mn->ops->release(mn, mm);
+ if (subscription->ops->release)
+ subscription->ops->release(subscription, mm);
- spin_lock(&mmn_mm->lock);
- while (unlikely(!hlist_empty(&mmn_mm->list))) {
- mn = hlist_entry(mmn_mm->list.first, struct mmu_notifier,
- hlist);
+ spin_lock(&subscriptions->lock);
+ while (unlikely(!hlist_empty(&subscriptions->list))) {
+ subscription = hlist_entry(subscriptions->list.first,
+ struct mmu_notifier, hlist);
/*
* We arrived before mmu_notifier_unregister so
* mmu_notifier_unregister will do nothing other than to wait
* for ->release to finish and for mmu_notifier_unregister to
* return.
*/
- hlist_del_init_rcu(&mn->hlist);
+ hlist_del_init_rcu(&subscription->hlist);
}
- spin_unlock(&mmn_mm->lock);
+ spin_unlock(&subscriptions->lock);
srcu_read_unlock(&srcu, id);
/*
@@ -330,21 +338,22 @@ static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
* until the ->release method returns, if it was invoked by
* mmu_notifier_unregister.
*
- * The mmu_notifier_mm can't go away from under us because one mm_count
- * is held by exit_mmap.
+ * The notifier_subscriptions can't go away from under us because
+ * one mm_count is held by exit_mmap.
*/
synchronize_srcu(&srcu);
}
void __mmu_notifier_release(struct mm_struct *mm)
{
- struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *subscriptions =
+ mm->notifier_subscriptions;
- if (mmn_mm->has_itree)
- mn_itree_release(mmn_mm, mm);
+ if (subscriptions->has_itree)
+ mn_itree_release(subscriptions, mm);
- if (!hlist_empty(&mmn_mm->list))
- mn_hlist_release(mmn_mm, mm);
+ if (!hlist_empty(&subscriptions->list))
+ mn_hlist_release(subscriptions, mm);
}
/*
@@ -356,13 +365,15 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
- if (mn->ops->clear_flush_young)
- young |= mn->ops->clear_flush_young(mn, mm, start, end);
+ hlist_for_each_entry_rcu(subscription,
+ &mm->notifier_subscriptions->list, hlist) {
+ if (subscription->ops->clear_flush_young)
+ young |= subscription->ops->clear_flush_young(
+ subscription, mm, start, end);
}
srcu_read_unlock(&srcu, id);
@@ -373,13 +384,15 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
- if (mn->ops->clear_young)
- young |= mn->ops->clear_young(mn, mm, start, end);
+ hlist_for_each_entry_rcu(subscription,
+ &mm->notifier_subscriptions->list, hlist) {
+ if (subscription->ops->clear_young)
+ young |= subscription->ops->clear_young(subscription,
+ mm, start, end);
}
srcu_read_unlock(&srcu, id);
@@ -389,13 +402,15 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
- if (mn->ops->test_young) {
- young = mn->ops->test_young(mn, mm, address);
+ hlist_for_each_entry_rcu(subscription,
+ &mm->notifier_subscriptions->list, hlist) {
+ if (subscription->ops->test_young) {
+ young = subscription->ops->test_young(subscription, mm,
+ address);
if (young)
break;
}
@@ -408,28 +423,33 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
pte_t pte)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
- if (mn->ops->change_pte)
- mn->ops->change_pte(mn, mm, address, pte);
+ hlist_for_each_entry_rcu(subscription,
+ &mm->notifier_subscriptions->list, hlist) {
+ if (subscription->ops->change_pte)
+ subscription->ops->change_pte(subscription, mm, address,
+ pte);
}
srcu_read_unlock(&srcu, id);
}
-static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm,
+static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
const struct mmu_notifier_range *range)
{
- struct mmu_interval_notifier *mni;
+ struct mmu_interval_notifier *interval_sub;
unsigned long cur_seq;
- for (mni = mn_itree_inv_start_range(mmn_mm, range, &cur_seq); mni;
- mni = mn_itree_inv_next(mni, range)) {
+ for (interval_sub =
+ mn_itree_inv_start_range(subscriptions, range, &cur_seq);
+ interval_sub;
+ interval_sub = mn_itree_inv_next(interval_sub, range)) {
bool ret;
- ret = mni->ops->invalidate(mni, range, cur_seq);
+ ret = interval_sub->ops->invalidate(interval_sub, range,
+ cur_seq);
if (!ret) {
if (WARN_ON(mmu_notifier_range_blockable(range)))
continue;
@@ -443,31 +463,36 @@ out_would_block:
* On -EAGAIN the non-blocking caller is not allowed to call
* invalidate_range_end()
*/
- mn_itree_inv_end(mmn_mm);
+ mn_itree_inv_end(subscriptions);
return -EAGAIN;
}
-static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
- struct mmu_notifier_range *range)
+static int mn_hlist_invalidate_range_start(
+ struct mmu_notifier_subscriptions *subscriptions,
+ struct mmu_notifier_range *range)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int ret = 0;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
- if (mn->ops->invalidate_range_start) {
+ hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+ const struct mmu_notifier_ops *ops = subscription->ops;
+
+ if (ops->invalidate_range_start) {
int _ret;
if (!mmu_notifier_range_blockable(range))
non_block_start();
- _ret = mn->ops->invalidate_range_start(mn, range);
+ _ret = ops->invalidate_range_start(subscription, range);
if (!mmu_notifier_range_blockable(range))
non_block_end();
if (_ret) {
pr_info("%pS callback failed with %d in %sblockable context.\n",
- mn->ops->invalidate_range_start, _ret,
- !mmu_notifier_range_blockable(range) ? "non-" : "");
+ ops->invalidate_range_start, _ret,
+ !mmu_notifier_range_blockable(range) ?
+ "non-" :
+ "");
WARN_ON(mmu_notifier_range_blockable(range) ||
_ret != -EAGAIN);
ret = _ret;
@@ -481,28 +506,29 @@ static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
- struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *subscriptions =
+ range->mm->notifier_subscriptions;
int ret;
- if (mmn_mm->has_itree) {
- ret = mn_itree_invalidate(mmn_mm, range);
+ if (subscriptions->has_itree) {
+ ret = mn_itree_invalidate(subscriptions, range);
if (ret)
return ret;
}
- if (!hlist_empty(&mmn_mm->list))
- return mn_hlist_invalidate_range_start(mmn_mm, range);
+ if (!hlist_empty(&subscriptions->list))
+ return mn_hlist_invalidate_range_start(subscriptions, range);
return 0;
}
-static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
- struct mmu_notifier_range *range,
- bool only_end)
+static void
+mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
+ struct mmu_notifier_range *range, bool only_end)
{
- struct mmu_notifier *mn;
+ struct mmu_notifier *subscription;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
+ hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
/*
* Call invalidate_range here too to avoid the need for the
* subsystem of having to register an invalidate_range_end
@@ -516,14 +542,16 @@ static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mm