[PATCH v12 03/29] mmu_notifier: pass page pointer to mmu_notifier_invalidate_page() v2
From: Jérôme Glisse
Date: Tue Mar 08 2016 - 14:47:13 EST
A listener of mm events might not have an easy way to get the struct page
behind an address invalidated with the mmu_notifier_invalidate_page()
function, as the callback happens after the CPU page table has been
cleared/updated. This is the case, for instance, when the listener stores
a DMA mapping inside its secondary page table. To avoid a complex reverse
DMA mapping lookup, just pass along a pointer to the page being
invalidated.
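
As an illustration, a listener that records only DMA/bus addresses in its
secondary page table can then use the page directly in its callback, for
instance to dirty it before tearing the mapping down, instead of doing a
reverse dma -> struct page lookup. The sketch below is only an example
written against the callback signature introduced here; struct my_listener,
its dev and lock fields, and the my_listener_clear_entry() helper are all
hypothetical and not part of this patch:

#include <linux/mmu_notifier.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical listener keeping dma addresses in a secondary page table. */
struct my_listener {
	struct mmu_notifier mn;
	struct device *dev;
	spinlock_t lock;
	/* ... secondary page table indexed by virtual address ... */
};

/*
 * Hypothetical helper: clears the secondary page table entry for @address
 * and returns its dma address (0 if none), reporting whether it was
 * writable.
 */
static dma_addr_t my_listener_clear_entry(struct my_listener *l,
					  unsigned long address,
					  bool *writable);

static void my_listener_invalidate_page(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					struct page *page,
					enum mmu_event event)
{
	struct my_listener *l = container_of(mn, struct my_listener, mn);
	dma_addr_t dma;
	bool writable;

	spin_lock(&l->lock);
	dma = my_listener_clear_entry(l, address, &writable);
	spin_unlock(&l->lock);
	if (!dma)
		return;

	/*
	 * The secondary page table only records the bus address, so
	 * without the page argument a reverse dma -> struct page lookup
	 * would be needed here to dirty the page the device may have
	 * written to.
	 */
	if (writable)
		set_page_dirty(page);
	dma_unmap_page(l->dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static const struct mmu_notifier_ops my_listener_ops = {
	.invalidate_page = my_listener_invalidate_page,
};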
Changed since v1:
- English syntax fixes.
Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
---
 drivers/infiniband/core/umem_odp.c | 1 +
 drivers/iommu/amd_iommu_v2.c       | 1 +
 drivers/misc/sgi-gru/grutlbpurge.c | 1 +
 drivers/xen/gntdev.c               | 1 +
 include/linux/mmu_notifier.h       | 6 +++++-
 mm/mmu_notifier.c                  | 3 ++-
 mm/rmap.c                          | 4 ++--
 virt/kvm/kvm_main.c                | 1 +
 8 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 58d9a00..0541761 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,6 +166,7 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 2b4be22..bb5c678 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -393,6 +393,7 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
static void mn_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
__mn_flush_page(mn, address);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 40cf589..4268649 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -250,6 +250,7 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 0ca3720..c318aff 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -485,6 +485,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
static void mn_invl_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
struct mmu_notifier_range range;
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c4ba044..9e65a3f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -184,6 +184,7 @@ struct mmu_notifier_ops {
void (*invalidate_page)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event);
/*
@@ -305,6 +306,7 @@ extern void __mmu_notifier_change_pte(struct mm_struct *mm,
enum mmu_event event);
extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
struct mmu_notifier_range *range);
@@ -362,10 +364,11 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_page(mm, address, event);
+ __mmu_notifier_invalidate_page(mm, address, page, event);
}
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
@@ -538,6 +541,7 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
}
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index c43c851..316e4a9 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -177,6 +177,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm,
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
struct mmu_notifier *mn;
@@ -185,7 +186,7 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
id = srcu_read_lock(&srcu);
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_page)
- mn->ops->invalidate_page(mn, mm, address, event);
+ mn->ops->invalidate_page(mn, mm, address, page, event);
}
srcu_read_unlock(&srcu, id);
}
diff --git a/mm/rmap.c b/mm/rmap.c
index a24d0b2..063f8de 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1054,7 +1054,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
if (ret) {
- mmu_notifier_invalidate_page(mm, address, MMU_WRITE_BACK);
+ mmu_notifier_invalidate_page(mm, address, page, MMU_WRITE_BACK);
(*cleaned)++;
}
out:
@@ -1552,7 +1552,7 @@ discard:
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
- mmu_notifier_invalidate_page(mm, address, MMU_MIGRATE);
+ mmu_notifier_invalidate_page(mm, address, page, MMU_MIGRATE);
out:
return ret;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b059307..d6dbaab 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -273,6 +273,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
+ struct page *page,
enum mmu_event event)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
--
2.4.3