When using mmu notifiers, we are allowed to remove the page count
reference taken by get_user_pages to a specific page that is mapped
inside the shadow page tables.

This is needed so we can balance the pagecount against mapcount
checking.

(Right now kvm increases the pagecount and does not increase the
mapcount when mapping page into shadow page table entry,
so when comparing pagecount against mapcount, you have no
reliable result.)

add SPTE_HOST_WRITEABLE flag to notify that the host physical page we are
pointing to from the spte is write protected, and therefore we can't
change its access to be write unless we run get_user_pages(write = 1).

(this is needed for change_pte support in kvm)

support for change_pte mmu notifiers is needed for kvm if it wants ksm to
directly map pages into its shadow page tables.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Signed-off-by: Justin M. Forbes <jforbes@redhat.com>
---
--- linux-2.6.30.x86_64/arch/x86/include/asm/kvm_host.h	2009-08-20 10:37:37.784886414 -0500
+++ linux-2.6.30.x86_64.kvm/arch/x86/include/asm/kvm_host.h	2009-08-20 10:39:33.742641558 -0500
@@ -796,5 +796,6 @@ asmlinkage void kvm_handle_fault_on_rebo
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #endif /* _ASM_X86_KVM_HOST_H */
--- linux-2.6.30.x86_64/arch/x86/kvm/mmu.c	2009-08-20 10:37:37.964887039 -0500
+++ linux-2.6.30.x86_64.kvm/arch/x86/kvm/mmu.c	2009-08-20 10:41:15.231638028 -0500
@@ -139,6 +139,8 @@ module_param(oos_shadow, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
@@ -254,6 +256,11 @@ static pfn_t spte_to_pfn(u64 pte)
 	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 }
 
+static pte_t ptep_val(pte_t *ptep)
+{
+	return *ptep;
+}
+
 static gfn_t pse36_gfn_delta(u32 gpte)
 {
 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
@@ -573,9 +580,7 @@ static void rmap_remove(struct kvm *kvm,
 	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
-		kvm_release_pfn_dirty(pfn);
-	else
-		kvm_release_pfn_clean(pfn);
+		kvm_set_pfn_dirty(pfn);
 	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -684,7 +689,8 @@ static int rmap_write_protect(struct kvm
 	return write_protected;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			   unsigned long data)
 {
 	u64 *spte;
 	int need_tlb_flush = 0;
@@ -699,8 +705,48 @@ static int kvm_unmap_rmapp(struct kvm *k
 	return need_tlb_flush;
 }
 
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			     unsigned long data)
+{
+	int need_flush = 0;
+	u64 *spte, new_spte;
+	pte_t *ptep = (pte_t *)data;
+	pfn_t new_pfn;
+
+	new_pfn = pte_pfn(ptep_val(ptep));
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		BUG_ON(!is_shadow_present_pte(*spte));
+		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+		need_flush = 1;
+		if (pte_write(ptep_val(ptep))) {
+			rmap_remove(kvm, spte);
+			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			spte = rmap_next(kvm, rmapp, NULL);
+		} else {
+			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+			new_spte |= new_pfn << PAGE_SHIFT;
+
+			if (!pte_write(ptep_val(ptep))) {
+				new_spte &= ~PT_WRITABLE_MASK;
+				new_spte &= ~SPTE_HOST_WRITEABLE;
+				if (is_writeble_pte(*spte))
+					kvm_set_pfn_dirty(spte_to_pfn(*spte));
+			}
+			set_shadow_pte(spte, new_spte);
+			spte = rmap_next(kvm, rmapp, spte);
+		}
+	}
+	if (need_flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+			  unsigned long data,
+			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+					 unsigned long data))
 {
 	int i;
 	int retval = 0;
@@ -721,11 +767,13 @@ static int kvm_handle_hva(struct kvm *kv
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
-			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+			retval |= handler(kvm, &memslot->rmap[gfn_offset],
+					  data);
 			retval |= handler(kvm,
 					  &memslot->lpage_info[
 						  gfn_offset /
-						  KVM_PAGES_PER_HPAGE].rmap_pde);
+						  KVM_PAGES_PER_HPAGE].rmap_pde,
+						  data);
 		}
 	}
 
@@ -734,10 +782,16 @@ static int kvm_handle_hva(struct kvm *kv
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			 unsigned long data)
 {
 	u64 *spte;
 	int young = 0;
@@ -770,13 +824,13 @@ static void rmap_recycle(struct kvm_vcpu
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmapp);
+	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 #ifdef MMU_DEBUG
@@ -1686,7 +1740,7 @@ static int set_spte(struct kvm_vcpu *vcp
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync)
+		    bool can_unsync, bool reset_host_protection)
 {
 	u64 spte;
 	int ret = 0;
@@ -1744,6 +1798,8 @@ static int set_spte(struct kvm_vcpu *vcp
 				spte &= ~PT_WRITABLE_MASK;
 		}
 	}
+	if (reset_host_protection)
+		spte |= SPTE_HOST_WRITEABLE;
 
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
@@ -1757,7 +1813,8 @@ static void mmu_set_spte(struct kvm_vcpu
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int largepage, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+			 pfn_t pfn, bool speculative,
+			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1787,7 +1844,8 @@ static void mmu_set_spte(struct kvm_vcpu
 			was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, gfn, pfn, speculative, true)) {
+		      dirty, largepage, gfn, pfn, speculative, true,
+		      reset_host_protection)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1804,8 +1862,7 @@ static void mmu_set_spte(struct kvm_vcpu
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_pte(*shadow_pte))
-			kvm_release_pfn_clean(pfn);
+		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
 	} else {
@@ -1837,7 +1894,7 @@ static int __direct_map(struct kvm_vcpu 
 		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     largepage, gfn, pfn, false);
+				     largepage, gfn, pfn, false, true);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
--- linux-2.6.30.x86_64/arch/x86/kvm/paging_tmpl.h	2009-08-20 10:37:37.966889166 -0500
+++ linux-2.6.30.x86_64.kvm/arch/x86/kvm/paging_tmpl.h	2009-08-20 10:39:33.747636180 -0500
@@ -266,9 +266,13 @@ static void FNAME(update_pte)(struct kvm
 	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
 		return;
 	kvm_get_pfn(pfn);
+	/*
+	 * we call mmu_set_spte() with reset_host_protection = true because
+	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+	 */
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, largepage,
-		     gpte_to_gfn(gpte), pfn, true);
+		     gpte_to_gfn(gpte), pfn, true, true);
 }
 
 /*
@@ -302,7 +306,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
 				     user_fault, write_fault,
 				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
 				     ptwrite, largepage,
-				     gw->gfn, pfn, false);
+				     gw->gfn, pfn, false, true);
 			break;
 		}
 
@@ -552,6 +556,7 @@ static void FNAME(prefetch_page)(struct 
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
+	bool reset_host_protection = 1;
 
 	offset = nr_present = 0;
 
@@ -589,9 +594,13 @@ static int FNAME(sync_page)(struct kvm_v
 
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
+			pte_access &= ~PT_WRITABLE_MASK;
+			reset_host_protection = 0;
+		} else { reset_host_protection = 1; }
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gfn,
-			 spte_to_pfn(sp->spt[i]), true, false);
+			 spte_to_pfn(sp->spt[i]), true, false, reset_host_protection);
 	}
 
 	return !nr_present;
--- linux-2.6.30.x86_64/virt/kvm/kvm_main.c	2009-08-20 10:37:45.448886340 -0500
+++ linux-2.6.30.x86_64.kvm/virt/kvm/kvm_main.c	2009-08-20 10:39:33.749636212 -0500
@@ -859,6 +859,19 @@ static void kvm_mmu_notifier_invalidate_
 
 }
 
+static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+					struct mm_struct *mm,
+					unsigned long address,
+					pte_t pte)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+	spin_lock(&kvm->mmu_lock);
+	kvm->mmu_notifier_seq++;
+	kvm_set_spte_hva(kvm, address, pte);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 						    struct mm_struct *mm,
 						    unsigned long start,
@@ -938,6 +951,7 @@ static const struct mmu_notifier_ops kvm
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.change_pte		= kvm_mmu_notifier_change_pte,
 	.release		= kvm_mmu_notifier_release,
 };
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */