+/*
+ * Finish migrating a page-table page: copy the cpu-set pte bits from
+ * the old page into its replacement, then release the old page.
+ */
+void update_new_page_and_free(struct mm_struct *mm, struct page *old_page)
+{
+ int type;
+
+ /* Currently we only worry about cpu bits in ptes. */
+ if (old_page->page_table_type == PAGE_TABLE_PTE) {
+ struct page *new_page = old_page->migrated_page;
+ spinlock_t *ptl;
+ pte_t *old;
+ pte_t *new;
+ int i;
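+
+ /*
+ * With split page table locks, __pte_lockptr() yields the lock
+ * embedded in the new pte page; otherwise fall back to the
+ * mm-wide page_table_lock.
+ */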
+#ifdef __pte_lockptr
+ ptl = __pte_lockptr(new_page);
+#else
+ ptl = &mm->page_table_lock;
+#endif
+ spin_lock(ptl);
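+ /* Page-table pages may live in highmem; map both atomically. */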
+ old = (pte_t *)kmap_atomic(old_page, KM_PTE0);
+ new = (pte_t *)kmap_atomic(new_page, KM_PTE1);
+ /* Propagate the cpu-maintained bits (e.g. accessed/dirty) per entry. */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ arch_copy_cpu_bits_pte(old + i, new + i);
+
+ kunmap_atomic(new, KM_PTE1);
+ kunmap_atomic(old, KM_PTE0);
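+ /* The new table is now up to date; break the migration link. */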
+ new_page->migrated_page = NULL;
+ spin_unlock(ptl);
+ }
+
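+ /*
+ * The old page no longer backs a page table: clear its migration
+ * pointer, reset its mapcount and return it to the allocator.
+ */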
+ old_page->migrated_page = NULL;
+ type = old_page->page_table_type;
+ reset_page_mapcount(old_page);
+
+ free_page_table_page(mm, old_page, type);
+}