[PATCH v2 01/17] mm/gup: Fixup p*_access_permitted()
From: Peter Zijlstra
Date: Thu Dec 14 2017 - 06:46:27 EST
The gup_*_range() functions which implement __get_user_pages_fast() do
a p*_access_permitted() test to see if the memory is at all accessible
(tests both _PAGE_USER|_PAGE_RW and architectural things like
pkeys).
But the follow_*() functions which implement __get_user_pages() do not
have this test. Recently, commit:
5c9d2d5c269c ("mm: replace pte_write with pte_access_permitted in fault + gup paths")
added it to a few specific write paths, but it failed to consistently
apply it (I've not audited anything outside of gup).
Revert the change from that patch and insert the tests in the right
locations such that they cover all READ / WRITE accesses for all
pte/pmd/pud levels.
In particular I care about the _PAGE_USER test; we should not ever
allow access to pages not marked with it, and it also makes the pkey
accesses more consistent.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
mm/gup.c | 25 ++++++++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_access_permitted(pte, WRITE) ||
+ return pte_write(pte) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
@@ -153,6 +153,11 @@ static struct page *follow_page_pte(stru
}
if (flags & FOLL_GET) {
+ if (!pte_access_permitted(pte, !!(flags & FOLL_WRITE))) {
+ page = ERR_PTR(-EFAULT);
+ goto out;
+ }
+
get_page(page);
/* drop the pgmap reference now that we hold the page */
@@ -244,6 +249,15 @@ static struct page *follow_pmd_mask(stru
pmd_migration_entry_wait(mm, pmd);
goto retry;
}
+
+ if (flags & FOLL_GET) {
+ if (!pmd_access_permitted(*pmd, !!(flags & FOLL_WRITE))) {
+ page = ERR_PTR(-EFAULT);
+ spin_unlock(ptr);
+ return page;
+ }
+ }
+
if (pmd_devmap(*pmd)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -326,6 +340,15 @@ static struct page *follow_pud_mask(stru
return page;
return no_page_table(vma, flags);
}
+
+ if (flags & FOLL_GET) {
+ if (!pud_access_permitted(*pud, !!(flags & FOLL_WRITE))) {
+ page = ERR_PTR(-EFAULT);
+ spin_unlock(ptr);
+ return page;
+ }
+ }
+
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags);