[PATCH 3.16 124/366] pagemap: hide physical addresses from non-privileged users
From: Ben Hutchings
Date: Sun Nov 11 2018 - 15:21:09 EST
3.16.61-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
commit 1c90308e7a77af6742a97d1021cca923b23b7f0d upstream.
This patch makes pagemap readable for normal users and hides physical
addresses from them. For some use cases the PFN isn't required at all.
See http://lkml.kernel.org/r/1425935472-17949-1-git-send-email-kirill@xxxxxxxxxxxxx
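For reference, a minimal user-space sketch (not part of this patch) of how
the new behaviour can be observed: it reads the pagemap entry for one of the
caller's own pages. Without CAP_SYS_ADMIN the PFN field (bits 0-54) is
expected to read back as zero, whereas pre-patch kernels fail the open()
with EPERM for non-root.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		long page_size = sysconf(_SC_PAGESIZE);
		char *buf = malloc(page_size);
		uint64_t entry;

		if (!buf)
			return 1;
		buf[0] = 1;	/* touch the page so it is actually present */

		int fd = open("/proc/self/pagemap", O_RDONLY);
		if (fd < 0) {
			perror("open");	/* EPERM for non-root before this change */
			return 1;
		}

		/* one 8-byte entry per virtual page, indexed by virtual page number */
		off_t offset = ((uintptr_t)buf / page_size) * sizeof(entry);
		if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
			perror("pread");
			return 1;
		}

		printf("present=%llu pfn=0x%llx\n",
		       (unsigned long long)(entry >> 63),
		       (unsigned long long)(entry & ((1ULL << 55) - 1)));

		close(fd);
		free(buf);
		return 0;
	}

Run as an unprivileged user: on a patched kernel the read succeeds but the
pfn field prints as 0; run as root (or with CAP_SYS_ADMIN) the real frame
number is reported.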
Fixes: ab676b7d6fbf ("pagemap: do not leak physical addresses to non-privileged userspace")
Signed-off-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
Reviewed-by: Mark Williamson <mwilliamson@xxxxxxxxxxxxxxxxx>
Tested-by: Mark Williamson <mwilliamson@xxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
[bwh: Backported to 3.16:
- Add the same check in the places where we look up a PFN
- Adjust context]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
---
fs/proc/task_mmu.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -862,6 +862,7 @@ struct pagemapread {
int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
pagemap_entry_t *buffer;
bool v2;
+ bool show_pfn;
};
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
@@ -921,12 +922,13 @@ static int pagemap_pte_hole(unsigned lon
static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
- u64 frame, flags;
+ u64 frame = 0, flags;
struct page *page = NULL;
int flags2 = 0;
if (pte_present(pte)) {
- frame = pte_pfn(pte);
+ if (pm->show_pfn)
+ frame = pte_pfn(pte);
flags = PM_PRESENT;
page = vm_normal_page(vma, addr, pte);
if (pte_soft_dirty(pte))
@@ -966,7 +968,7 @@ static void thp_pmd_to_pagemap_entry(pag
* This if-check is just to prepare for future implementation.
*/
if (pmd_present(pmd))
- *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
+ *pme = make_pme((pm->show_pfn ? PM_PFRAME(pmd_pfn(pmd) + offset) : 0)
| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
else
*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
@@ -1075,7 +1077,7 @@ static void huge_pte_to_pagemap_entry(pa
pte_t pte, int offset, int flags2)
{
if (pte_present(pte))
- *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
+ *pme = make_pme((pm->show_pfn ? PM_PFRAME(pte_pfn(pte) + offset) : 0) |
PM_STATUS2(pm->v2, flags2) |
PM_PRESENT);
else
@@ -1167,6 +1169,10 @@ static ssize_t pagemap_read(struct file
goto out_task;
pm.v2 = soft_dirty_cleared;
+
+ /* do not disclose physical addresses: attack vector */
+ pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
+
pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
@@ -1241,9 +1247,6 @@ out:
static int pagemap_open(struct inode *inode, struct file *file)
{
- /* do not disclose physical addresses: attack vector */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
"to stop being page-shift some time soon. See the "
"linux/Documentation/vm/pagemap.txt for details.\n");