Re: Question regarding x86_64 __PHYSICAL_MASK_SHIFT

From: Tejun Heo
Date: Tue Oct 04 2005 - 13:54:41 EST


Hello, Andi.

On Tue, Oct 04, 2005 at 07:24:56PM +0200, Andi Kleen wrote:
>
> You're right - PHYSICAL_MASK shouldn't be applied to PFNs, only to full
> addresses. Fixed with appended patch.
>
> The 46-bit limit is because half of the 48-bit virtual space
> is used for user space and the other, 47-bit half is divided into
> the direct mapping and other mappings (ioremap, vmalloc etc.). All
> physical memory has to fit into the direct mapping, so you
> end up with a 46-bit limit.
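
(Spelling out that arithmetic, assuming the standard 4-level layout:

	2^48 bytes of virtual space
	/2 -> 2^47 bytes for the kernel half
	/2 -> 2^46 bytes (64 TB) left for the direct mapping

so physical memory above 2^46 can't be direct-mapped, which is where
the old 46-bit value came from.)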

__PHYSICAL_MASK is only used to mask out non-pfn bits from page table
entries. I don't really see how it's related to virtual space
construction.
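
To illustrate, here is a minimal user-space sketch of that masking with
the new constants (the pte value and PAGE_SHIFT == 12 are made up for
the example; this is not kernel code):

/* user-space sketch -- pfn extraction from a pte */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_MASK		(~((1UL << PAGE_SHIFT) - 1))
#define __PHYSICAL_MASK_SHIFT	52
#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)	/* bits 12..51 */

int main(void)
{
	/* made-up pte: pfn 0x12345 with NX (bit 63) and low flag bits set */
	unsigned long pte = (1UL << 63) | (0x12345UL << PAGE_SHIFT) | 0x67;

	/* mask off flag bits at both ends, then shift down to the pfn */
	printf("pfn = %#lx\n", (pte & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
	/* prints "pfn = 0x12345" */
	return 0;
}

That last expression is exactly the form the patch below uses for
pte_pfn() and pmd_pfn() (with PTE_MASK, which equals
PHYSICAL_PAGE_MASK).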

>
> See also Documentation/x86_64/mm.txt
>

Thanks. :-)

I think PHYSICAL_PAGE_MASK and PTE_FILE_MAX_BITS should also be
updated. How about the following patch? Compile & boot tested. (A
quick check of what the new constants work out to follows the diff.)


This patch cleans up __PHYSICAL_MASK and related constants.

- __PHYSICAL_MASK_SHIFT is changed to 52 to reflect the architectural
physical address limit.

- PHYSICAL_PAGE_MASK is fixed to properly mask pfn bits of page table
entries.

- PTE_FILE_MAX_BITS is fixed to properly reflect the bits available in
a page table entry (40 bits).

- Macros dealing with page table entries are modified to use PTE_MASK
(which equals PHYSICAL_PAGE_MASK) universally when extracting the pfn,
for consistency.

Signed-off-by: Tejun Heo <htejun@xxxxxxxxx>

diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -11,7 +11,7 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
+#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)

#define THREAD_ORDER 1
#ifdef __ASSEMBLY__
@@ -81,7 +81,7 @@ typedef struct { unsigned long pgprot; }
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-#define __PHYSICAL_MASK_SHIFT 46
+#define __PHYSICAL_MASK_SHIFT 52
#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT 48
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -101,7 +101,7 @@ static inline void pgd_clear (pgd_t * pg
}

#define pud_page(pud) \
-((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+((unsigned long) __va(pud_val(pud) & PTE_MASK))

#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))

@@ -245,7 +245,7 @@ static inline unsigned long pud_bad(pud_
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pte_pfn(x) ((pte_val(x) & PTE_MASK) >> PAGE_SHIFT)

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
@@ -352,11 +352,11 @@ static inline pud_t *__pud_offset_k(pud_
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pmd_pfn(x) ((pmd_val(x) & PTE_MASK) >> PAGE_SHIFT)

-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pte_to_pgoff(pte) ((pte_val(pte) & PTE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
-#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+#define PTE_FILE_MAX_BITS (__PHYSICAL_MASK_SHIFT - PAGE_SHIFT)

/* PTE - Level 1 access. */
