[PATCH 1/2] mm/hmm: a few more C style and comment cleanups

From: Ralph Campbell
Date: Tue Jul 23 2019 - 19:30:36 EST


A few more comment and minor programming-style cleanups.
There should be no functional changes.

Signed-off-by: Ralph Campbell <rcampbell@xxxxxxxxxx>
Cc: "JÃrÃme Glisse" <jglisse@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
---
mm/hmm.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index b810a4fa3de9..8271f110c243 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -32,7 +32,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
* hmm_get_or_create - register HMM against an mm (HMM internal)
*
* @mm: mm struct to attach to
- * Returns: returns an HMM object, either by referencing the existing
+ * Return: an HMM object, either by referencing the existing
* (per-process) object, or by creating a new one.
*
* This is not intended to be used directly by device drivers. If mm already
@@ -323,8 +323,8 @@ static int hmm_pfns_bad(unsigned long addr,
}

/*
- * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
- * @start: range virtual start address (inclusive)
+ * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
* @fault: should we fault or not ?
* @write_fault: write fault ?
@@ -374,9 +374,9 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
/*
* So we not only consider the individual per page request we also
* consider the default flags requested for the range. The API can
- * be use in 2 fashions. The first one where the HMM user coalesce
- * multiple page fault into one request and set flags per pfns for
- * of those faults. The second one where the HMM user want to pre-
+ * be used 2 ways. The first one where the HMM user coalesces
+ * multiple page faults into one request and sets flags per pfn for
+ * those faults. The second one where the HMM user wants to pre-
* fault a range with specific flags. For the latter one it is a
* waste to have the user pre-fill the pfn arrays with a default
* flags value.
@@ -386,7 +386,7 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
/* We aren't ask to do anything ... */
if (!(pfns & range->flags[HMM_PFN_VALID]))
return;
- /* If this is device memory than only fault if explicitly requested */
+ /* If this is device memory then only fault if explicitly requested */
if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
/* Do we fault on device memory ? */
if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
@@ -500,7 +500,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
hmm_vma_walk->last = end;
return 0;
#else
- /* If THP is not enabled then we should never reach that code ! */
+ /* If THP is not enabled then we should never reach this code ! */
return -EINVAL;
#endif
}
@@ -624,13 +624,12 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
pte_t *ptep;
pmd_t pmd;

-
again:
pmd = READ_ONCE(*pmdp);
if (pmd_none(pmd))
return hmm_vma_walk_hole(start, end, walk);

- if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
+ if (pmd_huge(pmd) && is_vm_hugetlb_page(vma))
return hmm_pfns_bad(start, end, walk);

if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
@@ -655,11 +654,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,

if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
- * No need to take pmd_lock here, even if some other threads
+ * No need to take pmd_lock here, even if some other thread
* is splitting the huge pmd we will get that event through
* mmu_notifier callback.
*
- * So just read pmd value and check again its a transparent
+ * So just read pmd value and check again it's a transparent
* huge or device mapping one and compute corresponding pfn
* values.
*/
@@ -673,7 +672,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
}

/*
- * We have handled all the valid case above ie either none, migration,
+ * We have handled all the valid cases above ie either none, migration,
* huge or transparent huge. At this point either it is a valid pmd
* entry pointing to pte directory or it is a bad pmd that will not
* recover.
@@ -793,10 +792,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
int ret = 0;

- size = 1UL << huge_page_shift(h);
+ size = huge_page_size(h);
mask = size - 1;
if (range->page_shift != PAGE_SHIFT) {
- /* Make sure we are looking at full page. */
+ /* Make sure we are looking at a full page. */
if (start & mask)
return -EINVAL;
if (end < (start + size))
@@ -807,8 +806,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
size = PAGE_SIZE;
}

-
- ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+ ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);

i = (start - range->start) >> range->page_shift;
@@ -857,7 +855,7 @@ static void hmm_pfns_clear(struct hmm_range *range,
* @start: start virtual address (inclusive)
* @end: end virtual address (exclusive)
* @page_shift: expect page shift for the range
- * Returns 0 on success, -EFAULT if the address space is no longer valid
+ * Return: 0 on success, -EFAULT if the address space is no longer valid
*
* Track updates to the CPU page table see include/linux/hmm.h
*/
--
2.20.1