[PATCH] mm: Fix some comment errors

From: Quanfa Fu
Date: Sun Oct 31 2021 - 02:20:10 EST


Fix some comment errors (typos and wording) in mm/khugepaged.c,
mm/memory-failure.c, mm/slab_common.c and mm/swap.c. No functional change.

Signed-off-by: Quanfa Fu <fuqf0919@xxxxxxxxx>
---
mm/khugepaged.c | 2 +-
mm/memory-failure.c | 4 ++--
mm/slab_common.c | 2 +-
mm/swap.c | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8a8b3aa92937..f482a7861141 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1306,7 +1306,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
/*
* Record which node the original page is from and save this
* information to khugepaged_node_load[].
- * Khupaged will allocate hugepage from the node has the max
+ * Khugepaged will allocate hugepage from the node that has the max
* hit record.
*/
node = page_to_nid(page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bdbbb32211a5..21fa983e52e4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1227,7 +1227,7 @@ static int get_any_page(struct page *p, unsigned long flags)
*
* get_hwpoison_page() takes a page refcount of an error page to handle memory
* error on it, after checking that the error page is in a well-defined state
- * (defined as a page-type we can successfully handle the memor error on it,
+ * (defined as a page-type on which we can successfully handle the memory error,
* such as LRU page and hugetlb page).
*
* Memory error handling could be triggered at any time on any type of page,
@@ -1653,7 +1653,7 @@ int memory_failure(unsigned long pfn, int flags)

/*
* We need/can do nothing about count=0 pages.
- * 1) it's a free page, and therefore in safe hand:
+ * 1) it's a freed page, and therefore in safe hands:
* prep_new_page() will be the gate keeper.
* 2) it's part of a non-compound high order page.
* Implies some kernel user: cannot stop them from
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ec2bb0beed75..e845a8286f2c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -832,7 +832,7 @@ void __init setup_kmalloc_cache_index_table(void)

if (KMALLOC_MIN_SIZE >= 64) {
/*
- * The 96 byte size cache is not used if the alignment
+ * The 96 byte sized cache is not used if the alignment
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
diff --git a/mm/swap.c b/mm/swap.c
index af3cad4e5378..0ab1aa4a79b6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -866,7 +866,7 @@ void lru_cache_disable(void)
* all online CPUs so any calls of lru_cache_disabled wrapped by
* local_lock or preemption disabled would be ordered by that.
* The atomic operation doesn't need to have stronger ordering
- * requirements because that is enforeced by the scheduling
+ * requirements because that is enforced by the scheduling
* guarantees.
*/
__lru_add_drain_all(true);
--
2.25.1