[PATCH v2 3/8] mm: swap: use swap_entries_free() to free swap entry in swap_entry_put_locked()

From: Kemeng Shi
Date: Tue Mar 18 2025 - 02:10:50 EST


In swap_entry_put_locked(), we set the slot to SWAP_HAS_CACHE before
using swap_entries_free() to do the actual swap entry freeing. This
introduces an unnecessary intermediate state.
By calling swap_entries_free() directly in swap_entry_put_locked(), we
can eliminate the need to set the slot to SWAP_HAS_CACHE.
This also makes the behavior of swap_entry_put_locked() more consistent
with other put() operations, which do the actual free work after the
last reference is put.

Signed-off-by: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx>
Reviewed-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
mm/swapfile.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0aa7ce82c013..40e41e514813 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1348,9 +1348,11 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
}

static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
- unsigned long offset,
+ struct swap_cluster_info *ci,
+ swp_entry_t entry,
unsigned char usage)
{
+ unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;

@@ -1382,7 +1384,7 @@ static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
if (usage)
WRITE_ONCE(si->swap_map[offset], usage);
else
- WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+ swap_entries_free(si, ci, entry, 1);

return usage;
}
@@ -1461,9 +1463,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
unsigned char usage;

ci = lock_cluster(si, offset);
- usage = swap_entry_put_locked(si, offset, 1);
- if (!usage)
- swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+ usage = swap_entry_put_locked(si, ci, entry, 1);
unlock_cluster(ci);

return usage;
@@ -1551,8 +1551,8 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,

ci = lock_cluster(si, offset);
do {
- if (!swap_entry_put_locked(si, offset, usage))
- swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+ swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
+ usage);
} while (++offset < end);
unlock_cluster(ci);
}
@@ -1596,12 +1596,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
ci = lock_cluster(si, offset);
if (swap_only_has_cache(si, offset, size))
swap_entries_free(si, ci, entry, size);
- else {
- for (int i = 0; i < size; i++, entry.val++) {
- if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
- swap_entries_free(si, ci, entry, 1);
- }
- }
+ else
+ for (int i = 0; i < size; i++, entry.val++)
+ swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
unlock_cluster(ci);
}

--
2.30.0