Re: [PATCH 1/9] mm: swap: rename __swap_[entry/entries]_free[_locked] to swap_[entry/entries]_put[_locked]
From: Tim Chen
Date: Fri Mar 14 2025 - 16:37:18 EST
On Fri, 2025-03-14 at 05:05 +0800, Kemeng Shi wrote:
> In __swap_entry_free[_locked] and __swap_entries_free, we decrease the
> count first and only free the swap entry if the count drops to zero. This
> behavior is more akin to a put() operation than a free() operation.
> Therefore, rename these functions with "put" instead of "free".
> Additionally, add a "_nr" suffix to swap_entries_put to indicate that the
> input range may span swap clusters.
>
Reviewed-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
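
For readers less familiar with the get/put convention, here is a tiny
userspace sketch (toy names, not the kernel code) of the put-style
semantics the changelog describes: the count is decremented first and
the underlying resource is only released once it reaches zero.

#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
	unsigned char count;	/* analogous to a swap_map usage count */
	void *payload;		/* resource released when count hits zero */
};

/* Return the remaining count; free the payload only when it drops to 0. */
static unsigned char toy_entry_put(struct toy_entry *e)
{
	unsigned char remaining = --e->count;

	if (!remaining) {
		free(e->payload);	/* the actual "free" happens here */
		e->payload = NULL;
	}
	return remaining;
}

int main(void)
{
	struct toy_entry e = { .count = 2, .payload = malloc(16) };

	printf("after first put:  %u\n", (unsigned)toy_entry_put(&e)); /* 1 */
	printf("after second put: %u\n", (unsigned)toy_entry_put(&e)); /* 0, freed */
	return 0;
}
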
> Signed-off-by: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx>
> ---
> mm/swapfile.c | 28 ++++++++++++++--------------
> 1 file changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 628f67974a7c..5a775456e26c 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1347,9 +1347,9 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
> return NULL;
> }
>
> -static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
> - unsigned long offset,
> - unsigned char usage)
> +static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
> + unsigned long offset,
> + unsigned char usage)
> {
> unsigned char count;
> unsigned char has_cache;
> @@ -1453,15 +1453,15 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
> return NULL;
> }
>
> -static unsigned char __swap_entry_free(struct swap_info_struct *si,
> - swp_entry_t entry)
> +static unsigned char swap_entry_put(struct swap_info_struct *si,
> + swp_entry_t entry)
> {
> struct swap_cluster_info *ci;
> unsigned long offset = swp_offset(entry);
> unsigned char usage;
>
> ci = lock_cluster(si, offset);
> - usage = __swap_entry_free_locked(si, offset, 1);
> + usage = swap_entry_put_locked(si, offset, 1);
> if (!usage)
> swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> unlock_cluster(ci);
> @@ -1469,8 +1469,8 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si,
> return usage;
> }
>
> -static bool __swap_entries_free(struct swap_info_struct *si,
> - swp_entry_t entry, int nr)
> +static bool swap_entries_put_nr(struct swap_info_struct *si,
> + swp_entry_t entry, int nr)
> {
> unsigned long offset = swp_offset(entry);
> unsigned int type = swp_type(entry);
> @@ -1501,7 +1501,7 @@ static bool __swap_entries_free(struct swap_info_struct *si,
> fallback:
> for (i = 0; i < nr; i++) {
> if (data_race(si->swap_map[offset + i])) {
> - count = __swap_entry_free(si, swp_entry(type, offset + i));
> + count = swap_entry_put(si, swp_entry(type, offset + i));
> if (count == SWAP_HAS_CACHE)
> has_cache = true;
> } else {
> @@ -1552,7 +1552,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
>
> ci = lock_cluster(si, offset);
> do {
> - if (!__swap_entry_free_locked(si, offset, usage))
> + if (!swap_entry_put_locked(si, offset, usage))
> swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> } while (++offset < end);
> unlock_cluster(ci);
> @@ -1599,7 +1599,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
> swap_entry_range_free(si, ci, entry, size);
> else {
> for (int i = 0; i < size; i++, entry.val++) {
> - if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
> + if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
> swap_entry_range_free(si, ci, entry, 1);
> }
> }
> @@ -1798,7 +1798,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> /*
> * First free all entries in the range.
> */
> - any_only_cache = __swap_entries_free(si, entry, nr);
> + any_only_cache = swap_entries_put_nr(si, entry, nr);
>
> /*
> * Short-circuit the below loop if none of the entries had their
> @@ -1811,7 +1811,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> * Now go back over the range trying to reclaim the swap cache. This is
> * more efficient for large folios because we will only try to reclaim
> * the swap once per folio in the common case. If we do
> - * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
> + * swap_entry_put() and __try_to_reclaim_swap() in the same loop, the
> * latter will get a reference and lock the folio for every individual
> * page but will only succeed once the swap slot for every subpage is
> * zero.
> @@ -3758,7 +3758,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
> * into, carry if so, or else fail until a new continuation page is allocated;
> * when the original swap_map count is decremented from 0 with continuation,
> * borrow from the continuation and report whether it still holds more.
> - * Called while __swap_duplicate() or caller of __swap_entry_free_locked()
> + * Called while __swap_duplicate() or caller of swap_entry_put_locked()
> * holds cluster lock.
> */
> static bool swap_count_continued(struct swap_info_struct *si,