[PATCH v2] mm/swapfile.c: simplify the scan loop in scan_swap_map_slots()

From: Wei Yang
Date: Wed Apr 22 2020 - 17:41:22 EST


After commit c60aa176c6de8 ("swapfile: swap allocation cycle if
nonrot"), swap allocation is cyclic. The current approach implements
this with two separate loops over the upper and lower halves of the
range, which looks a little redundant.

From another point of view, the loop iterates over the [lowest_bit,
highest_bit] range, starting at (offset + 1) and excluding scan_base.
So we can simplify the two loops into one with the condition
(next_offset() != scan_base), where the new helper next_offset() keeps
offset within that range and in the correct order.
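
For illustration, the visiting order can be modelled in user space as
follows (a minimal sketch, not kernel code; lowest/highest stand in
for si->lowest_bit/si->highest_bit, and the concrete values are made
up):

    #include <stdio.h>

    static unsigned long lowest = 3, highest = 9;

    /* same logic as the new helper, minus struct swap_info_struct */
    static unsigned long next_offset(unsigned long *offset,
                                     unsigned long scan_base)
    {
            if (++(*offset) > highest && *offset > scan_base) {
                    *offset = lowest;
                    if (lowest >= scan_base)  /* lower part eaten */
                            return scan_base;
            }
            return *offset;
    }

    int main(void)
    {
            unsigned long scan_base = 6, offset = scan_base;

            /*
             * Prints "7 8 9 3 4 5": the upper part first, then wrap
             * around to the lower part, stopping just before
             * scan_base.
             */
            while (next_offset(&offset, scan_base) != scan_base)
                    printf("%lu ", offset);
            printf("\n");
            return 0;
    }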

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
CC: Hugh Dickins <hughd@xxxxxxxxxx>
CC: "Huang, Ying" <ying.huang@xxxxxxxxx>

---
v2:
* return scan_base if the lower part is eaten
* only start over when iterating on the upper part
---
mm/swapfile.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index f903e5a165d5..0005a4a1c1b4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -729,6 +729,19 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
}
}

+static unsigned long next_offset(struct swap_info_struct *si,
+ unsigned long *offset, unsigned long scan_base)
+{
+ /* only start over when iterating on the upper part */
+ if (++(*offset) > si->highest_bit && *offset > scan_base) {
+ *offset = si->lowest_bit;
+ /* someone has eaten the lower part */
+ if (si->lowest_bit >= scan_base)
+ return scan_base;
+ }
+ return *offset;
+}
+
static int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[])
@@ -876,22 +889,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,

scan:
spin_unlock(&si->lock);
- while (++offset <= si->highest_bit) {
- if (!si->swap_map[offset]) {
- spin_lock(&si->lock);
- goto checks;
- }
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- spin_lock(&si->lock);
- goto checks;
- }
- if (unlikely(--latency_ration < 0)) {
- cond_resched();
- latency_ration = LATENCY_LIMIT;
- }
- }
- offset = si->lowest_bit;
- while (offset < scan_base) {
+ while (next_offset(si, &offset, scan_base) != scan_base) {
if (!si->swap_map[offset]) {
spin_lock(&si->lock);
goto checks;
@@ -904,7 +902,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
cond_resched();
latency_ration = LATENCY_LIMIT;
}
- offset++;
}
spin_lock(&si->lock);

--
2.23.0