[PATCH v13 05/15] mm/damon: Adaptively adjust regions

From: SeongJae Park
Date: Mon May 25 2020 - 05:20:24 EST


From: SeongJae Park <sjpark@xxxxxxxxx>

At the beginning of the monitoring, DAMON constructs the initial regions
by evenly splitting the memory mapped address space of the process into
the user-specified minimal number of regions. In this initial state, the
assumption behind the regions (pages in the same region have similar
access frequencies) usually does not hold, and thus the monitoring
quality could be low. To keep the assumption satisfied as much as
possible, DAMON adaptively merges and splits each region, as sketched
below.
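
One monitoring iteration with this change applied works as below (a
simplified excerpt of the kdamond_fn() hunk in this patch):

    usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

    /* Sample every region and track the peak access count */
    max_nr_accesses = kdamond_check_accesses(ctx);

    if (kdamond_aggregate_interval_passed(ctx)) {
        /* Merge adjacent regions having similar access counts */
        kdamond_merge_regions(ctx, max_nr_accesses / 10);
        /* Report and clear the aggregated access counts */
        kdamond_reset_aggregated(ctx);
        /* Split regions again, within the max_nr_regions bound */
        kdamond_split_regions(ctx);
    }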

For each ``aggregation interval``, it compares the access frequencies of
adjacent regions and merges them if the frequency difference is small.
Then, after it reports and clears the aggregated access frequency of
each region, it splits each region into two (or, in a corner case,
three) regions if the total number of regions is not larger than half of
the user-specified maximum number of regions. Because splitting is gated
on the current total in this way, the number of regions never exceeds
the user-specified maximum.
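
For example, the per-pair merge decision reduces to the check below (a
simplified excerpt of damon_merge_regions_of() in this patch; the
threshold is a tenth of the highest access count observed in the last
interval):

    /* Merge only adjacent regions having similar access counts */
    if (prev->vm_end == r->vm_start &&
        diff_of(prev->nr_accesses, r->nr_accesses) <= thres)
        damon_merge_two_regions(prev, r);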

In this way, DAMON provides best-effort monitoring quality with minimal
overhead while staying within the bounds users set for their
quality/overhead trade-off.
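
A minimal usage sketch of the extended damon_set_attrs() is shown below;
the specific interval and region-count values are arbitrary examples,
not recommendations:

    /* Sample every 5ms, aggregate every 100ms, keep 10-1000 regions */
    if (damon_set_attrs(ctx, 5000, 100000, 10, 1000))
        pr_err("failed to set monitoring attributes\n");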

Signed-off-by: SeongJae Park <sjpark@xxxxxxxxx>
---
include/linux/damon.h | 7 +-
mm/damon.c | 175 +++++++++++++++++++++++++++++++++++++++---
2 files changed, 170 insertions(+), 12 deletions(-)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index 9901545c9c8b..4294640036d8 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -56,6 +56,7 @@ struct damon_task {
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @min_nr_regions: The number of initial monitoring regions.
+ * @max_nr_regions: The maximum number of monitoring regions.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
* not. It aggregates and keeps the access information (number of accesses to
@@ -81,6 +82,7 @@ struct damon_ctx {
unsigned long sample_interval;
unsigned long aggr_interval;
unsigned long min_nr_regions;
+ unsigned long max_nr_regions;

struct timespec64 last_aggregation;

@@ -92,8 +94,9 @@ struct damon_ctx {
};

int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids);
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long min_nr_reg);
+int damon_set_attrs(struct damon_ctx *ctx,
+ unsigned long sample_int, unsigned long aggr_int,
+ unsigned long min_nr_reg, unsigned long max_nr_reg);
int damon_start(struct damon_ctx *ctx);
int damon_stop(struct damon_ctx *ctx);

diff --git a/mm/damon.c b/mm/damon.c
index 1830f19c2b98..68f95b1d75aa 100644
--- a/mm/damon.c
+++ b/mm/damon.c
@@ -332,9 +332,12 @@ static int damon_three_regions_of(struct damon_task *t,
* regions is wasteful. That said, because we can deal with small noises,
* tracking every mapping is not strictly required but could even incur a high
* overhead if the mapping frequently changes or the number of mappings is
- * high. Nonetheless, this may seems very weird. DAMON's dynamic regions
- * adjustment mechanism, which will be implemented with following commit will
- * make this more sense.
+ * high. The adaptive regions adjustment mechanism also helps deal with the
+ * noise by simply identifying the unmapped areas as regions that receive no
+ * access. Meanwhile, applying the real mappings, which could contain many
+ * unmapped areas, would make the adaptive mechanism needlessly complex. That
+ * said, excessively huge unmapped areas inside the monitoring target should
+ * be removed so the adaptive mechanism does not spend time on them.
*
* For the reason, we convert the complex mappings to three distinct regions
* that cover every mapped area of the address space. Also the two gaps
@@ -508,20 +511,25 @@ static void damon_check_access(struct damon_ctx *ctx,
last_addr = r->sampling_addr;
}

-static void kdamond_check_accesses(struct damon_ctx *ctx)
+static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
{
struct damon_task *t;
struct mm_struct *mm;
struct damon_region *r;
+ unsigned int max_nr_accesses = 0;

damon_for_each_task(t, ctx) {
mm = damon_get_mm(t);
if (!mm)
continue;
- damon_for_each_region(r, t)
+ damon_for_each_region(r, t) {
damon_check_access(ctx, mm, r);
+ max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+ }
+
mmput(mm);
}
+ return max_nr_accesses;
}

/*
@@ -570,6 +578,142 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
}
}

+#define sz_damon_region(r) ((r)->vm_end - (r)->vm_start)
+
+/*
+ * Merge two adjacent regions into one region
+ */
+static void damon_merge_two_regions(struct damon_region *l,
+ struct damon_region *r)
+{
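+ /* The merged region's nr_accesses is the size-weighted mean of the two */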
+ l->nr_accesses = (l->nr_accesses * sz_damon_region(l) +
+ r->nr_accesses * sz_damon_region(r)) /
+ (sz_damon_region(l) + sz_damon_region(r));
+ l->vm_end = r->vm_end;
+ damon_destroy_region(r);
+}
+
+#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
+
+/*
+ * Merge adjacent regions having similar access frequencies
+ *
+ * t task affected by merge operation
+ * thres '->nr_accesses' diff threshold for the merge
+ */
+static void damon_merge_regions_of(struct damon_task *t, unsigned int thres)
+{
+ struct damon_region *r, *prev = NULL, *next;
+
+ damon_for_each_region_safe(r, next, t) {
+ if (!prev || prev->vm_end != r->vm_start ||
+ diff_of(prev->nr_accesses, r->nr_accesses) > thres) {
+ prev = r;
+ continue;
+ }
+ damon_merge_two_regions(prev, r);
+ }
+}
+
+/*
+ * Merge adjacent regions having similar access frequencies
+ *
+ * threshold '->nr_accesses' diff threshold for the merge
+ *
+ * This function merges monitoring target regions which are adjacent and whose
+ * access frequencies are similar. This is for minimizing the monitoring
+ * overhead under the dynamically changeable access pattern. If a merge was
+ * unnecessarily made, later 'kdamond_split_regions()' will revert it.
+ */
+static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold)
+{
+ struct damon_task *t;
+
+ damon_for_each_task(t, c)
+ damon_merge_regions_of(t, threshold);
+}
+
+/*
+ * Split a region in two
+ *
+ * r the region to be split
+ * sz_r size of the first sub-region that will be made
+ */
+static void damon_split_region_at(struct damon_ctx *ctx,
+ struct damon_region *r, unsigned long sz_r)
+{
+ struct damon_region *new;
+
+ new = damon_new_region(ctx, r->vm_start + sz_r, r->vm_end);
+ r->vm_end = new->vm_start;
+
+ damon_insert_region(new, r, damon_next_region(r));
+}
+
+/* Split every region in the given task into 'nr_subs' regions */
+static void damon_split_regions_of(struct damon_ctx *ctx,
+ struct damon_task *t, int nr_subs)
+{
+ struct damon_region *r, *next;
+ unsigned long sz_region, sz_sub = 0;
+ int i;
+
+ damon_for_each_region_safe(r, next, t) {
+ sz_region = r->vm_end - r->vm_start;
+
+ for (i = 0; i < nr_subs - 1 &&
+ sz_region > 2 * MIN_REGION; i++) {
+ /*
+ * Randomly select the size of the left sub-region to be
+ * at least 10% and at most 90% of the original region
+ */
+ sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
+ sz_region / 10, MIN_REGION);
+ /* Do not allow blank region */
+ if (sz_sub == 0 || sz_sub >= sz_region)
+ continue;
+
+ damon_split_region_at(ctx, r, sz_sub);
+ sz_region = sz_sub;
+ }
+ }
+}
+
+/*
+ * Split every target region into two randomly-sized regions
+ *
+ * This function splits every target region into two random-sized regions if
+ * the current total number of regions is equal to or smaller than half of
+ * the user-specified maximum number of regions. This is for maximizing the
+ * monitoring accuracy under the dynamically changeable access patterns. If a
+ * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
+ * it.
+ */
+static void kdamond_split_regions(struct damon_ctx *ctx)
+{
+ struct damon_task *t;
+ unsigned int nr_regions = 0;
+ static unsigned int last_nr_regions;
+ int nr_subregions = 2;
+
+ damon_for_each_task(t, ctx)
+ nr_regions += nr_damon_regions(t);
+
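+ /* Split only if the total can stay within the user-specified maximum */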
+ if (nr_regions > ctx->max_nr_regions / 2)
+ return;
+
+ /* If the number of regions did not change, we may be in a corner case */
+ if (last_nr_regions == nr_regions &&
+ nr_regions < ctx->max_nr_regions / 3)
+ nr_subregions = 3;
+
+ damon_for_each_task(t, ctx)
+ damon_split_regions_of(ctx, t, nr_subregions);
+
+ last_nr_regions = nr_regions;
+}
+
/*
* Check whether current monitoring should be stopped
*
@@ -609,6 +753,7 @@ static int kdamond_fn(void *data)
struct damon_ctx *ctx = (struct damon_ctx *)data;
struct damon_task *t;
struct damon_region *r, *next;
+ unsigned int max_nr_accesses = 0;

pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
kdamond_init_regions(ctx);
@@ -617,11 +762,13 @@ static int kdamond_fn(void *data)

usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

- kdamond_check_accesses(ctx);
+ max_nr_accesses = kdamond_check_accesses(ctx);

- if (kdamond_aggregate_interval_passed(ctx))
+ if (kdamond_aggregate_interval_passed(ctx)) {
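+ /* Merge threshold: a tenth of the peak access count in this interval */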
+ kdamond_merge_regions(ctx, max_nr_accesses / 10);
kdamond_reset_aggregated(ctx);
-
+ kdamond_split_regions(ctx);
+ }
}
damon_for_each_task(t, ctx) {
damon_for_each_region_safe(r, next, t)
@@ -731,24 +878,32 @@ int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids)
* @sample_int: time interval between samplings
* @aggr_int: time interval between aggregations
* @min_nr_reg: minimal number of regions
+ * @max_nr_reg: maximum number of regions
*
* This function should not be called while the kdamond is running.
* Every time interval is in micro-seconds.
*
* Return: 0 on success, negative error code otherwise.
*/
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long min_nr_reg)
+int damon_set_attrs(struct damon_ctx *ctx,
+ unsigned long sample_int, unsigned long aggr_int,
+ unsigned long min_nr_reg, unsigned long max_nr_reg)
{
if (min_nr_reg < 3) {
pr_err("min_nr_regions (%lu) must be at least 3\n",
min_nr_reg);
return -EINVAL;
}
+ if (min_nr_reg > max_nr_reg) {
+ pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
+ min_nr_reg, max_nr_reg);
+ return -EINVAL;
+ }

ctx->sample_interval = sample_int;
ctx->aggr_interval = aggr_int;
ctx->min_nr_regions = min_nr_reg;
+ ctx->max_nr_regions = max_nr_reg;

return 0;
}
--
2.17.1