Re: [RFC PATCH v3 07/37] mm/damon/core: apply access reports to high level snapshot
From: JaeJoon Jung
Date: Fri Dec 12 2025 - 08:20:16 EST
On Mon, 8 Dec 2025 at 15:35, SeongJae Park <sj@xxxxxxxxxx> wrote:
>
> Now any DAMON API callers can report their observed access information.
> The DAMON core layer is just ignoring those, though. Update the core to
> use the reported information at building the high level access pattern
> snapshot.
It seems inefficient that the kdamond_check_reported_accesses() function
repeatedly iterates over the entire damon_access_reports[1000] array
with a for loop on every invocation, even when only a few entries are
recent enough to matter.
Assuming CONFIG_HZ and jiffies advance as follows, and the kdamond
sample_interval is 5000us (5ms), the time flow diagram is as follows.
CONFIG_HZ 1000, jiffies == 1ms
damond sample_interval == 5000us (5ms)
reports_len(==): [0 ... 5]
[*]
0 1 2 3 4 5 6 7 8
9 997 998 999
[====|====|====|====|====]-----|----|----|----| .... |------|-------|
jiffies++ 1 2 3 4 5 0 0 0 0
0 0 0
damond_fn(sample interval) -5[0<]
reports_len(==): [997 ... 2]
[*]
0 1 2 3 4 5 6 7 8 9
997 998 999
[======|======]----|----|----|-----|----|----|----| .... [=====|=====]
jiffies++ 1001 1002 3 4 5 6 7 8 9 997
998 999
damond_fn(sample interval)
-5[997<]
It seems that only the section corresponding to the sample interval ([==|==])
needs to be iterated, as shown below. Also, how about shrinking
damon_access_reports[1000] to damon_access_reports[500]? Even if the array
space is reduced from 1000 to 500 entries, it seems the reports can still be
sufficiently recorded and processed within the 5ms sample interval.
static unsigned int kdamond_check_reported_accesses(struct damon_ctx *ctx)
{
- int i;
+ int i = damon_access_reports_len;
+ unsigned int nr = 0;
struct damon_access_report *report;
struct damon_target *t;
@@ -2904,16 +2905,18 @@ static unsigned int
kdamond_check_reported_accesses(struct damon_ctx *ctx)
return 0;
mutex_lock(&damon_access_reports_lock);
- for (i = 0; i < damon_access_reports_len; i++) {
- report = &damon_access_reports[i];
- if (time_before(report->report_jiffies,
- jiffies -
- usecs_to_jiffies(
- ctx->attrs.sample_interval)))
- continue;
+ report = &damon_access_reports[i];
+ while (time_after(report->report_jiffies,
+ jiffies - usecs_to_jiffies(ctx->attrs.sample_interval))) {
damon_for_each_target(t, ctx)
kdamond_apply_access_report(report, t, ctx);
+ if (++nr >= DAMON_ACCESS_REPORTS_CAP)
+ break;
+
+ i = (i == 0) ? (DAMON_ACCESS_REPORTS_CAP - 1) : (i - 1);
+ report = &damon_access_reports[i];
}
+
mutex_unlock(&damon_access_reports_lock);
/* For nr_accesses_bp, absence of access should also be reported. */
return kdamond_apply_zero_access_report(ctx);
}
Thanks,
JaeJoon
>
> Signed-off-by: SeongJae Park <sj@xxxxxxxxxx>
> ---
> include/linux/damon.h | 1 +
> mm/damon/core.c | 68 ++++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 68 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/damon.h b/include/linux/damon.h
> index b8ebb2aa02c8..b04c2e36833a 100644
> --- a/include/linux/damon.h
> +++ b/include/linux/damon.h
> @@ -83,6 +83,7 @@ struct damon_region {
> unsigned int age;
> /* private: Internal value for age calculation. */
> unsigned int last_nr_accesses;
> + bool access_reported;
> };
>
> /**
> diff --git a/mm/damon/core.c b/mm/damon/core.c
> index 296117d5e7f7..a14754a47c7f 100644
> --- a/mm/damon/core.c
> +++ b/mm/damon/core.c
> @@ -137,6 +137,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
>
> region->age = 0;
> region->last_nr_accesses = 0;
> + region->access_reported = false;
>
> return region;
> }
> @@ -2745,6 +2746,68 @@ static void kdamond_init_ctx(struct damon_ctx *ctx)
> }
> }
>
> +static void kdamond_apply_access_report(struct damon_access_report *report,
> + struct damon_target *t, struct damon_ctx *ctx)
> +{
> + struct damon_region *r;
> +
> + /* todo: make search faster, e.g., binary search? */
> + damon_for_each_region(r, t) {
> + if (report->addr < r->ar.start)
> + continue;
> + if (r->ar.end < report->addr + report->size)
> + continue;
> + if (!r->access_reported)
> + damon_update_region_access_rate(r, true, &ctx->attrs);
> + r->access_reported = true;
> + }
> +}
> +
> +static unsigned int kdamond_apply_zero_access_report(struct damon_ctx *ctx)
> +{
> + struct damon_target *t;
> + struct damon_region *r;
> + unsigned int max_nr_accesses = 0;
> +
> + damon_for_each_target(t, ctx) {
> + damon_for_each_region(r, t) {
> + if (r->access_reported)
> + r->access_reported = false;
> + else
> + damon_update_region_access_rate(r, false,
> + &ctx->attrs);
> + max_nr_accesses = max(max_nr_accesses, r->nr_accesses);
> + }
> + }
> + return max_nr_accesses;
> +}
> +
> +static unsigned int kdamond_check_reported_accesses(struct damon_ctx *ctx)
> +{
> + int i;
> + struct damon_access_report *report;
> + struct damon_target *t;
> +
> + /* currently damon_access_report supports only physical address */
> + if (damon_target_has_pid(ctx))
> + return 0;
> +
> + mutex_lock(&damon_access_reports_lock);
> + for (i = 0; i < damon_access_reports_len; i++) {
> + report = &damon_access_reports[i];
> + if (time_before(report->report_jiffies,
> + jiffies -
> + usecs_to_jiffies(
> + ctx->attrs.sample_interval)))
> + continue;
> + damon_for_each_target(t, ctx)
> + kdamond_apply_access_report(report, t, ctx);
> + }
> + mutex_unlock(&damon_access_reports_lock);
> + /* For nr_accesses_bp, absence of access should also be reported. */
> + return kdamond_apply_zero_access_report(ctx);
> +}
> +
> /*
> * The monitoring daemon that runs as a kernel thread
> */
> @@ -2790,7 +2853,10 @@ static int kdamond_fn(void *data)
> kdamond_usleep(sample_interval);
> ctx->passed_sample_intervals++;
>
> - if (ctx->ops.check_accesses)
> + /* todo: make these non-exclusive */
> + if (ctx->sample_control.primitives_enabled.page_fault)
> + max_nr_accesses = kdamond_check_reported_accesses(ctx);
> + else if (ctx->ops.check_accesses)
> max_nr_accesses = ctx->ops.check_accesses(ctx);
>
> if (ctx->passed_sample_intervals >= next_aggregation_sis)
> --
> 2.47.3
>