mm/damon/vaddr: use damos->migrate_dests in migrate_{hot,cold}

damos->migrate_dests provides a list of nodes the migrate_{hot,cold}
actions should migrate to, as well as the weights specifying the ratio of
pages that should be migrated to each destination node.

This patch interleaves pages in the migrate_{hot,cold} actions according
to the information provided in damos->migrate_dests, when it is set.  The
interleaving algorithm used is similar to the one used in
weighted_interleave_nid().  If damos->migrate_dests is not provided, the
actions migrate pages to the node specified in damos->target_nid as
before.
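
To make the selection concrete, here is a rough userspace sketch of the
weight-walk (not part of the patch; select_dest() and the example weights
are hypothetical, and the kernel code operates on struct
damos_migrate_dests rather than a bare array):

    /*
     * Mirrors the weighted_interleave_nid()-style selection: the folio's
     * interleave index picks a slot in the repeating weight pattern.
     */
    #include <stdio.h>

    static int select_dest(unsigned long ilx, const unsigned int *weights,
                           int nr_dests)
    {
            unsigned int weight_total = 0, target;
            int i;

            for (i = 0; i < nr_dests; i++)
                    weight_total += weights[i];
            if (!weight_total)
                    return -1;      /* all-zero weights: migrate nothing */

            target = ilx % weight_total;
            for (i = 0; i < nr_dests; i++) {
                    if (target < weights[i])
                            return i;       /* remainder falls in node i's share */
                    target -= weights[i];
            }
            return -1;      /* unreachable while weight_total > 0 */
    }

    int main(void)
    {
            const unsigned int weights[] = { 3, 1 };        /* a 3:1 split */
            unsigned long ilx;

            /* ilx 0..7 maps to destinations 0 0 0 1 0 0 0 1 */
            for (ilx = 0; ilx < 8; ilx++)
                    printf("ilx %lu -> dest %d\n", ilx,
                           select_dest(ilx, weights, 2));
            return 0;
    }

With weights {3, 1}, three of every four consecutive interleave indices
land on the first destination and the fourth lands on the second.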

Link: https://lkml.kernel.org/r/20250709005952.17776-12-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -611,11 +611,69 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
+struct damos_va_migrate_private {
+	struct list_head *migration_lists;
+	struct damos_migrate_dests *dests;
+};
+
+/*
+ * Place the given folio in the migration_list corresponding to where the folio
+ * should be migrated.
+ *
+ * The algorithm used here is similar to weighted_interleave_nid()
+ */
+static void damos_va_migrate_dests_add(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct damos_migrate_dests *dests,
+		struct list_head *migration_lists)
+{
+	pgoff_t ilx;
+	int order;
+	unsigned int target;
+	unsigned int weight_total = 0;
+	int i;
+
+	/*
+	 * If dests is empty, there is only one migration list corresponding
+	 * to s->target_nid.
+	 */
+	if (!dests->nr_dests) {
+		i = 0;
+		goto isolate;
+	}
+
+	order = folio_order(folio);
+	ilx = vma->vm_pgoff >> order;
+	ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
+
+	for (i = 0; i < dests->nr_dests; i++)
+		weight_total += dests->weight_arr[i];
+
+	/* If the total weights are somehow 0, don't migrate at all */
+	if (!weight_total)
+		return;
+
+	target = ilx % weight_total;
+	for (i = 0; i < dests->nr_dests; i++) {
+		if (target < dests->weight_arr[i])
+			break;
+		target -= dests->weight_arr[i];
+	}
+
+isolate:
+	if (!folio_isolate_lru(folio))
+		return;
+
+	list_add(&folio->lru, &migration_lists[i]);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
-	struct list_head *migration_list = walk->private;
+	struct damos_va_migrate_private *priv = walk->private;
+	struct list_head *migration_lists = priv->migration_lists;
+	struct damos_migrate_dests *dests = priv->dests;
 	struct folio *folio;
 	spinlock_t *ptl;
 	pmd_t pmde;
@@ -633,12 +691,9 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	if (!folio)
 		goto unlock;
 
-	if (!folio_isolate_lru(folio))
-		goto put_folio;
+	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				   migration_lists);
 
-	list_add(&folio->lru, migration_list);
-
-put_folio:
 	folio_put(folio);
 unlock:
 	spin_unlock(ptl);
@@ -651,7 +706,9 @@ unlock:
 static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
-	struct list_head *migration_list = walk->private;
+	struct damos_va_migrate_private *priv = walk->private;
+	struct list_head *migration_lists = priv->migration_lists;
+	struct damos_migrate_dests *dests = priv->dests;
 	struct folio *folio;
 	pte_t ptent;
 
@@ -663,12 +720,9 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 	if (!folio)
 		return 0;
 
-	if (!folio_isolate_lru(folio))
-		goto out;
+	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				   migration_lists);
 
-	list_add(&folio->lru, migration_list);
-
-out:
 	folio_put(folio);
 	return 0;
 }
@@ -721,26 +775,48 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 		struct damon_region *r, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
-	LIST_HEAD(folio_list);
+	struct damos_va_migrate_private priv;
 	struct mm_struct *mm;
+	int nr_dests;
+	int nid;
+	bool use_target_nid;
 	unsigned long applied = 0;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_migrate_pmd_entry,
 		.pte_entry = damos_va_migrate_pte_entry,
 		.walk_lock = PGWALK_RDLOCK,
 	};
 
-	mm = damon_get_mm(target);
-	if (!mm)
+	use_target_nid = dests->nr_dests == 0;
+	nr_dests = use_target_nid ? 1 : dests->nr_dests;
+
+	priv.dests = dests;
+	priv.migration_lists = kmalloc_array(nr_dests,
+			sizeof(*priv.migration_lists), GFP_KERNEL);
+	if (!priv.migration_lists)
 		return 0;
 
+	for (int i = 0; i < nr_dests; i++)
+		INIT_LIST_HEAD(&priv.migration_lists[i]);
+
+	mm = damon_get_mm(target);
+	if (!mm)
+		goto free_lists;
+
 	mmap_read_lock(mm);
-	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
+	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
 	mmap_read_unlock(mm);
 	mmput(mm);
 
-	applied = damon_migrate_pages(&folio_list, s->target_nid);
-	cond_resched();
+	for (int i = 0; i < nr_dests; i++) {
+		nid = use_target_nid ? s->target_nid : dests->node_id_arr[i];
+		applied += damon_migrate_pages(&priv.migration_lists[i], nid);
+		cond_resched();
+	}
+
+free_lists:
+	kfree(priv.migration_lists);
 
 	return applied * PAGE_SIZE;
 }
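
As a worked example of the index arithmetic in damos_va_migrate_dests_add()
above (again a hypothetical userspace sketch; PAGE_SHIFT, the addresses, and
the folio order are illustrative assumptions, not values from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assuming 4 KiB base pages */

    int main(void)
    {
            unsigned long vm_pgoff = 0x100;            /* VMA offset into the file, in pages */
            unsigned long vm_start = 0x7f0000400000UL; /* VMA start address */
            unsigned long addr     = 0x7f0000600000UL; /* folio's address within the VMA */
            int order = 9;                             /* a 2 MiB THP folio */
            unsigned long ilx;

            /*
             * Same arithmetic as the kernel code: scale both offsets down to
             * units of one folio so consecutive folios get consecutive
             * interleave indices.
             */
            ilx = vm_pgoff >> order;                          /* 0x100 >> 9 = 0 */
            ilx += (addr - vm_start) >> (PAGE_SHIFT + order); /* 0x200000 >> 21 = 1 */

            printf("ilx = %lu\n", ilx);                       /* prints "ilx = 1" */
            return 0;
    }

The resulting ilx is what feeds the weight-walk, so folios that are
contiguous in a mapping rotate through the destination nodes in proportion
to their weights.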