Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
15 hotfixes. 11 are cc:stable and the remainder address post-6.16 issues
or aren't considered necessary for -stable kernels. 13 of these fixes are
for MM. The usual shower of singletons, plus

 - A 5 patch series from Hugh which addresses various misbehaviors in
   get_user_pages()

 - A 2 patch series from SeongJae which addresses a quite severe issue
   in DAMON

 - A 3 patch series also from SeongJae which completes some fixes for a
   DAMON startup issue

Merge tag 'mm-hotfixes-stable-2025-09-17-21-10' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "15 hotfixes. 11 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels. 13 of these
  fixes are for MM.

  The usual shower of singletons, plus

   - fixes from Hugh to address various misbehaviors in get_user_pages()

   - patches from SeongJae to address a quite severe issue in DAMON

   - another series also from SeongJae which completes some fixes for a
     DAMON startup issue"

* tag 'mm-hotfixes-stable-2025-09-17-21-10' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  zram: fix slot write race condition
  nilfs2: fix CFI failure when accessing /sys/fs/nilfs2/features/*
  samples/damon/mtier: avoid starting DAMON before initialization
  samples/damon/prcl: avoid starting DAMON before initialization
  samples/damon/wsse: avoid starting DAMON before initialization
  MAINTAINERS: add Lance Yang as a THP reviewer
  MAINTAINERS: add Jann Horn as rmap reviewer
  mm/damon/sysfs: use dynamically allocated repeat mode damon_call_control
  mm/damon/core: introduce damon_call_control->dealloc_on_cancel
  mm: folio_may_be_lru_cached() unless folio_test_large()
  mm: revert "mm: vmscan.c: fix OOM on swap stress test"
  mm: revert "mm/gup: clear the LRU flag of a page before adding to LRU batch"
  mm/gup: local lru_add_drain() to avoid lru_add_drain_all()
  mm/gup: check ref_count instead of lru before migration
commit 8b789f2b76

15 changed files with 94 additions and 52 deletions
MAINTAINERS

@@ -16196,6 +16196,7 @@ R: Rik van Riel <riel@surriel.com>
 R:	Liam R. Howlett <Liam.Howlett@oracle.com>
 R:	Vlastimil Babka <vbabka@suse.cz>
 R:	Harry Yoo <harry.yoo@oracle.com>
+R:	Jann Horn <jannh@google.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	include/linux/rmap.h
@@ -16240,6 +16241,7 @@ R: Nico Pache <npache@redhat.com>
 R:	Ryan Roberts <ryan.roberts@arm.com>
 R:	Dev Jain <dev.jain@arm.com>
 R:	Barry Song <baohua@kernel.org>
+R:	Lance Yang <lance.yang@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 W:	http://www.linux-mm.org
drivers/block/zram/zram_drv.c

@@ -1795,6 +1795,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
 				  u32 index)
 {
 	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
 	zram_set_flag(zram, index, ZRAM_SAME);
 	zram_set_handle(zram, index, fill);
 	zram_slot_unlock(zram, index);
@@ -1832,6 +1833,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 	kunmap_local(src);
 
 	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
 	zram_set_flag(zram, index, ZRAM_HUGE);
 	zram_set_handle(zram, index, handle);
 	zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1855,11 +1857,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	unsigned long element;
 	bool same_filled;
 
-	/* First, free memory allocated to this slot (if any) */
-	zram_slot_lock(zram, index);
-	zram_free_page(zram, index);
-	zram_slot_unlock(zram, index);
-
 	mem = kmap_local_page(page);
 	same_filled = page_same_filled(mem, &element);
 	kunmap_local(mem);
@@ -1901,6 +1898,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	zcomp_stream_put(zstrm);
 
 	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
 	zram_set_handle(zram, index, handle);
 	zram_set_obj_size(zram, index, comp_len);
 	zram_slot_unlock(zram, index);
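Why the zram hunks above matter: the old code freed the slot in one locked section (up front in zram_write_page()) and repopulated it in a second locked section, leaving a window in which a concurrent path could observe an emptied or stale slot. Below is an illustrative user-space model of that shape, not zram code; the slot/pthread names are invented. The fix corresponds to doing the free and the re-fill inside a single critical section.

/* Illustrative user-space model of the zram slot-write race (not kernel code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot {
	pthread_mutex_t lock;
	char *data;			/* stands in for the slot's handle/flags */
};

/* Old shape: free in one critical section, repopulate in another.
 * Between the two, another thread can see an empty/stale slot. */
static void slot_write_racy(struct slot *s, const char *new_data)
{
	pthread_mutex_lock(&s->lock);
	free(s->data);
	s->data = NULL;
	pthread_mutex_unlock(&s->lock);	/* <-- window for a concurrent reader */

	pthread_mutex_lock(&s->lock);
	s->data = strdup(new_data);
	pthread_mutex_unlock(&s->lock);
}

/* Fixed shape, mirroring the patch: free the old contents and install the
 * new ones under one and the same lock acquisition. */
static void slot_write(struct slot *s, const char *new_data)
{
	pthread_mutex_lock(&s->lock);
	free(s->data);
	s->data = strdup(new_data);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER, .data = NULL };

	slot_write(&s, "compressed object");
	slot_write_racy(&s, "another object");	/* shown only for contrast */
	printf("%s\n", s.data);
	free(s.data);
	return 0;
}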
fs/nilfs2/sysfs.c

@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
  ************************************************************************/
 
 static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
-					    struct attribute *attr, char *buf)
+					    struct kobj_attribute *attr, char *buf)
 {
 	return sysfs_emit(buf, "%d.%d\n",
 			NILFS_CURRENT_REV, NILFS_MINOR_REV);
@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
 	"(1) revision\n\tshow current revision of NILFS file system driver.\n";
 
 static ssize_t nilfs_feature_README_show(struct kobject *kobj,
-					 struct attribute *attr,
+					 struct kobj_attribute *attr,
					 char *buf)
 {
	return sysfs_emit(buf, features_readme_str);
fs/nilfs2/sysfs.h

@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
 	struct completion sg_segments_kobj_unregister;
 };
 
-#define NILFS_COMMON_ATTR_STRUCT(name) \
+#define NILFS_KOBJ_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
 	struct attribute attr; \
-	ssize_t (*show)(struct kobject *, struct attribute *, \
+	ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
			char *); \
-	ssize_t (*store)(struct kobject *, struct attribute *, \
+	ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
			const char *, size_t); \
 }
 
-NILFS_COMMON_ATTR_STRUCT(feature);
+NILFS_KOBJ_ATTR_STRUCT(feature);
 
 #define NILFS_DEV_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
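Background for the two nilfs2 hunks above, with a hedged example. Under kernel CFI, an indirect call is only permitted when the target function's prototype matches the function-pointer type used at the call site. The /sys/fs/nilfs2/features/* files appear to be dispatched through the generic kobject sysfs ops, which invoke ->show() with a struct kobj_attribute * argument, so handlers declared with struct attribute * have a mismatched signature and trip CFI. The module below is a minimal, self-contained sketch of the now-matching prototype; the cfi_demo names are invented for illustration and this is not nilfs code.

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static struct kobject *demo_kobj;

/* show() takes struct kobj_attribute *, matching what kobj_sysfs_ops call. */
static ssize_t revision_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d.%d\n", 2, 0);
}

static struct kobj_attribute revision_attr = __ATTR_RO(revision);

static int __init demo_init(void)
{
	int err;

	/* Creates /sys/kernel/cfi_demo/revision for this demo kobject. */
	demo_kobj = kobject_create_and_add("cfi_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;

	err = sysfs_create_file(demo_kobj, &revision_attr.attr);
	if (err)
		kobject_put(demo_kobj);
	return err;
}

static void __exit demo_exit(void)
{
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");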
include/linux/damon.h

@@ -636,6 +636,7 @@ struct damon_operations {
  * @data:	Data that will be passed to @fn.
  * @repeat:	Repeat invocations.
  * @return_code:	Return code from @fn invocation.
+ * @dealloc_on_cancel:	De-allocate when canceled.
  *
  * Control damon_call(), which requests specific kdamond to invoke a given
  * function. Refer to damon_call() for more details.
@@ -645,6 +646,7 @@ struct damon_call_control {
 	void *data;
 	bool repeat;
 	int return_code;
+	bool dealloc_on_cancel;
	/* private: internal use only */
	/* informs if the kdamond finished handling of the request */
	struct completion completion;
include/linux/swap.h

@@ -385,6 +385,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);
 
+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+	/*
+	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+	 * will be sensible, but nobody has implemented and tested that yet.
+	 */
+	return !folio_test_large(folio);
+}
+
 extern atomic_t lru_disable_count;
 
 static inline bool lru_cache_disabled(void)
mm/damon/core.c

@@ -2479,10 +2479,14 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
 		mutex_lock(&ctx->call_controls_lock);
 		list_del(&control->list);
 		mutex_unlock(&ctx->call_controls_lock);
-		if (!control->repeat)
+		if (!control->repeat) {
 			complete(&control->completion);
-		else
+		} else if (control->canceled && control->dealloc_on_cancel) {
+			kfree(control);
+			continue;
+		} else {
 			list_add(&control->list, &repeat_controls);
+		}
 	}
	control = list_first_entry_or_null(&repeat_controls,
			struct damon_call_control, list);
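The core.c hunk above, together with the sysfs.c hunk that follows, changes who owns a repeat-mode damon_call_control: it is now heap-allocated by the requester, and when the request is canceled (for example because kdamond is stopping) the worker side frees it instead of re-queueing it, since nothing will ever wait on or reuse it. A rough user-space model of that ownership rule is sketched below; the struct name and helpers are invented, and this is not the DAMON code.

/* User-space model of the dealloc_on_cancel ownership rule (not DAMON code). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct call_control {
	int (*fn)(void *data);
	void *data;
	bool repeat;			/* re-run on every worker iteration */
	bool dealloc_on_cancel;		/* worker frees it if it gets canceled */
	bool canceled;
	bool done;			/* stands in for the completion */
};

/* One worker pass over a just-dequeued control, mirroring the hunk above. */
static struct call_control *handle_control(struct call_control *control)
{
	if (!control->canceled)
		control->fn(control->data);

	if (!control->repeat) {
		control->done = true;	/* complete(): the submitter owns it */
		return NULL;
	} else if (control->canceled && control->dealloc_on_cancel) {
		free(control);		/* nobody will wait for it: worker frees */
		return NULL;
	}
	return control;			/* keep it queued for the next pass */
}

static int say_hello(void *data)
{
	printf("called with %s\n", (const char *)data);
	return 0;
}

int main(void)
{
	struct call_control *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->fn = say_hello;
	c->data = "heap-allocated control";
	c->repeat = true;
	c->dealloc_on_cancel = true;

	c = handle_control(c);		/* still owned by the "kdamond" loop */
	c->canceled = true;		/* e.g. the worker is being stopped */
	c = handle_control(c);		/* freed here, returns NULL */
	return 0;
}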
mm/damon/sysfs.c

@@ -1534,14 +1534,10 @@ static int damon_sysfs_repeat_call_fn(void *data)
 	return 0;
 }
 
-static struct damon_call_control damon_sysfs_repeat_call_control = {
-	.fn = damon_sysfs_repeat_call_fn,
-	.repeat = true,
-};
-
 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
 {
 	struct damon_ctx *ctx;
+	struct damon_call_control *repeat_call_control;
 	int err;
 
 	if (damon_sysfs_kdamond_running(kdamond))
@@ -1554,18 +1550,29 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
 		damon_destroy_ctx(kdamond->damon_ctx);
 		kdamond->damon_ctx = NULL;
 
+	repeat_call_control = kmalloc(sizeof(*repeat_call_control),
+			GFP_KERNEL);
+	if (!repeat_call_control)
+		return -ENOMEM;
+
 	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
-	if (IS_ERR(ctx))
+	if (IS_ERR(ctx)) {
+		kfree(repeat_call_control);
 		return PTR_ERR(ctx);
+	}
 	err = damon_start(&ctx, 1, false);
 	if (err) {
+		kfree(repeat_call_control);
 		damon_destroy_ctx(ctx);
 		return err;
 	}
 	kdamond->damon_ctx = ctx;
 
-	damon_sysfs_repeat_call_control.data = kdamond;
-	damon_call(ctx, &damon_sysfs_repeat_call_control);
+	repeat_call_control->fn = damon_sysfs_repeat_call_fn;
+	repeat_call_control->data = kdamond;
+	repeat_call_control->repeat = true;
+	repeat_call_control->dealloc_on_cancel = true;
+	damon_call(ctx, repeat_call_control);
 	return err;
 }
mm/gup.c

@@ -2287,8 +2287,8 @@ static unsigned long collect_longterm_unpinnable_folios(
 		struct pages_or_folios *pofs)
 {
 	unsigned long collected = 0;
-	bool drain_allow = true;
 	struct folio *folio;
+	int drained = 0;
 	long i = 0;
 
 	for (folio = pofs_get_folio(pofs, i); folio;
@@ -2307,9 +2307,17 @@ static unsigned long collect_longterm_unpinnable_folios(
 			continue;
 		}
 
-		if (!folio_test_lru(folio) && drain_allow) {
+		if (drained == 0 && folio_may_be_lru_cached(folio) &&
+		    folio_ref_count(folio) !=
+		    folio_expected_ref_count(folio) + 1) {
+			lru_add_drain();
+			drained = 1;
+		}
+		if (drained == 1 && folio_may_be_lru_cached(folio) &&
+		    folio_ref_count(folio) !=
+		    folio_expected_ref_count(folio) + 1) {
 			lru_add_drain_all();
-			drain_allow = false;
+			drained = 2;
 		}
 
 		if (!folio_isolate_lru(folio))
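The two mm/gup.c hunks above replace the old "do one global drain when the LRU flag is clear" heuristic with a check of the folio's reference count and a two-step escalation: first a cheap, CPU-local lru_add_drain(), and only if an unexpected reference is still held, a single expensive lru_add_drain_all(). The user-space sketch below models only that escalation logic; the item struct and drain helpers are stand-ins, not kernel APIs, and per-CPU batches are simplified to per-item counters.

/* User-space model of the escalating drain in collect_longterm_unpinnable_folios(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct item {
	bool may_be_cached;	/* models folio_may_be_lru_cached()           */
	int extra_refs;		/* models ref_count != expected_ref_count + 1 */
};

static void drain_local(struct item *it)	/* models lru_add_drain()     */
{
	if (it->extra_refs)
		it->extra_refs--;	/* pretend this CPU held one reference */
}

static void drain_global(struct item *it)	/* models lru_add_drain_all() */
{
	it->extra_refs = 0;	/* pretend remote CPUs release the rest */
}

static void collect(struct item *items, size_t n)
{
	int drained = 0;	/* 0 = nothing, 1 = local done, 2 = global done */

	for (size_t i = 0; i < n; i++) {
		struct item *it = &items[i];

		if (drained == 0 && it->may_be_cached && it->extra_refs) {
			drain_local(it);	/* cheap: this CPU only */
			drained = 1;
		}
		if (drained == 1 && it->may_be_cached && it->extra_refs) {
			drain_global(it);	/* expensive: all CPUs, at most once */
			drained = 2;
		}
		printf("item %zu: extra_refs=%d\n", i, it->extra_refs);
	}
}

int main(void)
{
	struct item items[] = {
		{ .may_be_cached = true,  .extra_refs = 2 },
		{ .may_be_cached = true,  .extra_refs = 1 },
		{ .may_be_cached = false, .extra_refs = 1 },	/* large folio: never drained for */
	};

	collect(items, sizeof(items) / sizeof(items[0]));
	return 0;
}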
mm/mlock.c

@@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio)
 
 	folio_get(folio);
 	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
 		mlock_folio_batch(fbatch);
 	local_unlock(&mlock_fbatch.lock);
 }
@@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio)
 
 	folio_get(folio);
 	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
 		mlock_folio_batch(fbatch);
 	local_unlock(&mlock_fbatch.lock);
 }
@@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio)
 	 */
 	folio_get(folio);
 	if (!folio_batch_add(fbatch, folio) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
 		mlock_folio_batch(fbatch);
 	local_unlock(&mlock_fbatch.lock);
 }
mm/swap.c

@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
@@ -176,14 +180,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }
 
 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
 	unsigned long flags;
 
-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
 	folio_get(folio);
 
 	if (disable_irq)
@@ -191,8 +191,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 	else
 		local_lock(&cpu_fbatches.lock);
 
-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
 	if (disable_irq)
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 	local_unlock(&cpu_fbatches.lock);
 }
 
-#define folio_batch_add_and_move(folio, op, on_lru) \
-	__folio_batch_add_and_move( \
-		&cpu_fbatches.op, \
-		folio, \
-		op, \
-		on_lru, \
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+#define folio_batch_add_and_move(folio, op) \
+	__folio_batch_add_and_move( \
+		&cpu_fbatches.op, \
+		folio, \
+		op, \
+		offsetof(struct cpu_fbatches, op) >= \
+			offsetof(struct cpu_fbatches, lock_irq) \
	)
 
 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }
 
 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }
 
 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 		folio_set_active(folio);
 
-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);
 
@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
 	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() && lru_gen_clear_refs(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }
 
 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
 */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }
 
 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }
 
 void lru_add_drain(void)
mm/vmscan.c

@@ -4507,7 +4507,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
 	}
 
 	/* ineligible */
-	if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
+	if (zone > sc->reclaim_idx) {
 		gen = folio_inc_gen(lruvec, folio, false);
 		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
 		return true;
samples/damon/mtier.c

@@ -208,6 +208,9 @@ static int damon_sample_mtier_enable_store(
 	if (enabled == is_enabled)
 		return 0;
 
+	if (!init_called)
+		return 0;
+
 	if (enabled) {
 		err = damon_sample_mtier_start();
 		if (err)
|
@ -137,6 +137,9 @@ static int damon_sample_prcl_enable_store(
|
|||
if (enabled == is_enabled)
|
||||
return 0;
|
||||
|
||||
if (!init_called)
|
||||
return 0;
|
||||
|
||||
if (enabled) {
|
||||
err = damon_sample_prcl_start();
|
||||
if (err)
|
||||
|
|
|
@ -118,6 +118,9 @@ static int damon_sample_wsse_enable_store(
|
|||
return 0;
|
||||
|
||||
if (enabled) {
|
||||
if (!init_called)
|
||||
return 0;
|
||||
|
||||
err = damon_sample_wsse_start();
|
||||
if (err)
|
||||
enabled = false;
|
||||
|
|
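The three samples/damon hunks above all add the same guard: the "enabled" parameter handler bails out while the module's own init has not yet run (for a built-in sample, a parameter given on the kernel command line is parsed before the initcall), and the actual start is left to the init function. A minimal, hypothetical module showing that guard pattern is sketched below; the demo_* names are invented and this is not the sample code itself.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enabled;
static bool init_called;	/* set only once module_init() has run */

static int demo_start(void)
{
	pr_info("demo: starting\n");
	return 0;
}

static int demo_enabled_store(const char *val, const struct kernel_param *kp)
{
	bool was_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;
	if (enabled == was_enabled)
		return 0;

	/* Built-in parameters can be set before the initcall runs;
	 * defer the real work until demo_init() has executed. */
	if (!init_called)
		return 0;

	return enabled ? demo_start() : 0;
}

static const struct kernel_param_ops demo_enabled_ops = {
	.set = demo_enabled_store,
	.get = param_get_bool,
};
module_param_cb(enabled, &demo_enabled_ops, &enabled, 0600);

static int __init demo_init(void)
{
	init_called = true;
	return enabled ? demo_start() : 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");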