mm: migrate high-order folios in swap cache correctly
commit fc346d0a70a13d52fe1c4bc49516d83a42cd7c4c upstream.
Large folios occupy N consecutive entries in the swap cache instead of
using multi-index entries like the page cache.  However, if a large folio
is re-added to the LRU list, it can be migrated.  The migration code was
not aware of the difference between the swap cache and the page cache and
assumed that a single xas_store() would be sufficient.

This leaves potentially many stale pointers to the now-migrated folio in
the swap cache, which can lead to almost arbitrary data corruption in the
future.  This can also manifest as infinite loops with the RCU read lock
held.
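
For illustration only, a minimal userspace sketch of the failure mode
(hypothetical names; a plain array stands in for the XArray-backed swap
cache, and struct folio here is a stub, not the kernel type):

#include <assert.h>
#include <stdio.h>

#define NR_PAGES 4	/* an order-2 folio occupies 4 swap cache slots */

struct folio { int id; };

int main(void)
{
	struct folio old = { .id = 1 }, new = { .id = 2 };
	struct folio *swap_cache[NR_PAGES];
	int i;

	/*
	 * A large folio is stored as N consecutive entries, not as one
	 * multi-index entry the way the page cache stores it.
	 */
	for (i = 0; i < NR_PAGES; i++)
		swap_cache[i] = &old;

	/* Buggy migration: a single store updates only the first slot. */
	swap_cache[0] = &new;

	/* Slots 1..N-1 still point at the now-migrated folio. */
	for (i = 1; i < NR_PAGES; i++)
		assert(swap_cache[i] == &old);

	/* Fixed migration: store the replacement at every index. */
	for (i = 0; i < NR_PAGES; i++)
		swap_cache[i] = &new;

	for (i = 0; i < NR_PAGES; i++)
		assert(swap_cache[i] == &new);

	printf("no stale swap cache entries remain\n");
	return 0;
}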
[willy@infradead.org: modifications to the changelog & tweaked the fix]
Fixes: 3417013e0d ("mm/migrate: Add folio_migrate_mapping()")
Link: https://lkml.kernel.org/r/20231214045841.961776-1-willy@infradead.org
Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reported-by: Charan Teja Kalla <quic_charante@quicinc.com>
Closes: https://lkml.kernel.org/r/1700569840-17327-1-git-send-email-quic_charante@quicinc.com
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -388,6 +388,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	int dirty;
 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
+	long entries, i;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -425,8 +426,10 @@ int folio_migrate_mapping(struct address_space *mapping,
 			folio_set_swapcache(newfolio);
 			newfolio->private = folio_get_private(folio);
 		}
+		entries = nr;
 	} else {
 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+		entries = 1;
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
@@ -436,7 +439,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 		folio_set_dirty(newfolio);
 	}
 
-	xas_store(&xas, newfolio);
+	/* Swap cache still stores N entries instead of a high-order entry */
+	for (i = 0; i < entries; i++) {
+		xas_store(&xas, newfolio);
+		xas_next(&xas);
+	}
 
 	/*
 	 * Drop cache reference from old page by unfreezing
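
In the swap-backed branch, entries is set to nr, so the new loop rewrites
all N consecutive swap cache slots, with xas_next() advancing the cursor
one index at a time.  In the page cache branch, entries stays 1, which
preserves the old behaviour: there a large folio is a single multi-index
entry, so one xas_store() already covers the whole range.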