Mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
(synced 2025-09-18 22:14:16 +00:00)
mm: cma: set early_pfn and bitmap as a union in cma_memrange
Since early_pfn and bitmap are never used at the same time, they can be defined as a union to reduce the size of the data structure. This change can save 8 * u64 entries per CMA.

Link: https://lkml.kernel.org/r/20250509083528.1360952-1-hezhongkun.hzk@bytedance.com
Signed-off-by: Zhongkun He <hezhongkun.hzk@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
2616b37032
commit
83b6d498d0
2 changed files with 10 additions and 7 deletions
11
mm/cma.c
11
mm/cma.c
|
@@ -143,13 +143,14 @@ bool cma_validate_zones(struct cma *cma)
|
||||||
|
|
||||||
static void __init cma_activate_area(struct cma *cma)
|
static void __init cma_activate_area(struct cma *cma)
|
||||||
{
|
{
|
||||||
unsigned long pfn, end_pfn;
|
unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES];
|
||||||
int allocrange, r;
|
int allocrange, r;
|
||||||
struct cma_memrange *cmr;
|
struct cma_memrange *cmr;
|
||||||
unsigned long bitmap_count, count;
|
unsigned long bitmap_count, count;
|
||||||
|
|
||||||
for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
|
for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
|
||||||
cmr = &cma->ranges[allocrange];
|
cmr = &cma->ranges[allocrange];
|
||||||
|
early_pfn[allocrange] = cmr->early_pfn;
|
||||||
cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
|
cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (!cmr->bitmap)
|
if (!cmr->bitmap)
|
||||||
|
@@ -161,13 +162,13 @@ static void __init cma_activate_area(struct cma *cma)
|
||||||
|
|
||||||
for (r = 0; r < cma->nranges; r++) {
|
for (r = 0; r < cma->nranges; r++) {
|
||||||
cmr = &cma->ranges[r];
|
cmr = &cma->ranges[r];
|
||||||
if (cmr->early_pfn != cmr->base_pfn) {
|
if (early_pfn[r] != cmr->base_pfn) {
|
||||||
count = cmr->early_pfn - cmr->base_pfn;
|
count = early_pfn[r] - cmr->base_pfn;
|
||||||
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
|
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
|
||||||
bitmap_set(cmr->bitmap, 0, bitmap_count);
|
bitmap_set(cmr->bitmap, 0, bitmap_count);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count;
|
for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
|
||||||
pfn += pageblock_nr_pages)
|
pfn += pageblock_nr_pages)
|
||||||
init_cma_reserved_pageblock(pfn_to_page(pfn));
|
init_cma_reserved_pageblock(pfn_to_page(pfn));
|
||||||
}
|
}
|
||||||
|
@@ -193,7 +194,7 @@ cleanup:
|
||||||
for (r = 0; r < allocrange; r++) {
|
for (r = 0; r < allocrange; r++) {
|
||||||
cmr = &cma->ranges[r];
|
cmr = &cma->ranges[r];
|
||||||
end_pfn = cmr->base_pfn + cmr->count;
|
end_pfn = cmr->base_pfn + cmr->count;
|
||||||
for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++)
|
for (pfn = early_pfn[r]; pfn < end_pfn; pfn++)
|
||||||
free_reserved_page(pfn_to_page(pfn));
|
free_reserved_page(pfn_to_page(pfn));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
6
mm/cma.h
6
mm/cma.h
|
@@ -25,9 +25,11 @@ struct cma_kobject {
|
||||||
*/
|
*/
|
||||||
struct cma_memrange {
|
struct cma_memrange {
|
||||||
unsigned long base_pfn;
|
unsigned long base_pfn;
|
||||||
unsigned long early_pfn;
|
|
||||||
unsigned long count;
|
unsigned long count;
|
||||||
unsigned long *bitmap;
|
union {
|
||||||
|
unsigned long early_pfn;
|
||||||
|
unsigned long *bitmap;
|
||||||
|
};
|
||||||
#ifdef CONFIG_CMA_DEBUGFS
|
#ifdef CONFIG_CMA_DEBUGFS
|
||||||
struct debugfs_u32_array dfs_bitmap;
|
struct debugfs_u32_array dfs_bitmap;
|
||||||
#endif
|
#endif
|
||||||
|
|
Loading…
Add table
Reference in a new issue