2018-04-11 11:56:55 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2024-12-11 11:06:19 +01:00
|
|
|
#define boot_fmt(fmt) "physmem: " fmt
|
2023-02-02 13:59:36 +01:00
|
|
|
#include <linux/processor.h>
|
2018-10-09 12:23:43 +02:00
|
|
|
#include <linux/errno.h>
|
2018-04-11 11:56:55 +02:00
|
|
|
#include <linux/init.h>
|
2023-02-08 18:11:25 +01:00
|
|
|
#include <asm/physmem_info.h>
|
2023-02-02 13:59:36 +01:00
|
|
|
#include <asm/stacktrace.h>
|
|
|
|
#include <asm/boot_data.h>
|
2018-04-11 19:15:24 +02:00
|
|
|
#include <asm/sparsemem.h>
|
2023-02-02 13:59:36 +01:00
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
#include <asm/sclp.h>
|
2024-11-07 16:11:49 +01:00
|
|
|
#include <asm/asm.h>
|
2023-02-02 13:59:36 +01:00
|
|
|
#include <asm/uv.h>
|
2022-04-23 21:31:22 +02:00
|
|
|
#include "decompressor.h"
|
2018-04-11 11:56:55 +02:00
|
|
|
#include "boot.h"
|
|
|
|
|
2023-02-08 18:11:25 +01:00
|
|
|
/* Passed to the decompressed kernel via the __bootdata mechanism. */
struct physmem_info __bootdata(physmem_info);
/* Number of online ranges still usable for top-down allocations. */
static unsigned int physmem_alloc_ranges;
/* Exclusive upper bound for the next top-down allocation. */
static unsigned long physmem_alloc_pos;
|
2018-04-11 11:56:55 +02:00
|
|
|
|
|
|
|
/* up to 256 storage elements, 1020 subincrements each */
|
|
|
|
#define ENTRIES_EXTENDED_MAX \
|
2023-02-08 18:11:25 +01:00
|
|
|
(256 * (1020 / 2) * sizeof(struct physmem_range))
|
2018-04-11 11:56:55 +02:00
|
|
|
|
2023-02-08 18:11:25 +01:00
|
|
|
static struct physmem_range *__get_physmem_range_ptr(u32 n)
|
2018-04-11 11:56:55 +02:00
|
|
|
{
|
|
|
|
if (n < MEM_INLINED_ENTRIES)
|
2023-02-08 18:11:25 +01:00
|
|
|
return &physmem_info.online[n];
|
2023-02-02 13:59:36 +01:00
|
|
|
if (unlikely(!physmem_info.online_extended)) {
|
|
|
|
physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
|
2024-12-11 11:06:19 +01:00
|
|
|
RR_MEM_DETECT_EXT, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
|
2023-02-02 13:59:36 +01:00
|
|
|
physmem_alloc_pos, true);
|
|
|
|
}
|
2023-02-08 18:11:25 +01:00
|
|
|
return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
|
2018-04-11 11:56:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-02-08 18:11:25 +01:00
|
|
|
* sequential calls to add_physmem_online_range with adjacent memory ranges
|
|
|
|
* are merged together into single memory range.
|
2018-04-11 11:56:55 +02:00
|
|
|
*/
|
2023-02-08 18:11:25 +01:00
|
|
|
void add_physmem_online_range(u64 start, u64 end)
|
2018-04-11 11:56:55 +02:00
|
|
|
{
|
2023-02-08 18:11:25 +01:00
|
|
|
struct physmem_range *range;
|
2018-04-11 11:56:55 +02:00
|
|
|
|
2023-02-08 18:11:25 +01:00
|
|
|
if (physmem_info.range_count) {
|
|
|
|
range = __get_physmem_range_ptr(physmem_info.range_count - 1);
|
|
|
|
if (range->end == start) {
|
|
|
|
range->end = end;
|
2018-04-11 11:56:55 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-08 18:11:25 +01:00
|
|
|
range = __get_physmem_range_ptr(physmem_info.range_count);
|
|
|
|
range->start = start;
|
|
|
|
range->end = end;
|
|
|
|
physmem_info.range_count++;
|
2018-04-11 11:56:55 +02:00
|
|
|
}
|
|
|
|
|
2018-04-11 18:48:20 +02:00
|
|
|
static int __diag260(unsigned long rx1, unsigned long rx2)
|
|
|
|
{
|
2021-06-14 21:57:58 +02:00
|
|
|
union register_pair rx;
|
2024-11-07 16:11:49 +01:00
|
|
|
int cc, exception;
|
2025-02-24 15:59:07 +01:00
|
|
|
unsigned long ry;
|
2018-04-11 18:48:20 +02:00
|
|
|
|
2021-06-14 21:57:58 +02:00
|
|
|
rx.even = rx1;
|
|
|
|
rx.odd = rx2;
|
|
|
|
ry = 0x10; /* storage configuration */
|
2024-11-07 16:11:49 +01:00
|
|
|
exception = 1;
|
2025-03-17 16:22:35 +01:00
|
|
|
asm_inline volatile(
|
2018-04-11 18:48:20 +02:00
|
|
|
" diag %[rx],%[ry],0x260\n"
|
2025-02-24 15:59:07 +01:00
|
|
|
"0: lhi %[exc],0\n"
|
|
|
|
"1:\n"
|
2024-11-07 16:11:49 +01:00
|
|
|
CC_IPM(cc)
|
2025-02-24 15:59:07 +01:00
|
|
|
EX_TABLE(0b, 1b)
|
|
|
|
: CC_OUT(cc, cc), [exc] "+d" (exception), [ry] "+d" (ry)
|
|
|
|
: [rx] "d" (rx.pair)
|
2024-11-07 16:11:49 +01:00
|
|
|
: CC_CLOBBER_LIST("memory"));
|
|
|
|
cc = exception ? -1 : CC_TRANSFORM(cc);
|
|
|
|
return cc == 0 ? ry : -1;
|
2018-04-11 18:48:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int diag260(void)
|
|
|
|
{
|
|
|
|
int rc, i;
|
|
|
|
|
|
|
|
struct {
|
|
|
|
unsigned long start;
|
|
|
|
unsigned long end;
|
|
|
|
} storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
|
|
|
|
|
|
|
|
memset(storage_extents, 0, sizeof(storage_extents));
|
|
|
|
rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
|
|
|
|
if (rc == -1)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
|
2023-02-08 18:11:25 +01:00
|
|
|
add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
|
2018-04-11 18:48:20 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
s390/physmem_info: Query diag500(STORAGE LIMIT) to support QEMU/KVM memory devices
To support memory devices under QEMU/KVM, such as virtio-mem,
we have to prepare our kernel virtual address space accordingly and
have to know the highest possible physical memory address we might see
later: the storage limit. The good old SCLP interface is not suitable for
this use case.
In particular, memory owned by memory devices has no relationship to
storage increments, it is always detected using the device driver, and
unaware OSes (no driver) must never try making use of that memory.
Consequently this memory is located outside of the "maximum storage
increment"-indicated memory range.
Let's use our new diag500 STORAGE_LIMIT subcode to query this storage
limit that can exceed the "maximum storage increment", and use the
existing interfaces (i.e., SCLP) to obtain information about the initial
memory that is not owned+managed by memory devices.
If a hypervisor does not support such memory devices, the address exposed
through diag500 STORAGE_LIMIT will correspond to the maximum storage
increment exposed through SCLP.
To teach kdump on s390 to include memory owned by memory devices, there
will be ways to query the relevant memory ranges from the device via a
driver running in special kdump mode (like virtio-mem already implements
to filter /proc/vmcore access so we don't end up reading from unplugged
device blocks).
Update setup_ident_map_size(), to clarify that there can be more than
just online and standby memory.
Tested-by: Mario Casquero <mcasquer@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Tested-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Link: https://lore.kernel.org/r/20241025141453.1210600-4-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2024-10-25 16:14:48 +02:00
|
|
|
#define DIAG500_SC_STOR_LIMIT 4
|
|
|
|
|
|
|
|
static int diag500_storage_limit(unsigned long *max_physmem_end)
|
|
|
|
{
|
|
|
|
unsigned long storage_limit;
|
|
|
|
|
2025-03-17 16:22:35 +01:00
|
|
|
asm_inline volatile(
|
2025-02-24 15:59:09 +01:00
|
|
|
" lghi %%r1,%[subcode]\n"
|
|
|
|
" lghi %%r2,0\n"
|
|
|
|
" diag %%r2,%%r4,0x500\n"
|
|
|
|
"0: lgr %[slimit],%%r2\n"
|
|
|
|
EX_TABLE(0b, 0b)
|
|
|
|
: [slimit] "=d" (storage_limit)
|
|
|
|
: [subcode] "i" (DIAG500_SC_STOR_LIMIT)
|
s390/physmem_info: Query diag500(STORAGE LIMIT) to support QEMU/KVM memory devices
To support memory devices under QEMU/KVM, such as virtio-mem,
we have to prepare our kernel virtual address space accordingly and
have to know the highest possible physical memory address we might see
later: the storage limit. The good old SCLP interface is not suitable for
this use case.
In particular, memory owned by memory devices has no relationship to
storage increments, it is always detected using the device driver, and
unaware OSes (no driver) must never try making use of that memory.
Consequently this memory is located outside of the "maximum storage
increment"-indicated memory range.
Let's use our new diag500 STORAGE_LIMIT subcode to query this storage
limit that can exceed the "maximum storage increment", and use the
existing interfaces (i.e., SCLP) to obtain information about the initial
memory that is not owned+managed by memory devices.
If a hypervisor does not support such memory devices, the address exposed
through diag500 STORAGE_LIMIT will correspond to the maximum storage
increment exposed through SCLP.
To teach kdump on s390 to include memory owned by memory devices, there
will be ways to query the relevant memory ranges from the device via a
driver running in special kdump mode (like virtio-mem already implements
to filter /proc/vmcore access so we don't end up reading from unplugged
device blocks).
Update setup_ident_map_size(), to clarify that there can be more than
just online and standby memory.
Tested-by: Mario Casquero <mcasquer@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Tested-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Link: https://lore.kernel.org/r/20241025141453.1210600-4-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2024-10-25 16:14:48 +02:00
|
|
|
: "memory", "1", "2");
|
|
|
|
if (!storage_limit)
|
|
|
|
return -EINVAL;
|
|
|
|
/* Convert inclusive end to exclusive end */
|
|
|
|
*max_physmem_end = storage_limit + 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-11 11:56:55 +02:00
|
|
|
static int tprot(unsigned long addr)
|
|
|
|
{
|
2024-11-07 16:11:49 +01:00
|
|
|
int cc, exception;
|
2018-04-11 11:56:55 +02:00
|
|
|
|
2024-11-07 16:11:49 +01:00
|
|
|
exception = 1;
|
2025-03-17 16:22:35 +01:00
|
|
|
asm_inline volatile(
|
2018-04-11 11:56:55 +02:00
|
|
|
" tprot 0(%[addr]),0\n"
|
2025-02-24 15:59:08 +01:00
|
|
|
"0: lhi %[exc],0\n"
|
|
|
|
"1:\n"
|
2024-11-07 16:11:49 +01:00
|
|
|
CC_IPM(cc)
|
2025-02-24 15:59:08 +01:00
|
|
|
EX_TABLE(0b, 1b)
|
|
|
|
: CC_OUT(cc, cc), [exc] "+d" (exception)
|
|
|
|
: [addr] "a" (addr)
|
2024-11-07 16:11:49 +01:00
|
|
|
: CC_CLOBBER_LIST("memory"));
|
|
|
|
cc = exception ? -EFAULT : CC_TRANSFORM(cc);
|
|
|
|
return cc;
|
2018-04-11 11:56:55 +02:00
|
|
|
}
|
|
|
|
|
2023-01-23 12:49:47 +01:00
|
|
|
static unsigned long search_mem_end(void)
|
2018-04-11 11:56:55 +02:00
|
|
|
{
|
2018-04-11 19:15:24 +02:00
|
|
|
unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
|
|
|
|
unsigned long offset = 0;
|
|
|
|
unsigned long pivot;
|
|
|
|
|
|
|
|
while (range > 1) {
|
|
|
|
range >>= 1;
|
|
|
|
pivot = offset + range;
|
|
|
|
if (!tprot(pivot << 20))
|
|
|
|
offset = pivot;
|
|
|
|
}
|
2023-01-23 12:49:47 +01:00
|
|
|
return (offset + 1) << 20;
|
2018-04-11 11:56:55 +02:00
|
|
|
}
|
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
unsigned long detect_max_physmem_end(void)
|
2018-04-11 11:56:55 +02:00
|
|
|
{
|
2023-01-27 14:03:07 +01:00
|
|
|
unsigned long max_physmem_end = 0;
|
2020-10-19 11:01:33 +02:00
|
|
|
|
s390/physmem_info: Query diag500(STORAGE LIMIT) to support QEMU/KVM memory devices
To support memory devices under QEMU/KVM, such as virtio-mem,
we have to prepare our kernel virtual address space accordingly and
have to know the highest possible physical memory address we might see
later: the storage limit. The good old SCLP interface is not suitable for
this use case.
In particular, memory owned by memory devices has no relationship to
storage increments, it is always detected using the device driver, and
unaware OSes (no driver) must never try making use of that memory.
Consequently this memory is located outside of the "maximum storage
increment"-indicated memory range.
Let's use our new diag500 STORAGE_LIMIT subcode to query this storage
limit that can exceed the "maximum storage increment", and use the
existing interfaces (i.e., SCLP) to obtain information about the initial
memory that is not owned+managed by memory devices.
If a hypervisor does not support such memory devices, the address exposed
through diag500 STORAGE_LIMIT will correspond to the maximum storage
increment exposed through SCLP.
To teach kdump on s390 to include memory owned by memory devices, there
will be ways to query the relevant memory ranges from the device via a
driver running in special kdump mode (like virtio-mem already implements
to filter /proc/vmcore access so we don't end up reading from unplugged
device blocks).
Update setup_ident_map_size(), to clarify that there can be more than
just online and standby memory.
Tested-by: Mario Casquero <mcasquer@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Tested-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Link: https://lore.kernel.org/r/20241025141453.1210600-4-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2024-10-25 16:14:48 +02:00
|
|
|
if (!diag500_storage_limit(&max_physmem_end)) {
|
|
|
|
physmem_info.info_source = MEM_DETECT_DIAG500_STOR_LIMIT;
|
|
|
|
} else if (!sclp_early_get_memsize(&max_physmem_end)) {
|
2023-02-02 13:59:36 +01:00
|
|
|
physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
|
|
|
|
} else {
|
|
|
|
max_physmem_end = search_mem_end();
|
|
|
|
physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
|
|
|
|
}
|
2024-12-11 11:06:19 +01:00
|
|
|
boot_debug("Max physical memory: 0x%016lx (info source: %s)\n", max_physmem_end,
|
|
|
|
get_physmem_info_source());
|
2023-02-02 13:59:36 +01:00
|
|
|
return max_physmem_end;
|
|
|
|
}
|
2018-04-11 18:42:37 +02:00
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
void detect_physmem_online_ranges(unsigned long max_physmem_end)
|
|
|
|
{
|
2024-12-11 11:06:19 +01:00
|
|
|
unsigned long start, end;
|
|
|
|
int i;
|
|
|
|
|
2018-04-11 18:42:37 +02:00
|
|
|
if (!sclp_early_read_storage_info()) {
|
2023-02-08 18:11:25 +01:00
|
|
|
physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
|
s390/physmem_info: Query diag500(STORAGE LIMIT) to support QEMU/KVM memory devices
To support memory devices under QEMU/KVM, such as virtio-mem,
we have to prepare our kernel virtual address space accordingly and
have to know the highest possible physical memory address we might see
later: the storage limit. The good old SCLP interface is not suitable for
this use case.
In particular, memory owned by memory devices has no relationship to
storage increments, it is always detected using the device driver, and
unaware OSes (no driver) must never try making use of that memory.
Consequently this memory is located outside of the "maximum storage
increment"-indicated memory range.
Let's use our new diag500 STORAGE_LIMIT subcode to query this storage
limit that can exceed the "maximum storage increment", and use the
existing interfaces (i.e., SCLP) to obtain information about the initial
memory that is not owned+managed by memory devices.
If a hypervisor does not support such memory devices, the address exposed
through diag500 STORAGE_LIMIT will correspond to the maximum storage
increment exposed through SCLP.
To teach kdump on s390 to include memory owned by memory devices, there
will be ways to query the relevant memory ranges from the device via a
driver running in special kdump mode (like virtio-mem already implements
to filter /proc/vmcore access so we don't end up reading from unplugged
device blocks).
Update setup_ident_map_size(), to clarify that there can be more than
just online and standby memory.
Tested-by: Mario Casquero <mcasquer@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Tested-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Link: https://lore.kernel.org/r/20241025141453.1210600-4-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2024-10-25 16:14:48 +02:00
|
|
|
} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
|
|
|
|
unsigned long online_end;
|
|
|
|
|
|
|
|
if (!sclp_early_get_memsize(&online_end)) {
|
|
|
|
physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
|
|
|
|
add_physmem_online_range(0, online_end);
|
|
|
|
}
|
2023-01-23 12:49:47 +01:00
|
|
|
} else if (!diag260()) {
|
2023-02-08 18:11:25 +01:00
|
|
|
physmem_info.info_source = MEM_DETECT_DIAG260;
|
2023-01-23 12:49:47 +01:00
|
|
|
} else if (max_physmem_end) {
|
2023-02-08 18:11:25 +01:00
|
|
|
add_physmem_online_range(0, max_physmem_end);
|
2023-01-23 12:49:47 +01:00
|
|
|
}
|
2024-12-11 11:06:19 +01:00
|
|
|
boot_debug("Online memory ranges (info source: %s):\n", get_physmem_info_source());
|
|
|
|
for_each_physmem_online_range(i, &start, &end)
|
|
|
|
boot_debug(" online [%d]: 0x%016lx-0x%016lx\n", i, start, end);
|
2023-02-02 13:59:36 +01:00
|
|
|
}
|
2023-01-23 12:49:47 +01:00
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
void physmem_set_usable_limit(unsigned long limit)
|
|
|
|
{
|
|
|
|
physmem_info.usable = limit;
|
|
|
|
physmem_alloc_pos = limit;
|
2024-12-11 11:06:19 +01:00
|
|
|
boot_debug("Usable memory limit: 0x%016lx\n", limit);
|
2023-02-02 13:59:36 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
|
|
|
|
{
|
|
|
|
unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
|
|
|
|
struct reserved_range *range;
|
|
|
|
enum reserved_range_type t;
|
|
|
|
int i;
|
|
|
|
|
2024-11-20 17:07:56 +01:00
|
|
|
boot_emerg("Linux version %s\n", kernel_version);
|
2023-02-02 13:59:36 +01:00
|
|
|
if (!is_prot_virt_guest() && early_command_line[0])
|
2024-11-20 17:07:56 +01:00
|
|
|
boot_emerg("Kernel command line: %s\n", early_command_line);
|
2024-11-24 14:47:58 +01:00
|
|
|
boot_emerg("Out of memory allocating %lu bytes 0x%lx aligned in range %lx:%lx\n",
|
2024-11-20 17:07:56 +01:00
|
|
|
size, align, min, max);
|
|
|
|
boot_emerg("Reserved memory ranges:\n");
|
2023-02-02 13:59:36 +01:00
|
|
|
for_each_physmem_reserved_range(t, range, &start, &end) {
|
2024-11-20 17:07:56 +01:00
|
|
|
boot_emerg("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
|
2023-02-02 13:59:36 +01:00
|
|
|
total_reserved_mem += end - start;
|
|
|
|
}
|
2024-11-24 14:47:58 +01:00
|
|
|
boot_emerg("Usable online memory ranges (info source: %s [%d]):\n",
|
2024-11-20 17:07:56 +01:00
|
|
|
get_physmem_info_source(), physmem_info.info_source);
|
2023-02-02 13:59:36 +01:00
|
|
|
for_each_physmem_usable_range(i, &start, &end) {
|
2024-11-20 17:07:56 +01:00
|
|
|
boot_emerg("%016lx %016lx\n", start, end);
|
2023-02-02 13:59:36 +01:00
|
|
|
total_mem += end - start;
|
2018-04-11 18:54:40 +02:00
|
|
|
}
|
2024-11-24 14:47:58 +01:00
|
|
|
boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
|
2024-11-20 17:07:56 +01:00
|
|
|
total_mem, total_reserved_mem,
|
|
|
|
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
|
2023-02-02 13:59:36 +01:00
|
|
|
print_stacktrace(current_frame_address());
|
2024-11-20 17:07:56 +01:00
|
|
|
boot_emerg(" -- System halted\n");
|
2023-02-02 13:59:36 +01:00
|
|
|
disabled_wait();
|
|
|
|
}
|
2018-04-11 18:54:40 +02:00
|
|
|
|
2024-12-11 11:06:19 +01:00
|
|
|
static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
|
2023-02-02 13:59:36 +01:00
|
|
|
{
|
|
|
|
physmem_info.reserved[type].start = addr;
|
|
|
|
physmem_info.reserved[type].end = addr + size;
|
2018-04-11 11:56:55 +02:00
|
|
|
}
|
2023-01-28 23:55:04 +01:00
|
|
|
|
2024-12-11 11:06:19 +01:00
|
|
|
/* Record a reservation for @type and log it via boot_debug. */
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	_physmem_reserve(type, addr, size);
	boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Reserve:", addr, addr + size,
		   get_rr_type_name(type));
}
|
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
void physmem_free(enum reserved_range_type type)
|
2023-01-28 23:55:04 +01:00
|
|
|
{
|
2024-12-11 11:06:19 +01:00
|
|
|
boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Free:", physmem_info.reserved[type].start,
|
|
|
|
physmem_info.reserved[type].end, get_rr_type_name(type));
|
2023-02-02 13:59:36 +01:00
|
|
|
physmem_info.reserved[type].start = 0;
|
|
|
|
physmem_info.reserved[type].end = 0;
|
|
|
|
}
|
2023-01-28 23:55:04 +01:00
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
|
|
|
|
unsigned long *intersection_start)
|
|
|
|
{
|
|
|
|
unsigned long res_addr, res_size;
|
|
|
|
int t;
|
|
|
|
|
|
|
|
for (t = 0; t < RR_MAX; t++) {
|
|
|
|
if (!get_physmem_reserved(t, &res_addr, &res_size))
|
|
|
|
continue;
|
|
|
|
if (intersects(addr, size, res_addr, res_size)) {
|
|
|
|
*intersection_start = res_addr;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ipl_report_certs_intersects(addr, size, intersection_start);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
|
|
|
|
unsigned long min, unsigned long max,
|
|
|
|
unsigned int from_ranges, unsigned int *ranges_left,
|
|
|
|
bool die_on_oom)
|
|
|
|
{
|
|
|
|
unsigned int nranges = from_ranges ?: physmem_info.range_count;
|
|
|
|
unsigned long range_start, range_end;
|
|
|
|
unsigned long intersection_start;
|
|
|
|
unsigned long addr, pos = max;
|
|
|
|
|
|
|
|
align = max(align, 8UL);
|
|
|
|
while (nranges) {
|
|
|
|
__get_physmem_range(nranges - 1, &range_start, &range_end, false);
|
|
|
|
pos = min(range_end, pos);
|
|
|
|
|
|
|
|
if (round_up(min, align) + size > pos)
|
2023-01-28 23:55:04 +01:00
|
|
|
break;
|
2023-02-02 13:59:36 +01:00
|
|
|
addr = round_down(pos - size, align);
|
|
|
|
if (range_start > addr) {
|
|
|
|
nranges--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
|
|
|
|
pos = intersection_start;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ranges_left)
|
|
|
|
*ranges_left = nranges;
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
if (die_on_oom)
|
|
|
|
die_oom(size, align, min, max);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
|
|
|
|
unsigned long align, unsigned long min, unsigned long max,
|
|
|
|
bool die_on_oom)
|
|
|
|
{
|
|
|
|
unsigned long addr;
|
|
|
|
|
|
|
|
max = min(max, physmem_alloc_pos);
|
|
|
|
addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
|
|
|
|
if (addr)
|
2024-12-11 11:06:19 +01:00
|
|
|
_physmem_reserve(type, addr, size);
|
|
|
|
boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Alloc range:", addr, addr + size,
|
|
|
|
get_rr_type_name(type));
|
2023-02-02 13:59:36 +01:00
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
2024-11-29 02:26:19 +01:00
|
|
|
/*
 * Top-down allocation that extends the reservation of @type. Consecutive
 * allocations of the same type grow the existing reserved range downwards;
 * a non-adjacent allocation chains a copy of the old range via
 * range->chain (the chain node itself is allocated with this allocator).
 * Returns the address, or 0 on failure when !die_on_oom.
 */
unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
			    unsigned long align, bool die_on_oom)
{
	struct reserved_range *range = &physmem_info.reserved[type];
	struct reserved_range *new_range = NULL;
	unsigned int ranges_left;
	unsigned long addr;

	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
				     &ranges_left, die_on_oom);
	if (!addr)
		return 0;
	/* if not a consecutive allocation of the same type or first allocation */
	if (range->start != addr + size) {
		if (range->end) {
			/* Preserve the old range in a chained node, then re-allocate below it */
			addr = __physmem_alloc_range(sizeof(struct reserved_range), 0, 0,
						     physmem_alloc_pos, physmem_alloc_ranges,
						     &ranges_left, true);
			new_range = (struct reserved_range *)addr;
			addr = __physmem_alloc_range(size, align, 0, addr, ranges_left,
						     &ranges_left, die_on_oom);
			if (!addr)
				return 0;
			*new_range = *range;
			range->chain = new_range;
		}
		range->end = addr + size;
	}
	if (type != RR_VMEM) {
		boot_debug("%-14s 0x%016lx-0x%016lx %-20s align 0x%lx split %d\n", "Alloc topdown:",
			   addr, addr + size, get_rr_type_name(type), align, !!new_range);
	}
	range->start = addr;
	physmem_alloc_pos = addr;
	physmem_alloc_ranges = ranges_left;
	return addr;
}
|
2023-02-21 23:08:42 +01:00
|
|
|
|
2024-11-29 02:26:19 +01:00
|
|
|
unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
|
|
|
|
unsigned long align)
|
|
|
|
{
|
|
|
|
return physmem_alloc(type, size, align, true);
|
|
|
|
}
|
|
|
|
|
2023-02-21 23:08:42 +01:00
|
|
|
unsigned long get_physmem_alloc_pos(void)
|
|
|
|
{
|
|
|
|
return physmem_alloc_pos;
|
|
|
|
}
|
2024-12-11 11:06:19 +01:00
|
|
|
|
|
|
|
void dump_physmem_reserved(void)
|
|
|
|
{
|
|
|
|
struct reserved_range *range;
|
|
|
|
enum reserved_range_type t;
|
|
|
|
unsigned long start, end;
|
|
|
|
|
|
|
|
boot_debug("Reserved memory ranges:\n");
|
|
|
|
for_each_physmem_reserved_range(t, range, &start, &end) {
|
|
|
|
if (end) {
|
|
|
|
boot_debug("%-14s 0x%016lx-0x%016lx @%012lx chain %012lx\n",
|
|
|
|
get_rr_type_name(t), start, end, (unsigned long)range,
|
|
|
|
(unsigned long)range->chain);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|