We want to specify flags when hotplugging memory. Let's prepare to pass
flags to memblock_add_node() by adjusting all existing users.

Note that when hotplugging memory, the system is already up and running
and we might have concurrent memblock users: for example, while we're
hotplugging memory, kexec_file code might search for suitable memory
regions to place kexec images. It's important to add the memory
directly to memblock via a single call with the right flags, instead of
adding the memory first and applying flags later: otherwise, concurrent
memblock users might temporarily stumble over memblocks with wrong
flags, which will be important in a follow-up patch that introduces a
new flag to properly handle add_memory_driver_managed().
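
To illustrate the race (a hypothetical sketch, not part of this patch;
names base/size/nid are placeholders): with a two-step sequence, a
concurrent walker can observe the region before its flags are applied,
whereas a single call publishes the region with its final flags:

	/*
	 * Racy two-step variant: between the two calls, a concurrent
	 * walker (e.g., kexec_file scanning for image placement) sees
	 * the region with MEMBLOCK_NONE.
	 */
	memblock_add_node(base, size, nid, MEMBLOCK_NONE);
	memblock_mark_hotplug(base, size);

	/* Single call: the region is never visible without its flags. */
	memblock_add_node(base, size, nid, MEMBLOCK_HOTPLUG);
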
Link: https://lkml.kernel.org/r/20211004093605.5830-4-david@redhat.com
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Shahab Vahedi <shahab@synopsys.com> [arch/arc]
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Jianyong Wu <Jianyong.Wu@arm.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

low_mem_sz = size;
in_use = 1;
- memblock_add_node(base, size, 0);
+ memblock_add_node(base, size, 0, MEMBLOCK_NONE);
} else {
#ifdef CONFIG_HIGHMEM
high_mem_start = base;
high_mem_sz = size;
in_use = 1;
- memblock_add_node(base, size, 1);
+ memblock_add_node(base, size, 1, MEMBLOCK_NONE);
memblock_reserve(base, size);
#endif
}
efi_memmap_walk(find_max_min_low_pfn, NULL);
max_pfn = max_low_pfn;
- memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+ memblock_add_node(0, PFN_PHYS(max_low_pfn), 0, MEMBLOCK_NONE);
find_initrd();
#endif
if (start < end)
- memblock_add_node(__pa(start), end - start, nid);
+ memblock_add_node(__pa(start), end - start, nid, MEMBLOCK_NONE);
return 0;
}
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
- memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
+ memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+ MEMBLOCK_NONE);
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
min_addr = m68k_memory[0].addr;
max_addr = min_addr + m68k_memory[0].size;
- memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
+ memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+ MEMBLOCK_NONE);
for (i = 1; i < m68k_num_memory;) {
if (m68k_memory[i].addr < min_addr) {
printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
continue;
}
- memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
+ memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
+ MEMBLOCK_NONE);
addr = m68k_memory[i].addr + m68k_memory[i].size;
if (addr > max_addr)
max_addr = addr;
(u32)node_id, mem_type, mem_start, mem_size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start_pfn, end_pfn, num_physpages);
- memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(node_psize), node);
+ memblock_add_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(node_psize), node,
+ MEMBLOCK_NONE);
break;
case SYSTEM_RAM_RESERVED:
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
continue;
}
memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
- PFN_PHYS(slot_psize), node);
+ PFN_PHYS(slot_psize), node,
+ MEMBLOCK_NONE);
}
}
}
* part of the System RAM resource.
*/
if (crashk_res.end) {
- memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
+ memblock_add_node(crashk_res.start, resource_size(&crashk_res),
+ 0, MEMBLOCK_NONE);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
#endif
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
* @base: base address of the new region
* @size: size of the new region
* @nid: nid of the new region
+ * @flags: flags of the new region
*
* Add new memblock region [@base, @base + @size) to the "memory"
* type. See memblock_add_range() description for more details.
*
* Return:
* 0 on success, -errno on failure.
*/
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
- int nid)
+ int nid, enum memblock_flags flags)
{
phys_addr_t end = base + size - 1;
- memblock_dbg("%s: [%pa-%pa] nid=%d %pS\n", __func__,
- &base, &end, nid, (void *)_RET_IP_);
+ memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
+ &base, &end, nid, flags, (void *)_RET_IP_);
- return memblock_add_range(&memblock.memory, base, size, nid, 0);
+ return memblock_add_range(&memblock.memory, base, size, nid, flags);
}
/**
mem_hotplug_begin();
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
- ret = memblock_add_node(start, size, nid);
+ ret = memblock_add_node(start, size, nid, MEMBLOCK_NONE);
if (ret)
goto error_mem_hotplug_end;
}
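
For context, a minimal sketch of how add_memory_resource() could use
the new parameter once a non-default flag exists. This assumes the
follow-up names its flag MEMBLOCK_DRIVER_MANAGED and keys off the
existing IORESOURCE_SYSRAM_DRIVER_MANAGED resource flag; neither use
is introduced by this patch:

	/* Hypothetical follow-up usage, not part of this patch. */
	enum memblock_flags flags = MEMBLOCK_NONE;

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
		if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
			flags = MEMBLOCK_DRIVER_MANAGED;
		ret = memblock_add_node(start, size, nid, flags);
		if (ret)
			goto error_mem_hotplug_end;
	}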