See Documentation/admin-guide/mm/hugetlbpage.rst
+hugetlb_optimize_vmemmap
+========================
+
+This knob is not available when memory_hotplug.memmap_on_memory (kernel parameter)
+is configured or the size of 'struct page' (a structure defined in
+include/linux/mm_types.h) is not a power of two (an unusual system
+configuration could result in this).
+
+Enable (set to 1) or disable (set to 0) the feature of optimizing vmemmap pages
+associated with each HugeTLB page.
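+
+For example, the knob can be flipped at runtime through sysctl(8) (or,
+equivalently, by writing to /proc/sys/vm/hugetlb_optimize_vmemmap)::
+
+	$ sysctl -w vm.hugetlb_optimize_vmemmap=1
+	$ sysctl -w vm.hugetlb_optimize_vmemmap=0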
+
+Once enabled, the vmemmap pages of subsequent allocations of HugeTLB pages
+from the buddy allocator will be optimized (7 pages per 2MB HugeTLB page and
+4095 pages per 1GB HugeTLB page), whereas already allocated HugeTLB pages
+will not be optimized. When those optimized HugeTLB pages are freed from the
+HugeTLB pool to the buddy allocator, the vmemmap pages representing that
+range need to be remapped again and the vmemmap pages discarded earlier need
+to be reallocated again. If your use case is that HugeTLB pages are allocated
+'on the fly' (e.g. never explicitly allocating HugeTLB pages with
+'nr_hugepages' but only setting 'nr_overcommit_hugepages', so that those
+overcommitted HugeTLB pages are allocated 'on the fly') instead of being
+pulled from the HugeTLB pool, you should weigh the benefits of memory savings
+against the extra overhead (~2x slower than before) of allocating or freeing
+HugeTLB pages between the HugeTLB pool and the buddy allocator. Another
+behavior to note is that if the system is under heavy memory pressure, it
+could prevent the user from freeing HugeTLB pages from the HugeTLB pool to
+the buddy allocator since the allocation of vmemmap pages could fail; you
+have to retry later if your system encounters this situation.
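+
+(The counts above follow from the vmemmap layout; assuming the common x86_64
+configuration of 4KB base pages and a 64-byte 'struct page', a 2MB HugeTLB
+page spans 512 base pages whose vmemmap occupies 512 * 64 = 32768 bytes, i.e.
+8 pages, of which all but 1 can be freed; likewise a 1GB HugeTLB page spans
+262144 base pages whose vmemmap occupies 4096 pages, of which 4095 can be
+freed.)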
+
+Once disabled, the vmemmap pages of subsequent allocations of HugeTLB pages
+from the buddy allocator will not be optimized, meaning the extra overhead at
+allocation time from the buddy allocator disappears, whereas already
+optimized HugeTLB pages will not be affected. If you want to make sure there
+are no optimized HugeTLB pages, you can set "nr_hugepages" to 0 first and
+then disable this knob. Note that writing 0 to "nr_hugepages" will make any
+"in use" HugeTLB pages become surplus pages. Those surplus pages are still
+optimized until they are no longer in use, so you would need to wait for them
+to be released before there are no optimized pages in the system.
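+
+For example, to reach a state with no optimized HugeTLB pages (a sketch; as
+noted above, surplus pages stay optimized until they are released)::
+
+	$ echo 0 > /proc/sys/vm/nr_hugepages
+	$ sysctl -w vm.hugetlb_optimize_vmemmap=0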
+
+
nr_hugepages_mempolicy
======================
*/
#define pr_fmt(fmt) "HugeTLB: " fmt
+#include <linux/memory_hotplug.h>
#include "hugetlb_vmemmap.h"
/*
#define RESERVE_VMEMMAP_NR 1U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
+enum vmemmap_optimize_mode {
+ VMEMMAP_OPTIMIZE_OFF,
+ VMEMMAP_OPTIMIZE_ON,
+};
+
DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
+static enum vmemmap_optimize_mode vmemmap_optimize_mode =
+	IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
+
+static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
+{
+ if (vmemmap_optimize_mode == to)
+ return;
+
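+	/*
+	 * The static key is reference-counted: switching to ON takes a
+	 * reference and switching to OFF drops it, while each optimized
+	 * HugeTLB page holds its own reference, so the key stays enabled
+	 * until the last optimized page is restored.
+	 */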
+ if (to == VMEMMAP_OPTIMIZE_OFF)
+ static_branch_dec(&hugetlb_optimize_vmemmap_key);
+ else
+ static_branch_inc(&hugetlb_optimize_vmemmap_key);
+ WRITE_ONCE(vmemmap_optimize_mode, to);
+}
+
static int __init hugetlb_vmemmap_early_param(char *buf)
{
bool enable;
+ enum vmemmap_optimize_mode mode;
if (kstrtobool(buf, &enable))
return -EINVAL;
- if (enable)
- static_branch_enable(&hugetlb_optimize_vmemmap_key);
- else
- static_branch_disable(&hugetlb_optimize_vmemmap_key);
+ mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
+ vmemmap_optimize_mode_switch(mode);
return 0;
}
*/
ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
- if (!ret)
+ if (!ret) {
ClearHPageVmemmapOptimized(head);
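+		/* Drop the reference taken when this page was optimized. */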
+ static_branch_dec(&hugetlb_optimize_vmemmap_key);
+ }
return ret;
}
if (!vmemmap_pages)
return;
+ if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
+ return;
+
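+	/*
+	 * Take a reference on the static key for this page; it is dropped
+	 * when the page's vmemmap is restored, or below if remapping fails.
+	 */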
+ static_branch_inc(&hugetlb_optimize_vmemmap_key);
+
vmemmap_addr += RESERVE_VMEMMAP_SIZE;
vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
* to the page which @vmemmap_reuse is mapped to, then free the pages
* which the range [@vmemmap_addr, @vmemmap_end] is mapped to.
*/
- if (!vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
+ if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
+ static_branch_dec(&hugetlb_optimize_vmemmap_key);
+ else
SetHPageVmemmapOptimized(head);
}
BUILD_BUG_ON(__NR_USED_SUBPAGE >=
RESERVE_VMEMMAP_SIZE / sizeof(struct page));
- if (!hugetlb_optimize_vmemmap_enabled())
- return;
-
if (!is_power_of_2(sizeof(struct page))) {
pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
static_branch_disable(&hugetlb_optimize_vmemmap_key);
pr_info("can optimize %d vmemmap pages for %s\n",
h->optimize_vmemmap_pages, h->name);
}
+
+#ifdef CONFIG_PROC_SYSCTL
+static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret;
+ enum vmemmap_optimize_mode mode;
+ static DEFINE_MUTEX(sysctl_mutex);
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
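+	/*
+	 * Operate on a local copy so that the mode is only ever switched
+	 * via vmemmap_optimize_mode_switch(); the mutex serializes
+	 * concurrent writers to keep the static key count balanced.
+	 */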
+ mutex_lock(&sysctl_mutex);
+ mode = vmemmap_optimize_mode;
+ table->data = &mode;
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+ if (write && !ret)
+ vmemmap_optimize_mode_switch(mode);
+ mutex_unlock(&sysctl_mutex);
+
+ return ret;
+}
+
+static struct ctl_table hugetlb_vmemmap_sysctls[] = {
+ {
+ .procname = "hugetlb_optimize_vmemmap",
+ .maxlen = sizeof(enum vmemmap_optimize_mode),
+ .mode = 0644,
+ .proc_handler = hugetlb_optimize_vmemmap_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static __init int hugetlb_vmemmap_sysctls_init(void)
+{
+ /*
+ * If "memory_hotplug.memmap_on_memory" is enabled or "struct page"
+ * crosses page boundaries, the vmemmap pages cannot be optimized.
+ */
+ if (!mhp_memmap_on_memory() && is_power_of_2(sizeof(struct page)))
+ register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
+
+ return 0;
+}
+late_initcall(hugetlb_vmemmap_sysctls_init);
+#endif /* CONFIG_PROC_SYSCTL */