unsigned long vmemmap_start;
unsigned long rte_size;
unsigned long pages;
+ unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
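The sizing this comment documents (the assignment itself is outside the hunk context) rounds the page count up to a whole number of memory sections before multiplying by the per-page bookkeeping size. A minimal standalone sketch of that arithmetic; every constant here (PAGE_SIZE, PAGES_PER_SECTION, STRUCT_PAGE_SIZE, the identity-map size) is an illustrative assumption, not the kernel's definition:

#include <stdio.h>

/* Illustrative stand-ins, not the real s390 values. */
#define PAGE_SIZE		4096UL
#define PAGES_PER_SECTION	(1UL << 16)	/* 256 MB sections at 4 KB pages */
#define STRUCT_PAGE_SIZE	64UL		/* assumed per-page metadata size */

/* round a page count up to a whole number of sections */
static unsigned long section_align_up(unsigned long pages)
{
	return (pages + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
}

int main(void)
{
	unsigned long ident_map_size = (17UL << 30) + (100UL << 20);
	unsigned long pages = ident_map_size / PAGE_SIZE;
	unsigned long vmemmap_size = section_align_up(pages) * STRUCT_PAGE_SIZE;

	/* vmemmap covers whole sections, so it can be slightly larger
	 * than needed for the identity-mapped pages alone */
	printf("pages=%lu -> vmemmap_size=%lu bytes\n", pages, vmemmap_size);
	return 0;
}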
vmalloc_size > _REGION2_SIZE ||
vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
_REGION2_SIZE) {
- MODULES_END = _REGION1_SIZE;
+ vmax = _REGION1_SIZE;
rte_size = _REGION2_SIZE;
} else {
- MODULES_END = _REGION2_SIZE;
+ vmax = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
}
/*
 * forcing modules and vmalloc area under the ultravisor
 * secure storage limit, so that any vmalloc allocation
 * we do could be used to back secure guest storage.
 */
- adjust_to_uv_max(&MODULES_END);
+ vmax = adjust_to_uv_max(vmax);
#ifdef CONFIG_KASAN
/* force vmalloc and modules below kasan shadow */
- MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+ vmax = min(vmax, KASAN_SHADOW_START);
#endif
+ MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
}
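Taken together, the hunk above turns the layout computation into a chain of value clamps on a local vmax, with a single store to MODULES_END at the end. A standalone sketch of that flow, using hypothetical stand-ins for the region constants and the ultravisor limit:

#include <stdio.h>

/* Hypothetical stand-ins for the real constants and globals. */
#define REGION1_SIZE		(1UL << 53)
#define KASAN_SHADOW_START	(1UL << 52)

static unsigned long uv_max_sec_stor_addr = 1UL << 51;	/* assume a UV limit */

/* same shape as the reworked adjust_to_uv_max(): value in, clamped value out */
static unsigned long adjust_to_uv_max(unsigned long limit)
{
	if (uv_max_sec_stor_addr && uv_max_sec_stor_addr < limit)
		limit = uv_max_sec_stor_addr;
	return limit;
}

int main(void)
{
	unsigned long vmax = REGION1_SIZE;	/* say the 4-level layout was chosen */

	vmax = adjust_to_uv_max(vmax);		/* ultravisor clamp */
	if (vmax > KASAN_SHADOW_START)		/* KASAN clamp, normally under #ifdef */
		vmax = KASAN_SHADOW_START;
	printf("MODULES_END would become %#lx\n", vmax);	/* single final store */
	return 0;
}

Each stage takes and returns a plain value, so no stage needs to know which global the result eventually lands in.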
#if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax)
+unsigned long adjust_to_uv_max(unsigned long limit)
{
if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
- *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+ limit = min_t(unsigned long, limit, uv_info.max_sec_stor_addr);
+ return limit;
}
static int is_prot_virt_host_capable(void)
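The change of shape can also be seen in isolation: the old helper wrote its result back through a pointer, the new one returns it. A small self-contained comparison; both function names below are made up for the sketch, and only the value-returning shape matches the code above:

#include <stdio.h>

static unsigned long max_sec_stor_addr = 1UL << 51;	/* assumed UV limit */

/* old shape: result handed back through a pointer */
static void adjust_to_uv_max_ptr(unsigned long *vmax)
{
	if (max_sec_stor_addr && *vmax > max_sec_stor_addr)
		*vmax = max_sec_stor_addr;
}

/* new shape: plain value in, clamped value out */
static unsigned long adjust_to_uv_max_val(unsigned long limit)
{
	if (max_sec_stor_addr && limit > max_sec_stor_addr)
		limit = max_sec_stor_addr;
	return limit;
}

int main(void)
{
	unsigned long a = 1UL << 53, b = 1UL << 53;

	adjust_to_uv_max_ptr(&a);	/* forces the caller to take an address */
	b = adjust_to_uv_max_val(b);	/* composes as a plain expression */
	printf("%#lx %#lx\n", a, b);	/* both print the same clamped value */
	return 0;
}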
#define BOOT_UV_H
#if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax);
+unsigned long adjust_to_uv_max(unsigned long limit);
void sanitize_prot_virt_host(void);
#else
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline unsigned long adjust_to_uv_max(unsigned long limit)
+{
+ return limit;
+}
static inline void sanitize_prot_virt_host(void) {}
#endif
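With the !CONFIG_KVM stub now returning its argument unchanged rather than doing nothing to a pointee, call sites stay free of conditional compilation. A tiny sketch of that contract; the caller and values are hypothetical:

#include <assert.h>

/* Sketch of the stub contract: with KVM disabled, adjust_to_uv_max()
 * must behave as the identity function, so callers need no #ifdef. */
static inline unsigned long adjust_to_uv_max(unsigned long limit)
{
	return limit;	/* mirrors the !CONFIG_KVM stub above */
}

int main(void)
{
	unsigned long vmax = 1UL << 42;	/* arbitrary example limit */

	vmax = adjust_to_uv_max(vmax);	/* identical call in both configs */
	assert(vmax == (1UL << 42));	/* stub leaves the limit untouched */
	return 0;
}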