--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ ... @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
	return __pa(p);
}
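
+/*
+ * Like kasan_alloc_zeroed_page(), but memblock_alloc_try_nid_raw()
+ * returns memory that has not been zeroed; callers must initialize
+ * it themselves.
+ */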
+static phys_addr_t __init kasan_alloc_raw_page(int node)
+{
+ void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_KASAN, node);
+ return __pa(p);
+}
+
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
@@ ... @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
	do {
phys_addr_t page_phys = early ?
__pa_symbol(kasan_early_shadow_page)
- : kasan_alloc_zeroed_page(node);
+ : kasan_alloc_raw_page(node);
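+		/*
+		 * Raw pages are not pre-zeroed. Early mappings all point at
+		 * kasan_early_shadow_page, which kasan_init() fills below;
+		 * late allocations must be initialized here.
+		 */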
+ if (!early)
+ memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ ... @@ void __init kasan_init(void)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

- memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
/* At this point kasan is fully initialized. Enable error messages */
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ ... @@
#ifdef CONFIG_KASAN_GENERIC
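+/*
+ * Generic KASAN: shadow byte 0 means all 8 bytes of the granule are
+ * accessible, so freshly initialized shadow poisons nothing.
+ */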
+#define KASAN_SHADOW_INIT 0
+
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
#endif /* CONFIG_KASAN_GENERIC */
+#ifdef CONFIG_KASAN_SW_TAGS
+
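+/*
+ * Tag-based KASAN: 0xFF is the native tag of kernel pointers, so
+ * shadow initialized to 0xFF matches default (untagged) accesses.
+ */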
+#define KASAN_SHADOW_INIT 0xFF
+
+#endif /* CONFIG_KASAN_SW_TAGS */
+
#endif /* LINUX_KASAN_H */
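Why 0xFF is the neutral value for the tag-based mode: each shadow byte
holds the memory tag of a granule, and a check passes when the pointer's
top-byte tag is the native 0xFF or equals the shadow tag. A minimal
model of that check follows (illustrative only, not the kernel's exact
code; tag_check() is a made-up name):

	#include <stdbool.h>

	/* Model of the sw tags check: pointer tag vs. per-granule shadow tag. */
	static bool tag_check(const void *ptr, unsigned char shadow_tag)
	{
		/* On arm64, TBI lets the top byte of a pointer carry a tag. */
		unsigned char ptr_tag = (unsigned long)ptr >> 56;

		/* Native 0xFF pointers always pass; otherwise tags must agree. */
		return ptr_tag == 0xFF || ptr_tag == shadow_tag;
	}

With the shadow filled with 0xFF, memory starts out tagged like native
kernel pointers, so nothing is reported until an object is explicitly
retagged.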
@@ ... @@ int kasan_module_alloc(void *addr, size_t size)
	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
shadow_start + shadow_size,
- GFP_KERNEL | __GFP_ZERO,
+ GFP_KERNEL,
PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
__builtin_return_address(0));
if (ret) {
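+		/* __GFP_ZERO is gone: fill the shadow with the mode's init value. */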
+ __memset(ret, KASAN_SHADOW_INIT, shadow_size);
find_vm_area(addr)->flags |= VM_KASAN;
kmemleak_ignore(ret);
return 0;