page of shadow space. Allocating a full shadow page per mapping would
therefore be wasteful. Furthermore, to ensure that different mappings
use different shadow pages, mappings would have to be aligned to
-``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+``KASAN_GRANULE_SIZE * PAGE_SIZE``.
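As a rough sketch of the coverage arithmetic behind that alignment requirement (assuming the generic mode's default shadow scale shift of 3, i.e. an 8-byte granule; the constants and userspace snippet below are illustrative stand-ins, not kernel API):

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define PAGE_SIZE 4096UL

int main(void)
{
	/* Each shadow byte covers one granule, so one page of shadow
	 * bytes describes KASAN_GRANULE_SIZE * PAGE_SIZE bytes of
	 * mapped memory.
	 */
	unsigned long covered = KASAN_GRANULE_SIZE * PAGE_SIZE;

	printf("one shadow page covers %lu bytes (%lu pages)\n",
	       covered, covered / PAGE_SIZE);	/* 32768 bytes, 8 pages */
	return 0;
}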
Instead, we share backing space across multiple mappings. We allocate
a backing page when a mapping in vmalloc space uses a particular page
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/*
* We assign some test results to these globals to make sure the tests
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
static noinline void __init copy_user_test(void)
{
/*
* Poisons the shadow memory for 'size' bytes starting from 'address'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
*/
void poison_range(const void *address, size_t size, u8 value)
{
poison_range(address, size, tag);
- if (size & KASAN_SHADOW_MASK) {
+ if (size & KASAN_GRANULE_MASK) {
u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
*shadow = tag;
else
- *shadow = size & KASAN_SHADOW_MASK;
+ *shadow = size & KASAN_GRANULE_MASK;
}
}
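For generic KASAN, the hunk above encodes a partially used trailing granule by storing the number of accessible bytes in its shadow byte. A minimal model of that arithmetic (assuming an 8-byte granule; not the kernel implementation):

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8UL
#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

int main(void)
{
	size_t size = 13;	/* e.g. a 13-byte object */

	/* Fully accessible granules get shadow value 0; the partially
	 * accessible last granule records how many of its bytes are valid.
	 */
	printf("full granules:    %zu\n", size / KASAN_GRANULE_SIZE);	/* 1 */
	printf("last shadow byte: %zu\n", size & KASAN_GRANULE_MASK);	/* 5 */
	return 0;
}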
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
poison_range(object,
- round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+ round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_KMALLOC_REDZONE);
}
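The round_up() calls in this patch align sizes to the granule before poisoning. A small sketch of that power-of-two round-up (the local macro below mirrors the kernel's round_up() behaviour for these values but is only a stand-in):

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8UL
/* power-of-two round-up, equivalent to the kernel's round_up() here */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	/* e.g. a 44-byte object is poisoned over 48 bytes */
	printf("%lu\n", round_up(44UL, KASAN_GRANULE_SIZE));	/* 48 */
	return 0;
}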
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return shadow_byte < 0 ||
- shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+ shadow_byte >= KASAN_GRANULE_SIZE;
/* else CONFIG_KASAN_SW_TAGS: */
if ((u8)shadow_byte == KASAN_TAG_INVALID)
return true;
}
- rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+ rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
return NULL;
redzone_start = round_up((unsigned long)(object + size),
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
redzone_end = round_up((unsigned long)object + cache->object_size,
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
tag = assign_tag(cache, object, false, keep_tag);
page = virt_to_page(ptr);
redzone_start = round_up((unsigned long)(ptr + size),
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
redzone_end = (unsigned long)ptr + page_size(page);
unpoison_range(ptr, size);
shadow_size = nr_shadow_pages << PAGE_SHIFT;
shadow_end = shadow_start + shadow_size;
- if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
- WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+ if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
+ WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
return NOTIFY_BAD;
switch (action) {
if (!is_vmalloc_or_module_addr(start))
return;
- size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size = round_up(size, KASAN_GRANULE_SIZE);
poison_range(start, size, KASAN_VMALLOC_INVALID);
}
unsigned long region_start, region_end;
unsigned long size;
- region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
- region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
+ region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
free_region_start = ALIGN(free_region_start,
- PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ PAGE_SIZE * KASAN_GRANULE_SIZE);
if (start != region_start &&
free_region_start < region_start)
- region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+ region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
free_region_end = ALIGN_DOWN(free_region_end,
- PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ PAGE_SIZE * KASAN_GRANULE_SIZE);
if (end != region_end &&
free_region_end > region_end)
- region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+ region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
shadow_start = kasan_mem_to_shadow((void *)region_start);
shadow_end = kasan_mem_to_shadow((void *)region_end);
unsigned long shadow_start;
shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
- scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+ scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
+ KASAN_SHADOW_SCALE_SHIFT;
shadow_size = round_up(scaled_size, PAGE_SIZE);
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
if (unlikely(shadow_value)) {
- s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
return unlikely(last_accessible_byte >= shadow_value);
}
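A positive shadow value N in generic mode means only the first N bytes of the granule are accessible, which is what the comparison above relies on. A userspace model of that single-byte check (8-byte granule assumed, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define KASAN_GRANULE_MASK 7L

static bool byte_is_poisoned(unsigned long addr, signed char shadow_value)
{
	if (!shadow_value)
		return false;	/* whole granule accessible */
	/* compare the byte's offset within its granule against the
	 * number of accessible bytes recorded in the shadow */
	return (signed char)(addr & KASAN_GRANULE_MASK) >= shadow_value;
}

int main(void)
{
	/* granule with shadow value 5: bytes 0..4 valid, 5..7 poisoned */
	printf("%d\n", byte_is_poisoned(0x1000 + 4, 5));	/* 0 */
	printf("%d\n", byte_is_poisoned(0x1000 + 5, 5));	/* 1 */
	return 0;
}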
* Access crosses an 8-byte (granule size) boundary. Such an access
* maps into 2 shadow bytes, so we need to check them both.
*/
- if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+ if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
return memory_is_poisoned_1(addr + size - 1);
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
/* An unaligned 16-byte access maps into 3 shadow bytes. */
- if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
return *shadow_addr || memory_is_poisoned_1(addr + 15);
return *shadow_addr;
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
if (unlikely(ret != (unsigned long)last_shadow ||
- ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
return true;
}
return false;
static void register_global(struct kasan_global *global)
{
- size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+ size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
unpoison_range(global->beg, global->size);
/* Emitted by the compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
- size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
rounded_up_size;
- size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
const void *left_redzone = (const void *)(addr -
KASAN_ALLOCA_REDZONE_SIZE);
void *p = addr;
while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
- p += KASAN_SHADOW_SCALE_SIZE;
+ p += KASAN_GRANULE_SIZE;
return p;
}
shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
/*
- * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+ * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
* at the next shadow byte to determine the type of the bad access.
*/
- if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+ if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
shadow_addr++;
switch (*shadow_addr) {
- case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+ case 0 ... KASAN_GRANULE_SIZE - 1:
/*
* In theory it's still possible to see these shadow values
* due to a data race in the kernel code.
end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
if (WARN_ON((unsigned long)start %
- (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
- WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+ (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+ WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
return;
for (; addr < end; addr = next) {
shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
if (WARN_ON((unsigned long)start %
- (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
- WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+ (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+ WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
return -EINVAL;
ret = kasan_populate_early_shadow(shadow_start, shadow_end);
#include <linux/kasan.h>
#include <linux/stackdepot.h>
-#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
-#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)
#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
return false;
aligned_addr = round_down((unsigned long)addr, sizeof(long));
- mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
+ mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
shadow_ptr--;
- mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+ mem_ptr -= KASAN_GRANULE_SIZE;
}
while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
shadow_ptr--;
- mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+ mem_ptr -= KASAN_GRANULE_SIZE;
}
if (shadow_ptr < shadow_bottom)
return false;
- frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
+ frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
frame[0]);
else
bug_type = "maybe wild-memory-access";
pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
- orig_addr, orig_addr + KASAN_SHADOW_MASK);
+ orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif
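The range printed above spans one whole granule: a single shadow byte describes KASAN_GRANULE_SIZE bytes of memory, so the faulting address can only be narrowed to that window. A small, illustrative computation of the reported range (the 8-byte granule and the address are assumptions):

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8UL

int main(void)
{
	/* any of these 8 original addresses maps to the same shadow byte */
	unsigned long orig_addr = 0xffff888012345670UL;	/* granule-aligned */

	printf("range [0x%016lx-0x%016lx]\n",
	       orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
	return 0;
}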
void *end = p + size;
while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
- p += KASAN_SHADOW_SCALE_SIZE;
+ p += KASAN_GRANULE_SIZE;
return p;
}