From: Linus Torvalds
Date: Wed, 30 Jun 2021 00:29:11 +0000 (-0700)
Subject: Merge branch 'akpm' (patches from Andrew)
X-Git-Url: https://git.baikalelectronics.ru/sdk/?a=commitdiff_plain;h=65090f30ab791810a3dc840317e57df05018559c;p=kernel.git

Merge branch 'akpm' (patches from Andrew)

Merge misc updates from Andrew Morton:
 "191 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts, ntfs,
  squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab, slub,
  kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap, mprotect,
  bootmem, dma, tracing, vmalloc, kasan, initialization, pagealloc, and
  memory-failure)"

* emailed patches from Andrew Morton: (191 commits)
  mm,hwpoison: make get_hwpoison_page() call get_any_page()
  mm,hwpoison: send SIGBUS with error virutal address
  mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
  mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
  mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
  mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
  docs: remove description of DISCONTIGMEM
  arch, mm: remove stale mentions of DISCONIGMEM
  mm: remove CONFIG_DISCONTIGMEM
  m68k: remove support for DISCONTIGMEM
  arc: remove support for DISCONTIGMEM
  arc: update comment about HIGHMEM implementation
  alpha: remove DISCONTIGMEM and NUMA
  mm/page_alloc: move free_the_page
  mm/page_alloc: fix counting of managed_pages
  mm/page_alloc: improve memmap_pages dbg msg
  mm: drop SECTION_SHIFT in code comments
  mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
  mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
  mm/page_alloc: scale the number of pages that are batch freed
  ...
---

65090f30ab791810a3dc840317e57df05018559c
diff --cc arch/arm64/kvm/mmu.c
index 57292dc5ce357,74b3c1a3ff5a2..f23dfa06433b3
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@@ -924,12 -853,9 +924,12 @@@ static int user_mem_abort(struct kvm_vc
 		return -EFAULT;
 	}

- 	/* Let's check if we will get back a huge page backed by hugetlbfs */
+ 	/*
+ 	 * Let's check if we will get back a huge page backed by hugetlbfs, or
+ 	 * get block mapping for device MMIO region.
+ 	 */
 	mmap_read_lock(current->mm);
- 	vma = find_vma_intersection(current->mm, hva, hva + 1);
+ 	vma = vma_lookup(current->mm, hva);
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
 		mmap_read_unlock(current->mm);
diff --cc lib/dump_stack.c
index 5ebf4375fa8c9,586e3f2c6a158..27f16872320d3
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@@ -84,16 -84,56 +84,22 @@@ static void __dump_stack(const char *lo
  *
  * Architectures can override this implementation by implementing its own.
  */
- asmlinkage __visible void dump_stack(void)
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
+ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 {
 	unsigned long flags;
- 	int was_locked;
- 	int old;
- 	int cpu;

 	/*
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-retry:
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-	old = atomic_cmpxchg(&dump_lock, -1, cpu);
-	if (old == -1) {
-		was_locked = 0;
-	} else if (old == cpu) {
-		was_locked = 1;
-	} else {
-		local_irq_restore(flags);
-		/*
-		 * Wait for the lock to release before jumping to
-		 * atomic_cmpxchg() in order to mitigate the thundering herd
-		 * problem.
-		 */
-		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
-		goto retry;
-	}
-
-	__dump_stack(log_lvl);
-
-	if (!was_locked)
-		atomic_set(&dump_lock, -1);
-
-	local_irq_restore(flags);
-}
-#else
-asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
-{
+	printk_cpu_lock_irqsave(flags);
- 	__dump_stack();
+ 	__dump_stack(log_lvl);
+	printk_cpu_unlock_irqrestore(flags);
 }
-#endif
+ EXPORT_SYMBOL(dump_stack_lvl);
+
+ asmlinkage __visible void dump_stack(void)
+ {
+ 	dump_stack_lvl(KERN_DEFAULT);
+ }
 EXPORT_SYMBOL(dump_stack);
diff --cc mm/kasan/hw_tags.c
index ed5e5b833d610,d867b22ddbb78..4ea8c368b5b8b
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@@ -216,60 -216,6 +216,38 @@@ void __init kasan_init_hw_tags(void
 	pr_info("KernelAddressSanitizer initialized\n");
 }

- void kasan_set_free_info(struct kmem_cache *cache,
- 			void *object, u8 tag)
- {
- 	struct kasan_alloc_meta *alloc_meta;
-
- 	alloc_meta = kasan_get_alloc_meta(cache, object);
- 	if (alloc_meta)
- 		kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
- }
-
- struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- 				void *object, u8 tag)
- {
- 	struct kasan_alloc_meta *alloc_meta;
-
- 	alloc_meta = kasan_get_alloc_meta(cache, object);
- 	if (!alloc_meta)
- 		return NULL;
-
- 	return &alloc_meta->free_track[0];
- }
-
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+	/*
+	 * This condition should match the one in post_alloc_hook() in
+	 * page_alloc.c.
+	 */
+	bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+	if (flags & __GFP_SKIP_KASAN_POISON)
+		SetPageSkipKASanPoison(page);
+
+	if (flags & __GFP_ZEROTAGS) {
+		int i;
+
+		for (i = 0; i != 1 << order; ++i)
+			tag_clear_highpage(page + i);
+	} else {
+		kasan_unpoison_pages(page, order, init);
+	}
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	/*
+	 * This condition should match the one in free_pages_prepare() in
+	 * page_alloc.c.
+	 */
+	bool init = want_init_on_free();
+
+	kasan_poison_pages(page, order, init);
+}
+
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

 void kasan_set_tagging_report_once(bool state)
diff --cc mm/kasan/sw_tags.c
index 9362938abbfa5,675e67375fb54..bd3f540feb472
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@@ -166,51 -166,3 +166,10 @@@ void __hwasan_tag_memory(unsigned long
 	kasan_poison((void *)addr, size, tag, false);
 }
 EXPORT_SYMBOL(__hwasan_tag_memory);
+
- void kasan_set_free_info(struct kmem_cache *cache,
- 			void *object, u8 tag)
- {
- 	struct kasan_alloc_meta *alloc_meta;
- 	u8 idx = 0;
-
- 	alloc_meta = kasan_get_alloc_meta(cache, object);
- 	if (!alloc_meta)
- 		return;
-
- #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
- 	idx = alloc_meta->free_track_idx;
- 	alloc_meta->free_pointer_tag[idx] = tag;
- 	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
- #endif
-
- 	kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
- }
-
- struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- 				void *object, u8 tag)
- {
- 	struct kasan_alloc_meta *alloc_meta;
- 	int i = 0;
-
- 	alloc_meta = kasan_get_alloc_meta(cache, object);
- 	if (!alloc_meta)
- 		return NULL;
-
- #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
- 	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- 		if (alloc_meta->free_pointer_tag[i] == tag)
- 			break;
- 	}
- 	if (i == KASAN_NR_FREE_STACKS)
- 		i = alloc_meta->free_track_idx;
- #endif
-
- 	return &alloc_meta->free_track[i];
- }
-
+void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
+			unsigned long ret_ip)
+{
+	kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
+		     ret_ip);
+}
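
For illustration only, a minimal sketch of how the dump_stack_lvl() interface added in the lib/dump_stack.c hunk above might be called from a module. The demo module and its init/exit functions are hypothetical; the sketch assumes a tree containing this merge, where dump_stack_lvl() is declared in <linux/printk.h> and exported by EXPORT_SYMBOL(dump_stack_lvl) as shown in the hunk.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init stackdump_demo_init(void)
{
	/*
	 * dump_stack_lvl() takes a printk level prefix; dump_stack() itself
	 * is now just dump_stack_lvl(KERN_DEFAULT), per the hunk above.
	 */
	pr_info("stackdump_demo: dumping current stack at KERN_INFO\n");
	dump_stack_lvl(KERN_INFO);
	return 0;
}

static void __exit stackdump_demo_exit(void)
{
	/* Same helper, different console log level. */
	dump_stack_lvl(KERN_DEBUG);
}

module_init(stackdump_demo_init);
module_exit(stackdump_demo_exit);
MODULE_LICENSE("GPL");

With this interface the backtrace is printed at the log level the caller passes in, rather than always at the default level used by plain dump_stack().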