git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: fix missing handler for __GFP_NOWARN
authorQi Zheng <zhengqi.arch@bytedance.com>
Thu, 19 May 2022 21:08:55 +0000 (14:08 -0700)
committerakpm <akpm@linux-foundation.org>
Thu, 19 May 2022 21:08:55 +0000 (14:08 -0700)
We expect no warnings to be issued when we specify __GFP_NOWARN, but
currently, in paths such as alloc_pages() and kmalloc(), some warnings
are still printed. Fix that.

However, warnings that report usage problems are deliberately not
suppressed here: if such a warning is printed, the caller's usage
problem should be fixed instead. One example is the following case:

WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

[zhengqi.arch@bytedance.com: v2]
Link: https://lkml.kernel.org/r/20220511061951.1114-1-zhengqi.arch@bytedance.com
Link: https://lkml.kernel.org/r/20220510113809.80626-1-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/fault-inject.h
lib/fault-inject.c
mm/failslab.c
mm/internal.h
mm/page_alloc.c

index 2d04f6448cde3f4c189fd435ae5256ad4816caab..9f6e25467844ae1ac708a955cebbff254abf0f3c 100644 (file)
@@ -20,6 +20,7 @@ struct fault_attr {
        atomic_t space;
        unsigned long verbose;
        bool task_filter;
+       bool no_warn;
        unsigned long stacktrace_depth;
        unsigned long require_start;
        unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
                .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,       \
                .verbose = 2,                                           \
                .dname = NULL,                                          \
+               .no_warn = false,                                       \
        }
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
index ce12621b42756ada9704a80c6dc33778531b77c0..423784d9c058eaf7d8833eade65bd389bd6a993e 100644 (file)
@@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
+       if (attr->no_warn)
+               return;
+
        if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
                printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
                       "name %pd, interval %lu, probability %lu, "
index f92fed91ac2360aa9dafef57f773b882ab6c4251..58df9789f1d22627f569e92d527c50068f276062 100644 (file)
@@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
        if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
                return false;
 
+       if (gfpflags & __GFP_NOWARN)
+               failslab.attr.no_warn = true;
+
        return should_fail(&failslab.attr, s->object_size);
 }
 
index 6d188161b20e8dab7e10df17ce521c6b5205e112..64e61b032dacaf349ad44a62d79e9906deaf14a8 100644 (file)
@@ -35,6 +35,21 @@ struct folio_batch;
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+/*
+ * Different from WARN_ON_ONCE(), no warning will be issued
+ * when we specify __GFP_NOWARN.
+ */
+#define WARN_ON_ONCE_GFP(cond, gfp)    ({                              \
+       static bool __section(".data.once") __warned;                   \
+       int __ret_warn_once = !!(cond);                                 \
+                                                                       \
+       if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+               __warned = true;                                        \
+               WARN_ON(1);                                             \
+       }                                                               \
+       unlikely(__ret_warn_once);                                      \
+})
+
 void page_writeback_init(void);
 
 static inline void *folio_raw_mapping(struct folio *folio)
index 10305b10fe93d55ad8a4dc5d5035a361ca0db8eb..267599dd970631e71b791d8f4d760fb15150cfbe 100644 (file)
@@ -3786,6 +3786,9 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
                        (gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
 
+       if (gfp_mask & __GFP_NOWARN)
+               fail_page_alloc.attr.no_warn = true;
+
        return should_fail(&fail_page_alloc.attr, 1 << order);
 }
 
@@ -4334,7 +4337,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         */
 
        /* Exhausted what can be done so it's blame time */
-       if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+       if (out_of_memory(&oc) ||
+           WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
                *did_some_progress = 1;
 
                /*
@@ -5108,7 +5112,7 @@ nopage:
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
-               if (WARN_ON_ONCE(!can_direct_reclaim))
+               if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
 
                /*
@@ -5116,7 +5120,7 @@ nopage:
                 * because we cannot reclaim anything and only can loop waiting
                 * for somebody to do a work for us
                 */
-               WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+               WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
 
                /*
                 * non failing costly orders are a hard requirement which we
@@ -5124,7 +5128,7 @@ nopage:
                 * so that we can identify them and convert them to something
                 * else.
                 */
-               WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+               WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
 
                /*
                 * Help non-failing allocations by giving them access to memory
@@ -5370,10 +5374,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
         * There are several places where we assume that the order value is sane
         * so bail out early if the request is out of bound.
         */
-       if (unlikely(order >= MAX_ORDER)) {
-               WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
+       if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
                return NULL;
-       }
 
        gfp &= gfp_allowed_mask;
        /*
@@ -9025,7 +9027,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
 
        lru_cache_enable();
        if (ret < 0) {
-               if (ret == -EBUSY)
+               if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
                        alloc_contig_dump_pages(&cc->migratepages);
                putback_movable_pages(&cc->migratepages);
                return ret;