mm: cma_alloc: allow to specify GFP mask
author     Lucas Stach <l.stach@pengutronix.de>
           Fri, 24 Feb 2017 22:58:41 +0000 (14:58 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 25 Feb 2017 01:46:55 +0000 (17:46 -0800)
Most users of this interface just want to use it with the default
GFP_KERNEL flags, but in cases where DMA memory is allocated, it may be
called from a different context.

No functional change yet, just passing through the flag to the
underlying alloc_contig_range function.
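
As an illustration of the resulting interface (the caller below is
hypothetical and not part of this patch), a user of cma_alloc() now names
the mask explicitly; GFP_KERNEL reproduces the old behaviour, and any other
mask is handed straight through to alloc_contig_range():

#include <linux/cma.h>
#include <linux/gfp.h>

/*
 * Hypothetical caller, sketching the post-patch signature
 * cma_alloc(cma, count, align, gfp_mask).  The __GFP_NOWARN use is purely
 * an example of a non-default mask a later change could pass.
 */
static struct page *example_cma_alloc_quiet(struct cma *cma, size_t count,
					    unsigned int align)
{
	return cma_alloc(cma, count, align, GFP_KERNEL | __GFP_NOWARN);
}

With GFP_KERNEL the call behaves exactly as before, which is what every
in-tree caller converted by this patch passes.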

Link: http://lkml.kernel.org/r/20170127172328.18574-2-l.stach@pengutronix.de
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alexander Graf <agraf@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/kvm/book3s_hv_builtin.c
drivers/base/dma-contiguous.c
include/linux/cma.h
mm/cma.c
mm/cma_debug.c

index c42a7e63b39e6425b2d57a047a6ab6c90181d7b1..4d6c64b3041c3aa4f58d1a504ad868c27b6924df 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-       return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+       return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+                        GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
index e167a1e1bccb062efef2595fcd5299301a97df80..d1a9cbabc627afc7b150ab55b67fe55bb1475562 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -193,7 +193,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;
 
-       return cma_alloc(dev_get_cma_area(dev), count, align);
+       return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL);
 }
 
 /**
index 6f0a91b37f683fd4788ef82e6fb49f1701abb5d5..03f32d0bd1d8a7be5a0fddbc058e0a7f6b059efe 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                        unsigned int order_per_bit,
                                        struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+                             gfp_t gfp_mask);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 #endif
index c6aed23ca6dfce60b450cac320e1035b0693f06c..2906ae5a83ff8653bc9733517c2f103aa99262ce 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -357,7 +357,8 @@ err:
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+                      gfp_t gfp_mask)
 {
        unsigned long mask, offset;
        unsigned long pfn = -1;
@@ -403,7 +404,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-                                        GFP_KERNEL);
+                                        gfp_mask);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
index f8e4b60db167215862824637d856ffb34332f071..ffc0c3d0ae64a610409d85a5ac5704c2a660ab0c 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
        if (!mem)
                return -ENOMEM;
 
-       p = cma_alloc(cma, count, 0);
+       p = cma_alloc(cma, count, 0, GFP_KERNEL);
        if (!p) {
                kfree(mem);
                return -ENOMEM;