mm/vmalloc: remove vwrite()
author David Hildenbrand <david@redhat.com>
Fri, 7 May 2021 01:06:06 +0000 (18:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 7 May 2021 07:26:34 +0000 (00:26 -0700)
The last user (/dev/kmem) is gone. Let's drop it.
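
Both removed implementations (the nommu one and the regular one) open
with the same overflow clamp: if adding @count to @addr would wrap past
the top of the address space, @count is reduced to -(unsigned long)addr,
which is exactly the number of bytes between @addr and the top. A
minimal userspace sketch of the idiom (clamp_count is an illustrative
name, not kernel code):

    #include <stdio.h>

    /* If addr + count would wrap around the address space, clamp
     * count to the number of bytes remaining below the top.
     */
    static unsigned long clamp_count(unsigned long addr, unsigned long count)
    {
            if (addr + count < count)       /* sum wrapped around */
                    count = -addr;          /* 2^BITS - addr */
            return count;
    }

    int main(void)
    {
            unsigned long a = ~0UL - 15;    /* 16 bytes below the top */

            printf("%lu\n", clamp_count(a, 100));    /* prints 16 */
            printf("%lu\n", clamp_count(4096, 100)); /* prints 100 */
            return 0;
    }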

Link: https://lkml.kernel.org/r/20210324102351.6932-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: huang ying <huang.ying.caritas@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmalloc.h
mm/nommu.c
mm/vmalloc.c

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index f31ba59fb1ef65aea1a0df86168481438b2b2085..b6ff16393bf6410531fa6e9055438c4792d3863b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -229,7 +229,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
 
 /* for /proc/kcore */
 extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
 
 /*
 *     Internals.  Don't use..
diff --git a/mm/nommu.c b/mm/nommu.c
index 5c9ab799c0e6395877cb781d8eb88493641f81d2..85a3a68dffb687ee7949a734156e6c2f2a1c8390 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count)
        return count;
 }
 
-long vwrite(char *buf, char *addr, unsigned long count)
-{
-       /* Don't allow overflow */
-       if ((unsigned long) addr + count < count)
-               count = -(unsigned long) addr;
-
-       memcpy(addr, buf, count);
-       return count;
-}
-
 /*
  *     vmalloc  -  allocate virtually contiguous memory
  *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2868692c6807271e5e3b79612fe17e777ff936c1..a7f318c9e426af341926f999a657cfffbca958ab 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                 * kmap() and get small overhead in this access function.
                 */
                if (p) {
-                       /*
-                        * we can expect USER0 is not used (see vread/vwrite's
-                        * function description)
-                        */
+                       /* We can expect USER0 is not used -- see vread() */
                        void *map = kmap_atomic(p);
                        memcpy(buf, map + offset, length);
                        kunmap_atomic(map);
@@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
        return copied;
 }
 
-static int aligned_vwrite(char *buf, char *addr, unsigned long count)
-{
-       struct page *p;
-       int copied = 0;
-
-       while (count) {
-               unsigned long offset, length;
-
-               offset = offset_in_page(addr);
-               length = PAGE_SIZE - offset;
-               if (length > count)
-                       length = count;
-               p = vmalloc_to_page(addr);
-               /*
-                * To do safe access to this _mapped_ area, we need
-                * lock. But adding lock here means that we need to add
-                * overhead of vmalloc()/vfree() calls for this _debug_
-                * interface, rarely used. Instead of that, we'll use
-                * kmap() and get small overhead in this access function.
-                */
-               if (p) {
-                       /*
-                        * we can expect USER0 is not used (see vread/vwrite's
-                        * function description)
-                        */
-                       void *map = kmap_atomic(p);
-                       memcpy(map + offset, buf, length);
-                       kunmap_atomic(map);
-               }
-               addr += length;
-               buf += length;
-               copied += length;
-               count -= length;
-       }
-       return copied;
-}
-
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf:     buffer for reading data
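
Both aligned_vread() (which stays) and the removed aligned_vwrite()
shared one page-at-a-time loop: vmalloc_to_page() resolves each virtual
page to its struct page (NULL for holes), kmap_atomic() gives a
short-lived mapping to memcpy() through, and holes are simply skipped,
trading a small per-page mapping cost for not taking a lock on this
rarely used path. A condensed sketch of the loop in the surviving read
direction (pagewise_copy_from_vmalloc is an illustrative name, not a
kernel symbol):

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/vmalloc.h>
    #include <linux/string.h>

    static int pagewise_copy_from_vmalloc(char *buf, char *addr,
                                          unsigned long count)
    {
            int copied = 0;

            while (count) {
                    unsigned long offset = offset_in_page(addr);
                    unsigned long length = PAGE_SIZE - offset;
                    struct page *p = vmalloc_to_page(addr);

                    if (length > count)
                            length = count;
                    if (p) {                /* skip unmapped holes */
                            void *map = kmap_atomic(p);

                            memcpy(buf, map + offset, length);
                            kunmap_atomic(map);
                    }
                    addr += length;
                    buf += length;
                    copied += length;
                    count -= length;
            }
            return copied;
    }

The removed writer was the mirror image of this, with
memcpy(map + offset, buf, length) instead.
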
@@ -3283,80 +3243,6 @@ finished:
        return buflen;
 }
 
-/**
- * vwrite() - write vmalloc area in a safe way.
- * @buf:      buffer for source data
- * @addr:     vm address.
- * @count:    number of bytes to be written.
- *
- * This function checks that addr is a valid vmalloc'ed area and
- * copies data from a buffer to the given addr. If the specified range
- * [addr...addr+count) includes some valid address, data is copied from
- * the corresponding part of @buf. Memory holes are skipped, and an
- * IOREMAP area is treated as a memory hole: no copy is done there.
- *
- * If [addr...addr+count) doesn't intersect any live vm_struct area,
- * this returns 0. @buf should be a kernel buffer.
- *
- * Note: In usual operation, vwrite() is never necessary because the
- * caller should know the vmalloc() area is valid and can use memcpy().
- * This is for routines which have to access the vmalloc area without
- * any information, such as /dev/kmem.
- *
- * Return: number of bytes for which addr and buf should be
- * increased (same number as @count) or %0 if [addr...addr+count)
- * doesn't include any intersection with a valid vmalloc area
- */
-long vwrite(char *buf, char *addr, unsigned long count)
-{
-       struct vmap_area *va;
-       struct vm_struct *vm;
-       char *vaddr;
-       unsigned long n, buflen;
-       int copied = 0;
-
-       /* Don't allow overflow */
-       if ((unsigned long) addr + count < count)
-               count = -(unsigned long) addr;
-       buflen = count;
-
-       spin_lock(&vmap_area_lock);
-       list_for_each_entry(va, &vmap_area_list, list) {
-               if (!count)
-                       break;
-
-               if (!va->vm)
-                       continue;
-
-               vm = va->vm;
-               vaddr = (char *) vm->addr;
-               if (addr >= vaddr + get_vm_area_size(vm))
-                       continue;
-               while (addr < vaddr) {
-                       if (count == 0)
-                               goto finished;
-                       buf++;
-                       addr++;
-                       count--;
-               }
-               n = vaddr + get_vm_area_size(vm) - addr;
-               if (n > count)
-                       n = count;
-               if (!(vm->flags & VM_IOREMAP)) {
-                       aligned_vwrite(buf, addr, n);
-                       copied++;
-               }
-               buf += n;
-               addr += n;
-               count -= n;
-       }
-finished:
-       spin_unlock(&vmap_area_lock);
-       if (!copied)
-               return 0;
-       return buflen;
-}
-
 /**
  * remap_vmalloc_range_partial - map vmalloc pages to userspace
  * @vma:               vma to cover
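
As the removed kernel-doc notes, in-kernel code never needed vwrite():
a caller that created a vmalloc area itself knows the whole mapping is
valid and can memcpy() into it directly. A hedged sketch of that usual
pattern (dup_into_vmalloc is a hypothetical helper, not a kernel API):

    #include <linux/vmalloc.h>
    #include <linux/string.h>

    /* Copy src into a freshly vmalloc'ed buffer. No vwrite() needed:
     * on success the whole area is known to be mapped.
     */
    static void *dup_into_vmalloc(const void *src, size_t len)
    {
            void *buf = vmalloc(len);

            if (buf)
                    memcpy(buf, src, len);  /* safe: we own the area */
            return buf;                     /* caller vfree()s it */
    }

Only code that must touch a vmalloc range it knows nothing about needs
the checked walk, and after /dev/kmem's removal the sole such user is
/proc/kcore, which only reads -- hence vread() stays and vwrite() goes.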