mm: vmalloc: convert vread() to vread_iter()
Having previously laid the foundation for converting vread() to an
iterator function, pull the trigger and do so.

This patch attempts to provide minimal refactoring and to reflect the
existing logic as best we can, with the exception of aligned_vread_iter(),
which drops the use of the deprecated kmap_atomic() in favour of
kmap_local_page().

All existing logic to zero portions of memory not read remains, and there
should be no functional difference other than a performance improvement in
/proc/kcore access to vmalloc regions.
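
To illustrate, a minimal sketch of the per-page copy now performed by
aligned_vread_iter() (the wrapper name below is hypothetical; the real loop
appears in the mm/vmalloc.c hunk further down):

	/* Needs <linux/vmalloc.h>, <linux/highmem.h> and <linux/uio.h>. */
	static void copy_vmalloc_page_to_iter(struct iov_iter *iter, char *addr,
					      size_t offset, size_t length)
	{
		struct page *page = vmalloc_to_page(addr);
		size_t copied = 0;

		if (page) {
			/* kmap_local_page() replaces the deprecated kmap_atomic(). */
			void *map = kmap_local_page(page);

			copied = copy_to_iter(map + offset, length, iter);
			kunmap_local(map);
		}

		/* Zero-fill whatever was not (or could not be) read. */
		if (copied < length)
			iov_iter_zero(length - copied, iter);
	}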

Now that we no longer need a bounce buffer in read_kcore_iter() at all, we
dispense with the one allocated there altogether.
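
With that, the KCORE_VMALLOC case in read_kcore_iter() reduces to writing
straight into the caller's iterator, roughly as below (the actual hunk is in
the fs/proc/kcore.c changes that follow):

	case KCORE_VMALLOC:
		/*
		 * vread_iter() zero-fills whatever it cannot read, so no
		 * intermediate buffer or separate zeroing step is needed.
		 */
		if (vread_iter(iter, (char *)start, tsz) != tsz) {
			ret = -EFAULT;
			goto out;
		}
		break;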

Link: https://lkml.kernel.org/r/7f9dad4deade9639cf7af7a8b01143bca882ff02.1679209395.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Lorenzo Stoakes authored and Andrew Morton committed Mar 19, 2023
1 parent 11f1bdc commit 4e29dd9
Showing 4 changed files with 62 additions and 73 deletions.
21 changes: 2 additions & 19 deletions fs/proc/kcore.c
@@ -307,13 +307,9 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
*i = ALIGN(*i + descsz, 4);
}

static ssize_t
read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
char *buf = file->private_data;
loff_t *ppos = &iocb->ki_pos;

size_t phdrs_offset, notes_offset, data_offset;
size_t page_offline_frozen = 1;
size_t phdrs_len, notes_len;
@@ -507,9 +503,7 @@ read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)

switch (m->type) {
case KCORE_VMALLOC:
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
if (copy_to_iter(buf, tsz, iter) != tsz) {
if (vread_iter(iter, (char *)start, tsz) != tsz) {
ret = -EFAULT;
goto out;
}
@@ -582,10 +576,6 @@ static int open_kcore(struct inode *inode, struct file *filp)
if (ret)
return ret;

filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!filp->private_data)
return -ENOMEM;

if (kcore_need_update)
kcore_update_ram();
if (i_size_read(inode) != proc_root_kcore->size) {
@@ -596,16 +586,9 @@ static int open_kcore(struct inode *inode, struct file *filp)
return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}

static const struct proc_ops kcore_proc_ops = {
.proc_read_iter = read_kcore_iter,
.proc_open = open_kcore,
.proc_release = release_kcore,
.proc_lseek = default_llseek,
};

3 changes: 2 additions & 1 deletion include/linux/vmalloc.h
@@ -9,6 +9,7 @@
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <linux/uio.h>

#include <asm/vmalloc.h>

@@ -251,7 +252,7 @@ static inline void set_vm_flush_reset_perms(void *addr)
#endif

/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vread_iter(struct iov_iter *iter, char *addr, size_t count);

/*
* Internals. Don't use..
10 changes: 5 additions & 5 deletions mm/nommu.c
@@ -36,6 +36,7 @@
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -198,14 +199,13 @@ unsigned long vmalloc_to_pfn(const void *addr)
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
long vread_iter(struct iov_iter *iter, char *addr, size_t count)
{
/* Don't allow overflow */
if ((unsigned long) buf + count < count)
count = -(unsigned long) buf;
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;

memcpy(buf, addr, count);
return count;
return copy_to_iter(addr, count, iter);
}

/*
101 changes: 53 additions & 48 deletions mm/vmalloc.c
@@ -37,7 +37,6 @@
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
@@ -3446,44 +3445,45 @@ EXPORT_SYMBOL(vmalloc_32_user);
* small helper routine , copy contents to buf from addr.
* If the page is not present, fill zero.
*/

static int aligned_vread(char *buf, char *addr, unsigned long count)
static void aligned_vread_iter(struct iov_iter *iter,
char *addr, size_t count)
{
struct page *p;
int copied = 0;
struct page *page;

while (count) {
while (count > 0) {
unsigned long offset, length;
size_t copied = 0;

offset = offset_in_page(addr);
length = PAGE_SIZE - offset;
if (length > count)
length = count;
p = vmalloc_to_page(addr);
page = vmalloc_to_page(addr);
/*
* To do safe access to this _mapped_ area, we need
* lock. But adding lock here means that we need to add
* overhead of vmalloc()/vfree() calls for this _debug_
* interface, rarely used. Instead of that, we'll use
* kmap() and get small overhead in this access function.
*/
if (p) {
if (page) {
/* We can expect USER0 is not used -- see vread() */
void *map = kmap_atomic(p);
memcpy(buf, map + offset, length);
kunmap_atomic(map);
} else
memset(buf, 0, length);
void *map = kmap_local_page(page);

copied = copy_to_iter(map + offset, length, iter);
kunmap_local(map);
}

if (copied < length)
iov_iter_zero(length - copied, iter);

addr += length;
buf += length;
copied += length;
count -= length;
}
return copied;
}

static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags)
static void vmap_ram_vread_iter(struct iov_iter *iter, char *addr, int count,
unsigned long flags)
{
char *start;
struct vmap_block *vb;
@@ -3496,7 +3496,7 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
* handle it here.
*/
if (!(flags & VMAP_BLOCK)) {
aligned_vread(buf, addr, count);
aligned_vread_iter(iter, addr, count);
return;
}

@@ -3517,22 +3517,24 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
if (!count)
break;
start = vmap_block_vaddr(vb->va->va_start, rs);
while (addr < start) {

if (addr < start) {
size_t to_zero = min_t(size_t, start - addr, count);

iov_iter_zero(to_zero, iter);
addr += to_zero;
count -= (int)to_zero;
if (count == 0)
goto unlock;
*buf = '\0';
buf++;
addr++;
count--;
}

/*it could start reading from the middle of used region*/
offset = offset_in_page(addr);
n = ((re - rs + 1) << PAGE_SHIFT) - offset;
if (n > count)
n = count;
aligned_vread(buf, start+offset, n);
aligned_vread_iter(iter, start + offset, n);

buf += n;
addr += n;
count -= n;
}
@@ -3541,15 +3543,15 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags

finished:
/* zero-fill the left dirty or free regions */
if (count)
memset(buf, 0, count);
if (count > 0)
iov_iter_zero(count, iter);
}

/**
* vread() - read vmalloc area in a safe way.
* @buf: buffer for reading data
* @addr: vm address.
* @count: number of bytes to be read.
* vread_iter() - read vmalloc area in a safe way to an iterator.
* @iter: the iterator to which data should be written.
* @addr: vm address.
* @count: number of bytes to be read.
*
* This function checks that addr is a valid vmalloc'ed area, and
* copy data from that area to a given buffer. If the given memory range
@@ -3569,13 +3571,13 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
* (same number as @count) or %0 if [addr...addr+count) doesn't
* include any intersection with valid vmalloc area
*/
long vread(char *buf, char *addr, unsigned long count)
long vread_iter(struct iov_iter *iter, char *addr, size_t count)
{
struct vmap_area *va;
struct vm_struct *vm;
char *vaddr, *buf_start = buf;
unsigned long buflen = count;
unsigned long n, size, flags;
char *vaddr;
size_t buflen = count;
size_t n, size, flags;

might_sleep();

@@ -3595,7 +3597,7 @@ long vread(char *buf, char *addr, unsigned long count)
goto finished;

list_for_each_entry_from(va, &vmap_area_list, list) {
if (!count)
if (count == 0)
break;

vm = va->vm;
@@ -3619,36 +3621,39 @@ long vread(char *buf, char *addr, unsigned long count)

if (addr >= vaddr + size)
continue;
while (addr < vaddr) {

if (addr < vaddr) {
size_t to_zero = min_t(size_t, vaddr - addr, count);

iov_iter_zero(to_zero, iter);
addr += to_zero;
count -= to_zero;
if (count == 0)
goto finished;
*buf = '\0';
buf++;
addr++;
count--;
}

n = vaddr + size - addr;
if (n > count)
n = count;

if (flags & VMAP_RAM)
vmap_ram_vread(buf, addr, n, flags);
vmap_ram_vread_iter(iter, addr, n, flags);
else if (!(vm->flags & VM_IOREMAP))
aligned_vread(buf, addr, n);
aligned_vread_iter(iter, addr, n);
else /* IOREMAP area is treated as memory hole */
memset(buf, 0, n);
buf += n;
iov_iter_zero(n, iter);

addr += n;
count -= n;
}
finished:
up_read(&vmap_area_lock);

if (buf == buf_start)
if (count == buflen)
return 0;
/* zero-fill memory holes */
if (buf != buf_start + buflen)
memset(buf, 0, buflen - (buf - buf_start));
if (count > 0)
iov_iter_zero(count, iter);

return buflen;
}
