Skip to content

Commit

Permalink
gpu: ion: Also shrink memory cached in the deferred free list
Browse files Browse the repository at this point in the history
When the system is low on memory, we want to shrink any cached
system memory ion is holding.  Previously we were shrinking memory
in the page pools, but not in the deferred free list.  This patch
makes it possible to shrink both.  It also moves the shrinker
code into the heaps so they can correctly manage any caches they
might contain.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
Rebecca Schultz Zavin authored and Greg Kroah-Hartman committed Dec 14, 2013
1 parent da4aab3 commit ea313b5
Show file tree
Hide file tree
Showing 5 changed files with 278 additions and 176 deletions.
115 changes: 45 additions & 70 deletions drivers/staging/android/ion/ion.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,6 @@
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
Expand Down Expand Up @@ -143,7 +141,6 @@ static void ion_buffer_add(struct ion_device *dev,

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
Expand All @@ -170,7 +167,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;

ion_heap_drain_freelist(heap);
ion_heap_freelist_drain(heap, 0);
ret = heap->ops->allocate(heap, buffer, len, align,
flags);
if (ret)
Expand Down Expand Up @@ -230,7 +227,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
return ERR_PTR(ret);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
Expand All @@ -241,7 +238,7 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
kfree(buffer);
}

static void ion_buffer_destroy(struct kref *kref)
static void _ion_buffer_destroy(struct kref *kref)
{
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
Expand All @@ -251,14 +248,10 @@ static void ion_buffer_destroy(struct kref *kref)
rb_erase(&buffer->node, &dev->buffers);
mutex_unlock(&dev->buffer_lock);

if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
rt_mutex_lock(&heap->lock);
list_add(&buffer->list, &heap->free_list);
rt_mutex_unlock(&heap->lock);
wake_up(&heap->waitqueue);
return;
}
_ion_buffer_destroy(buffer);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_freelist_add(heap, buffer);
else
ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
Expand All @@ -268,7 +261,7 @@ static void ion_buffer_get(struct ion_buffer *buffer)

static int ion_buffer_put(struct ion_buffer *buffer)
{
return kref_put(&buffer->ref, ion_buffer_destroy);
return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
Expand Down Expand Up @@ -1298,80 +1291,53 @@ static const struct file_operations debug_heap_fops = {
.release = single_release,
};

static size_t ion_heap_free_list_is_empty(struct ion_heap *heap)
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
bool is_empty;
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;

rt_mutex_lock(&heap->lock);
is_empty = list_empty(&heap->free_list);
rt_mutex_unlock(&heap->lock);
sc.gfp_mask = -1;
sc.nr_to_scan = 0;

return is_empty;
}

static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
if (!val)
return 0;

while (true) {
struct ion_buffer *buffer;
objs = heap->shrinker.shrink(&heap->shrinker, &sc);
sc.nr_to_scan = objs;

wait_event_freezable(heap->waitqueue,
!ion_heap_free_list_is_empty(heap));

rt_mutex_lock(&heap->lock);
if (list_empty(&heap->free_list)) {
rt_mutex_unlock(&heap->lock);
continue;
}
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
rt_mutex_unlock(&heap->lock);
_ion_buffer_destroy(buffer);
}

return 0;
heap->shrinker.shrink(&heap->shrinker, &sc);
return 0;
}

static bool ion_heap_drain_freelist(struct ion_heap *heap)
static int debug_shrink_get(void *data, u64 *val)
{
struct ion_buffer *buffer, *tmp;

if (ion_heap_free_list_is_empty(heap))
return false;
rt_mutex_lock(&heap->lock);
list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
list_del(&buffer->list);
_ion_buffer_destroy(buffer);
}
BUG_ON(!list_empty(&heap->free_list));
rt_mutex_unlock(&heap->lock);
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;

sc.gfp_mask = -1;
sc.nr_to_scan = 0;

return true;
objs = heap->shrinker.shrink(&heap->shrinker, &sc);
*val = objs;
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct sched_param param = { .sched_priority = 0 };

if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);

if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
INIT_LIST_HEAD(&heap->free_list);
rt_mutex_init(&heap->lock);
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task))
pr_err("%s: creating thread for deferred free failed\n",
__func__);
}
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);

heap->dev = dev;
down_write(&dev->lock);
Expand All @@ -1381,6 +1347,15 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
plist_add(&heap->node, &dev->heaps);
debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
&debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
if (heap->shrinker.shrink) {
char debug_name[64];

snprintf(debug_name, 64, "%s_shrink", heap->name);
debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
&debug_shrink_fops);
}
#endif
up_write(&dev->lock);
}

Expand Down
107 changes: 107 additions & 0 deletions drivers/staging/android/ion/ion_heap.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,11 @@
*/

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
Expand Down Expand Up @@ -130,6 +134,109 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer)
return ret;
}

/*
 * Release one (possibly high-order) page allocation owned by @buffer.
 *
 * Buffers that fault their pages into userspace one at a time must have
 * those pages freed individually; any other buffer hands the whole
 * high-order block back in a single call.
 */
void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
		unsigned int order)
{
	unsigned long npages = 1UL << order;
	unsigned long idx;

	if (ion_buffer_fault_user_mappings(buffer)) {
		for (idx = 0; idx < npages; idx++)
			__free_page(&page[idx]);
	} else {
		__free_pages(page, order);
	}
}

/*
 * Queue @buffer on @heap's deferred-free list and wake the freeing thread.
 *
 * free_list_size is updated under heap->lock so it always matches the
 * list contents; the wakeup is issued after the lock is dropped so the
 * deferred-free thread can take the lock immediately.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
{
rt_mutex_lock(&heap->lock);
list_add(&buffer->list, &heap->free_list);
heap->free_list_size += buffer->size;
rt_mutex_unlock(&heap->lock);
wake_up(&heap->waitqueue);
}

/*
 * Return a snapshot of the number of bytes currently queued on @heap's
 * deferred-free list. Taken under heap->lock so the value is internally
 * consistent, though it may be stale by the time the caller uses it.
 */
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t sz;

	rt_mutex_lock(&heap->lock);
	sz = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);
	return sz;
}

/*
 * Synchronously free up to @size bytes of buffers queued on @heap's
 * deferred-free list. @size == 0 means "drain everything currently
 * queued". Returns the number of bytes actually drained.
 *
 * Fix: the original read buffer->size AFTER ion_buffer_destroy() had
 * already kfree()d the buffer — a use-after-free. Do all accounting
 * that touches the buffer before destroying it.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		/* account first: ion_buffer_destroy() frees the buffer */
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

/*
 * Kernel-thread body that services @heap's deferred-free list.
 *
 * Sleeps (freezably, so it cooperates with system suspend) until the
 * freelist is non-empty, then pops and destroys one buffer per loop
 * iteration. The list-empty re-check under heap->lock is required:
 * another path (e.g. ion_heap_freelist_drain()) may have emptied the
 * list between the wakeup and lock acquisition. free_list_size is
 * decremented under the lock, before the buffer is destroyed.
 *
 * Never returns in practice; the trailing return 0 satisfies the
 * kthread function signature.
 */
int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;

while (true) {
struct ion_buffer *buffer;

wait_event_freezable(heap->waitqueue,
ion_heap_freelist_size(heap) > 0);

rt_mutex_lock(&heap->lock);
if (list_empty(&heap->free_list)) {
rt_mutex_unlock(&heap->lock);
continue;
}
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
rt_mutex_unlock(&heap->lock);
ion_buffer_destroy(buffer);
}

return 0;
}

/*
 * Set up @heap's deferred-free machinery: the freelist, its lock and
 * waitqueue, and the per-heap kthread that drains it at SCHED_IDLE
 * priority. Returns 0 on success or the kthread_run() error code.
 *
 * Fix: the original called sched_setscheduler() on heap->task BEFORE
 * checking IS_ERR(heap->task); if kthread_run() failed that dereferenced
 * an ERR_PTR. Check for the error first.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	/* background freeing should never compete with real work */
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
Expand Down
Loading

0 comments on commit ea313b5

Please sign in to comment.