author    Mitchel Humpherys <mitchelh@codeaurora.org>    2014-02-17 13:58:39 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2014-02-18 11:05:08 -0800
commit    53a91c68fa7b5f3ca45d2f1c88bf36a988b74e81 (patch)
tree      09ecd833ab1fd77bbdf0b0e0ac4cd8655a396ec9 /drivers/staging/android/ion/ion_heap.c
parent    b9daf0b60b8a6a5151fca0e8cbb2dab763a3e92a (diff)
staging: ion: Add private buffer flag to skip page pooling on free
Currently, when we free a buffer it might actually just go back into a heap-specific page pool rather than going back to the system. This poses a problem because sometimes (like when we're running a shrinker in low memory conditions) we need to force the memory associated with the buffer to truly be relinquished to the system rather than just going back into a page pool. There isn't a use case for this flag by Ion clients, so make it a private flag. The main use case right now is to provide a mechanism for the deferred free code to force stale buffers to bypass page pooling.

Cc: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
[jstultz: Minor commit subject tweak]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
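For context, the consumer of this new flag is the heap's own free path: a pooled heap checks ION_PRIV_FLAG_SHRINKER_FREE on the buffer and, if it is set, hands pages straight back to the system instead of refilling its pool. Below is a minimal sketch of that check, modeled loosely on the Ion system heap; free_buffer_page(), the pools[] array, and order_to_index() are illustrative assumptions here, not part of this patch.

	/* Sketch: per-page free path of a pooled heap honoring the flag. */
	static void free_buffer_page(struct ion_system_heap *sys_heap,
				     struct ion_buffer *buffer, struct page *page)
	{
		unsigned int order = compound_order(page);

		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
			/* Shrinker/low-memory path: release to the system. */
			__free_pages(page, order);
			return;
		}

		/* Normal path: recycle the page into the heap's page pool. */
		ion_page_pool_free(sys_heap->pools[order_to_index(order)], page);
	}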
Diffstat (limited to 'drivers/staging/android/ion/ion_heap.c')
-rw-r--r--  drivers/staging/android/ion/ion_heap.c | 19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 49ace13ac545..bdc6a28ba8c9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -178,7 +178,8 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
 	return size;
 }
 
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+				bool skip_pools)
 {
 	struct ion_buffer *buffer;
 	size_t total_drained = 0;
@@ -197,6 +198,8 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 					  list);
 		list_del(&buffer->list);
 		heap->free_list_size -= buffer->size;
+		if (skip_pools)
+			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 		total_drained += buffer->size;
 		spin_unlock(&heap->free_lock);
 		ion_buffer_destroy(buffer);
@@ -207,6 +210,16 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 	return total_drained;
 }
 
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, true);
+}
+
 static int ion_heap_deferred_free(void *data)
 {
 	struct ion_heap *heap = data;
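The split above yields two entry points over the same drain loop: ion_heap_freelist_drain() for ordinary draining, where destroyed buffers may still land in a page pool, and ion_heap_freelist_shrink() for memory pressure, where pooling is bypassed via the new private flag. Since the helper substitutes heap->free_list_size when size is 0, passing 0 drains everything. Hypothetical call sites (nr_bytes is an assumed variable, not from this patch):

	/* Drain the whole freelist; recycled pages may go back to pools. */
	ion_heap_freelist_drain(heap, 0);

	/* Under memory pressure: force nr_bytes back to the system. */
	ion_heap_freelist_shrink(heap, nr_bytes);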
@@ -278,10 +291,10 @@ static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
 	/*
 	 * shrink the free list first, no point in zeroing the memory if we're
-	 * just going to reclaim it
+	 * just going to reclaim it. Also, skip any possible page pooling.
 	 */
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-		freed = ion_heap_freelist_drain(heap, to_scan * PAGE_SIZE) /
+		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
 				PAGE_SIZE;
 	to_scan -= freed;
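Note the unit conversion in the hunk above: the freelist API works in bytes while the shrinker works in pages, hence the multiply and divide by PAGE_SIZE. As a worked example, with 4 KiB pages and to_scan = 32, the call asks to shrink 32 * 4096 = 131072 bytes; if only 24 pages' worth (98304 bytes) sat on the freelist, freed = 98304 / 4096 = 24, leaving to_scan = 8 pages for the remainder of the scan.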