v2->v3: Split out the oom killer patch only. It is based on Nishanth's patch, which changes ion_debug_heap_total() to take an id.
1. Add heap_found.
2. Solve the issue of several ids sharing one type: use ion_debug_heap_total(client, heap->id) instead of ion_debug_heap_total(client, heap->type), since an id is unique while a type can be shared. Fortunately Nishanth has already posted such a patch, so this series is rebased on top of it.

v1->v2: Sync to the Aug 30 common.git.

v0->v1:
1. Move ion_shrink out of the mutex, as suggested by Nishanth.
2. Check for the ERR_PTR(-ENOMEM) error flag.
3. Add msleep to allow the killed tasks to be scheduled out.

Based on common.git, android-3.4 branch.

Add an oom killer. Once the heap is exhausted, SIGKILL is sent to the task with the highest oom_score_adj that holds buffers in the heap, and to all tasks referencing those buffers.
Nishanth Peethambaran (1): gpu: ion: Update debugfs to show for each id
Zhangfei Gao (1): gpu: ion: oom killer
 drivers/gpu/ion/ion.c |  131 +++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 121 insertions(+), 10 deletions(-)
From: Nishanth Peethambaran <nishanth@broadcom.com>
Update the debugfs read of client and heap to show based on 'id' instead of 'type'. Multiple heaps of the same type can be present, but id is unique.
Signed-off-by: Nishanth Peethambaran <nishanth@broadcom.com>
---
 drivers/gpu/ion/ion.c |   14 +++++++-------
 1 files changed, 7 insertions(+), 7 deletions(-)
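Note that a single board can register several heaps that share ION_HEAP_TYPE_CARVEOUT (or any other type) while each carries its own id, which is why id is the right key for per-heap accounting. A rough sketch of such platform data (the names, ids, base addresses and sizes below are made up for illustration):

#include <linux/ion.h>	/* struct ion_platform_heap, ION_HEAP_TYPE_CARVEOUT */

/* Two heaps of the same type, distinguished only by their ids. */
static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = 1,			/* unique id */
		.name = "carveout_heap",
		.base = 0x10000000,		/* made-up base/size */
		.size = 16 * 1024 * 1024,
	},
	{
		.type = ION_HEAP_TYPE_CARVEOUT,	/* same type ... */
		.id   = 2,			/* ... different id */
		.name = "carveout_heap2",
		.base = 0x11000000,
		.size = 16 * 1024 * 1024,
	},
};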
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 207d00f..96f9cd7 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -579,11 +579,11 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
 	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
-		enum ion_heap_type type = handle->buffer->heap->type;
+		int id = handle->buffer->heap->id;
 
-		if (!names[type])
-			names[type] = handle->buffer->heap->name;
-		sizes[type] += handle->buffer->size;
+		if (!names[id])
+			names[id] = handle->buffer->heap->name;
+		sizes[id] += handle->buffer->size;
 	}
 	mutex_unlock(&client->lock);
 
@@ -1153,7 +1153,7 @@ static const struct file_operations ion_fops = {
 };
 
 static size_t ion_debug_heap_total(struct ion_client *client,
-				   enum ion_heap_type type)
+				   int id)
 {
 	size_t size = 0;
 	struct rb_node *n;
@@ -1163,7 +1163,7 @@ static size_t ion_debug_heap_total(struct ion_client *client,
 		struct ion_handle *handle = rb_entry(n,
 						     struct ion_handle,
 						     node);
-		if (handle->buffer->heap->type == type)
+		if (handle->buffer->heap->id == id)
 			size += handle->buffer->size;
 	}
 	mutex_unlock(&client->lock);
@@ -1184,7 +1184,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
-		size_t size = ion_debug_heap_total(client, heap->type);
+		size_t size = ion_debug_heap_total(client, heap->id);
 		if (!size)
 			continue;
 		if (client->task) {
ion_shrink is called when the carveout allocation fails with ION_CARVEOUT_ALLOCATE_FAIL (i.e. the heap returns -ENOMEM).
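For reference, ION_CARVEOUT_ALLOCATE_FAIL is what ion_carveout_allocate() returns when gen_pool_alloc() finds no free space; the heap's allocate op turns it into the -ENOMEM that ion_alloc() checks for before shrinking and retrying. Roughly, paraphrasing the android-3.4 drivers/gpu/ion/ion_carveout_heap.c:

static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	/* ION_CARVEOUT_ALLOCATE_FAIL becomes -ENOMEM, the error ion_alloc()
	 * reacts to by calling ion_shrink() and retrying */
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}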
How to test:
mount -t debugfs none /mnt
cat /mnt/ion/carveout_heap
echo adj > /mnt/ion/carveout_heap
SIGKILL is sent to the task with the highest oom_score_adj above the written adj that is using ion buffers.
All other tasks referencing those buffers are also killed, so that no task is left using a freed buffer.
Example:
cat /mnt/ion/carveout_heap
          client              pid             size    oom_score_adj
        ion_test              191             4096                0
        ion_test              192             4096                0
        ion_test              193             4096                0

echo -1 > /mnt/ion/carveout_heap
[ 1333.689318] SIGKILL pid: 191
[ 1333.692192] SIGKILL pid: 192
[ 1333.695436] SIGKILL pid: 193
[ 1333.698312] SIGKILL pid: 193 size: 4096 adj: 0
[1]+  Killed                     ./ion_test 2

cat /mnt/ion/carveout_heap
          client              pid             size    oom_score_adj
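The ion_test clients in the example above can be as small as the following sketch (it assumes the android-3.4 ion uapi from <linux/ion.h>, a /dev/ion device node, and that the carveout heap id on this board is 2; adjust the heap mask for the actual board):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_allocation_data alloc = {
		.len	   = 4096,
		.align	   = 4096,
		.heap_mask = 1 << 2,	/* assumed carveout heap id */
		.flags	   = 0,
	};
	int fd = open("/dev/ion", O_RDWR);

	if (fd < 0 || ioctl(fd, ION_IOC_ALLOC, &alloc) < 0) {
		perror("ion alloc");
		return 1;
	}

	/* keep the fd and handle alive so the buffer stays accounted to
	 * this client in /mnt/ion/carveout_heap until the task is killed */
	pause();
	return 0;
}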
Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
---
 drivers/gpu/ion/ion.c |  117 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 114 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 96f9cd7..383f588 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -32,6 +32,8 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
+#include <linux/oom.h>
+#include <linux/delay.h>
 
 #include "ion_priv.h"
 
@@ -367,6 +369,8 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 	rb_insert_color(&handle->node, &client->handles);
 }
 
+static int ion_shrink(struct ion_heap *heap, int kill_adj);
+
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 			     size_t align, unsigned int heap_mask,
 			     unsigned int flags)
@@ -375,6 +379,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
 	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap_found = NULL;
 
 	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__,
 		 len, align, heap_mask, flags);
@@ -389,6 +394,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 
 	len = PAGE_ALIGN(len);
 
+retry:
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
@@ -398,12 +404,18 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 		/* if the caller didn't specify this heap type */
 		if (!((1 << heap->id) & heap_mask))
 			continue;
+		heap_found = heap;
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
 		if (!IS_ERR_OR_NULL(buffer))
 			break;
 	}
 	mutex_unlock(&dev->lock);
 
+	if ((buffer == ERR_PTR(-ENOMEM) && heap_found)) {
+		if (!ion_shrink(heap_found, 0))
+			goto retry;
+	}
+
 	if (buffer == NULL)
 		return ERR_PTR(-ENODEV);
 
@@ -1178,7 +1190,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
 	size_t total_size = 0;
 	size_t total_orphaned_size = 0;
 
-	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+	seq_printf(s, "%16.s %16.s %16.s %16.s\n",
+			"client", "pid", "size", "oom_score_adj");
 	seq_printf(s, "----------------------------------------------------\n");
 
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
@@ -1191,8 +1204,9 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
 			char task_comm[TASK_COMM_LEN];
 
 			get_task_comm(task_comm, client->task);
-			seq_printf(s, "%16.s %16u %16u\n", task_comm,
-				   client->pid, size);
+			seq_printf(s, "%16.s %16u %16u %16d\n", task_comm,
+				   client->pid, size,
+				   client->task->signal->oom_score_adj);
 		} else {
 			seq_printf(s, "%16.s %16u %16u\n", client->name,
 				   client->pid, size);
@@ -1227,13 +1241,110 @@ static int ion_debug_heap_open(struct inode *inode, struct file *file)
 	return single_open(file, ion_debug_heap_show, inode->i_private);
 }
 
+static ssize_t
+ion_debug_heap_write(struct file *file, const char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	struct ion_heap *heap =
+		((struct seq_file *)file->private_data)->private;
+	char buf[16];
+	long kill_adj, ret;
+
+	memset(buf, 0, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	ret = kstrtol(buf, 10, &kill_adj);
+	if (ret)
+		return ret;
+
+	ion_shrink(heap, kill_adj);
+
+	return count;
+}
 static const struct file_operations debug_heap_fops = {
 	.open = ion_debug_heap_open,
+	.write = ion_debug_heap_write,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
 
+/*
+ * ion_shrink
+ * kill all tasks that reference the buffers held by the selected task
+ */
+static int ion_shrink(struct ion_heap *heap, int kill_adj)
+{
+	struct rb_node *n;
+	struct ion_client *client = NULL;
+	struct ion_device *dev = heap->dev;
+	struct task_struct *selected = NULL;
+	int selected_size = 0;
+	int selected_oom_score_adj = 0;
+
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		size_t size;
+		struct task_struct *p;
+
+		client = rb_entry(n, struct ion_client, node);
+		if (!client->task)
+			continue;
+
+		p = client->task;
+
+		if ((p->signal->oom_score_adj <= kill_adj) ||
+		    (p->signal->oom_score_adj < selected_oom_score_adj))
+			continue;
+
+		size = ion_debug_heap_total(client, heap->id);
+		if (!size)
+			continue;
+		if (size < selected_size)
+			continue;
+
+		selected = p;
+		selected_size = size;
+		selected_oom_score_adj = p->signal->oom_score_adj;
+	}
+
+	if (selected) {
+		/* kill all processes that reference buffers shared with this client */
+		mutex_lock(&client->lock);
+		for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+			struct rb_node *r;
+			struct ion_client *c;
+			struct ion_handle *handle = rb_entry(n,
+							     struct ion_handle,
+							     node);
+
+			for (r = rb_first(&dev->clients); r; r = rb_next(r)) {
+				struct ion_handle *h;
+
+				c = rb_entry(r, struct ion_client, node);
+				h = ion_handle_lookup(c, handle->buffer);
+				if (!IS_ERR_OR_NULL(h)) {
+					send_sig(SIGKILL, c->task, 0);
+					pr_info("SIGKILL pid: %u\n",
+						c->task->pid);
+				}
+
+			}
+		}
+		mutex_unlock(&client->lock);
+
+		send_sig(SIGKILL, selected, 0);
+		set_tsk_thread_flag(selected, TIF_MEMDIE);
+		pr_info("SIGKILL pid: %u size: %u adj: %u\n",
+			selected->pid, selected_size,
+			selected_oom_score_adj);
+		msleep(20);
+		return 0;
+	}
+	return -EAGAIN;
+}
+
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct rb_node **p = &dev->heaps.rb_node;