aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging/android
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/staging/android')
-rw-r--r--drivers/staging/android/logger.c192
-rw-r--r--drivers/staging/android/logger.h29
-rw-r--r--drivers/staging/android/lowmemorykiller.c313
3 files changed, 485 insertions, 49 deletions
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 4a3ce20..54481b2 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -58,6 +58,8 @@ struct logger_reader {
struct logger_log *log; /* associated log */
struct list_head list; /* entry in logger_log's list */
size_t r_off; /* current read head offset */
+ bool r_all; /* reader can read all entries */
+ int r_ver; /* reader ABI version */
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
@@ -87,25 +89,71 @@ static inline struct logger_log *file_get_log(struct file *file)
}
/*
- * get_entry_len - Grabs the length of the payload of the next entry starting
- * from 'off'.
+ * get_entry_header - returns a pointer to the logger_entry header within
+ * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
+ * be provided. Typically the return value will be a pointer within
+ * 'log->buffer'. However, a pointer to 'scratch' may be returned if
+ * the log entry spans the end and beginning of the circular buffer.
+ */
+static struct logger_entry *get_entry_header(struct logger_log *log,
+ size_t off, struct logger_entry *scratch)
+{
+ size_t len = min(sizeof(struct logger_entry), log->size - off);
+ if (len != sizeof(struct logger_entry)) {
+ memcpy(((void *) scratch), log->buffer + off, len);
+ memcpy(((void *) scratch) + len, log->buffer,
+ sizeof(struct logger_entry) - len);
+ return scratch;
+ }
+
+ return (struct logger_entry *) (log->buffer + off);
+}
+
+/*
+ * get_entry_msg_len - Grabs the length of the message of the entry
+ * starting from 'off'.
*
* Caller needs to hold log->mutex.
*/
-static __u32 get_entry_len(struct logger_log *log, size_t off)
+static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
- __u16 val;
+ struct logger_entry scratch;
+ struct logger_entry *entry;
- switch (log->size - off) {
- case 1:
- memcpy(&val, log->buffer + off, 1);
- memcpy(((char *) &val) + 1, log->buffer, 1);
- break;
- default:
- memcpy(&val, log->buffer + off, 2);
+ entry = get_entry_header(log, off, &scratch);
+ return entry->len;
+}
+
+static size_t get_user_hdr_len(int ver)
+{
+ if (ver < 2)
+ return sizeof(struct user_logger_entry_compat);
+ else
+ return sizeof(struct logger_entry);
+}
+
+static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
+ char __user *buf)
+{
+ void *hdr;
+ size_t hdr_len;
+ struct user_logger_entry_compat v1;
+
+ if (ver < 2) {
+ v1.len = entry->len;
+ v1.__pad = 0;
+ v1.pid = entry->pid;
+ v1.tid = entry->tid;
+ v1.sec = entry->sec;
+ v1.nsec = entry->nsec;
+ hdr = &v1;
+ hdr_len = sizeof(struct user_logger_entry_compat);
+ } else {
+ hdr = entry;
+ hdr_len = sizeof(struct logger_entry);
}
- return sizeof(struct logger_entry) + val;
+ return copy_to_user(buf, hdr, hdr_len);
}
/*
@@ -119,15 +167,30 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
char __user *buf,
size_t count)
{
+ struct logger_entry scratch;
+ struct logger_entry *entry;
size_t len;
+ size_t msg_start;
/*
- * We read from the log in two disjoint operations. First, we read from
- * the current read head offset up to 'count' bytes or to the end of
+ * First, copy the header to userspace, using the version of
+ * the header requested
+ */
+ entry = get_entry_header(log, reader->r_off, &scratch);
+ if (copy_header_to_user(reader->r_ver, entry, buf))
+ return -EFAULT;
+
+ count -= get_user_hdr_len(reader->r_ver);
+ buf += get_user_hdr_len(reader->r_ver);
+ msg_start = logger_offset(reader->r_off + sizeof(struct logger_entry));
+
+ /*
+ * We read from the msg in two disjoint operations. First, we read from
+ * the current msg head offset up to 'count' bytes or to the end of
* the log, whichever comes first.
*/
- len = min(count, log->size - reader->r_off);
- if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ len = min(count, log->size - msg_start);
+ if (copy_to_user(buf, log->buffer + msg_start, len))
return -EFAULT;
/*
@@ -138,9 +201,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
- reader->r_off = logger_offset(reader->r_off + count);
+ reader->r_off = logger_offset(reader->r_off +
+ sizeof(struct logger_entry) + count);
- return count;
+ return count + get_user_hdr_len(reader->r_ver);
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log->buffer' which contains the first entry readable by 'euid'
+ */
+static size_t get_next_entry_by_uid(struct logger_log *log,
+ size_t off, uid_t euid)
+{
+ while (off != log->w_off) {
+ struct logger_entry *entry;
+ struct logger_entry scratch;
+ size_t next_len;
+
+ entry = get_entry_header(log, off, &scratch);
+
+ if (entry->euid == euid)
+ return off;
+
+ next_len = sizeof(struct logger_entry) + entry->len;
+ off = logger_offset(off + next_len);
+ }
+
+ return off;
}
/*
@@ -152,7 +240,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
* - If there are no log entries to read, blocks until log is written to
* - Atomically reads exactly one log entry
*
- * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * Will set errno to EINVAL if read
* buffer is insufficient to hold next entry.
*/
static ssize_t logger_read(struct file *file, char __user *buf,
@@ -192,6 +280,10 @@ start:
mutex_lock(&log->mutex);
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
/* is there still something to read or did we race? */
if (unlikely(log->w_off == reader->r_off)) {
mutex_unlock(&log->mutex);
@@ -199,7 +291,8 @@ start:
}
/* get the size of the next entry */
- ret = get_entry_len(log, reader->r_off);
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
if (count < ret) {
ret = -EINVAL;
goto out;
@@ -225,7 +318,8 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
size_t count = 0;
do {
- size_t nr = get_entry_len(log, off);
+ size_t nr = sizeof(struct logger_entry) +
+ get_entry_msg_len(log, off);
off = logger_offset(off + nr);
count += nr;
} while (count < len);
@@ -351,7 +445,9 @@ ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
header.tid = current->pid;
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
+ header.euid = current_euid();
header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+ header.hdr_size = sizeof(struct logger_entry);
/* null writes succeed, return zero */
if (unlikely(!header.len))
@@ -424,6 +520,10 @@ static int logger_open(struct inode *inode, struct file *file)
return -ENOMEM;
reader->log = log;
+ reader->r_ver = 1;
+ reader->r_all = in_egroup_p(inode->i_gid) ||
+ capable(CAP_SYSLOG);
+
INIT_LIST_HEAD(&reader->list);
mutex_lock(&log->mutex);
@@ -485,6 +585,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
poll_wait(file, &log->wq, wait);
mutex_lock(&log->mutex);
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
if (log->w_off != reader->r_off)
ret |= POLLIN | POLLRDNORM;
mutex_unlock(&log->mutex);
@@ -492,11 +596,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
return ret;
}
+static long logger_set_version(struct logger_reader *reader, void __user *arg)
+{
+ int version;
+ if (copy_from_user(&version, arg, sizeof(int)))
+ return -EFAULT;
+
+ if ((version < 1) || (version > 2))
+ return -EINVAL;
+
+ reader->r_ver = version;
+ return 0;
+}
+
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct logger_log *log = file_get_log(file);
struct logger_reader *reader;
- long ret = -ENOTTY;
+ long ret = -EINVAL;
+ void __user *argp = (void __user *) arg;
mutex_lock(&log->mutex);
@@ -521,8 +639,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
reader = file->private_data;
+
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
if (log->w_off != reader->r_off)
- ret = get_entry_len(log, reader->r_off);
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
else
ret = 0;
break;
@@ -536,6 +660,22 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
log->head = log->w_off;
ret = 0;
break;
+ case LOGGER_GET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = reader->r_ver;
+ break;
+ case LOGGER_SET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = logger_set_version(reader, argp);
+ break;
}
mutex_unlock(&log->mutex);
@@ -556,8 +696,8 @@ static const struct file_operations logger_fops = {
/*
* Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
- * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
- * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ * must be a power of two, and greater than
+ * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
*/
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
@@ -579,7 +719,7 @@ static struct logger_log VAR = { \
DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 2048*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
-DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 512*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 1024*1024)
DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
DEFINE_LOGGER_DEVICE(log_sf, LOGGER_LOG_SF, 256*1024)
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
index 569518a..c2914d9 100644
--- a/drivers/staging/android/logger.h
+++ b/drivers/staging/android/logger.h
@@ -20,7 +20,12 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-struct logger_entry {
+/*
+ * The userspace structure for version 1 of the logger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
+ */
+struct user_logger_entry_compat {
__u16 len; /* length of the payload */
__u16 __pad; /* no matter what, we get 2 bytes of padding */
__s32 pid; /* generating process's pid */
@@ -30,15 +35,29 @@ struct logger_entry {
char msg[0]; /* the entry's payload */
};
+/*
+ * The structure for version 2 of the logger_entry ABI.
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
+ * is called with version >= 2
+ */
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 hdr_size; /* sizeof(struct logger_entry) */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ uid_t euid; /* effective UID of logger */
+ char msg[0]; /* the entry's payload */
+};
+
#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
#define LOGGER_LOG_MAIN "log_main" /* everything else */
#define LOGGER_LOG_SF "log_sf" /* surfaceflinger */
-#define LOGGER_ENTRY_MAX_LEN (4*1024)
-#define LOGGER_ENTRY_MAX_PAYLOAD \
- (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+#define LOGGER_ENTRY_MAX_PAYLOAD 4076
#define __LOGGERIO 0xAE
@@ -46,5 +65,7 @@ struct logger_entry {
#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
+#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */
+#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */
#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index a502cba..15bbcd3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -35,6 +35,12 @@
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/notifier.h>
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+#include <linux/swap.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mm_inline.h>
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
#define ENHANCED_LMK_ROUTINE
#ifdef ENHANCED_LMK_ROUTINE
@@ -56,6 +62,35 @@ static size_t lowmem_minfree[6] = {
16 * 1024, /* 64MB */
};
static int lowmem_minfree_size = 4;
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+static struct class *lmk_class;
+static struct device *lmk_dev;
+static int lmk_kill_pid = 0;
+static int lmk_kill_ok = 0;
+
+extern atomic_t optimize_comp_on;
+
+extern int isolate_lru_page_compcache(struct page *page);
+extern void putback_lru_page(struct page *page);
+extern unsigned int zone_id_shrink_pagelist(struct zone *zone_id,struct list_head *page_list);
+
+#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+
+#define SWAP_PROCESS_DEBUG_LOG 0
+/* free RAM 8M(2048 pages) */
+#define CHECK_FREE_MEMORY 2048
+/* free swap (10240 pages) */
+#define CHECK_FREE_SWAPSPACE 10240
+
+static unsigned int check_free_memory = 0;
+
+enum pageout_io {
+ PAGEOUT_IO_ASYNC,
+ PAGEOUT_IO_SYNC,
+};
+
+
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
#ifdef ENHANCED_LMK_ROUTINE
static struct task_struct *lowmem_deathpending[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
@@ -112,12 +147,18 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
int selected_oom_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
int all_selected_oom = 0;
+ int max_selected_oom_idx = 0;
#else
int selected_tasksize = 0;
int selected_oom_adj;
#endif
int array_size = ARRAY_SIZE(lowmem_adj);
+#ifndef CONFIG_DMA_CMA
int other_free = global_page_state(NR_FREE_PAGES);
+#else
+ int other_free = global_page_state(NR_FREE_PAGES) -
+ global_page_state(NR_FREE_CMA_PAGES);
+#endif
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
@@ -177,7 +218,9 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
struct mm_struct *mm;
struct signal_struct *sig;
int oom_adj;
-
+#ifdef ENHANCED_LMK_ROUTINE
+ int is_exist_oom_task = 0;
+#endif
task_lock(p);
mm = p->mm;
sig = p->signal;
@@ -196,27 +239,40 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
continue;
#ifdef ENHANCED_LMK_ROUTINE
- for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
- if (all_selected_oom >= LOWMEM_DEATHPENDING_DEPTH) {
- if (oom_adj < selected_oom_adj[i])
- continue;
- if (oom_adj == selected_oom_adj[i] &&
- tasksize <= selected_tasksize[i])
- continue;
- } else if (selected[i])
- continue;
+ if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
+ for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
+ if (!selected[i]) {
+ is_exist_oom_task = 1;
+ max_selected_oom_idx = i;
+ break;
+ }
+ }
+ } else if (selected_oom_adj[max_selected_oom_idx] < oom_adj ||
+ (selected_oom_adj[max_selected_oom_idx] == oom_adj &&
+ selected_tasksize[max_selected_oom_idx] < tasksize)) {
+ is_exist_oom_task = 1;
+ }
- selected[i] = p;
- selected_tasksize[i] = tasksize;
- selected_oom_adj[i] = oom_adj;
+ if (is_exist_oom_task) {
+ selected[max_selected_oom_idx] = p;
+ selected_tasksize[max_selected_oom_idx] = tasksize;
+ selected_oom_adj[max_selected_oom_idx] = oom_adj;
if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
all_selected_oom++;
+ if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
+ for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
+ if (selected_oom_adj[i] < selected_oom_adj[max_selected_oom_idx])
+ max_selected_oom_idx = i;
+ else if (selected_oom_adj[i] == selected_oom_adj[max_selected_oom_idx] &&
+ selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
+ max_selected_oom_idx = i;
+ }
+ }
+
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_adj, tasksize);
-
- break;
}
#else
if (selected) {
@@ -239,10 +295,10 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_adj[i], selected_tasksize[i]);
- lowmem_deathpending[i] = selected[i];
- lowmem_deathpending_timeout = jiffies + HZ;
- force_sig(SIGKILL, selected[i]);
- rem -= selected_tasksize[i];
+ lowmem_deathpending[i] = selected[i];
+ lowmem_deathpending_timeout = jiffies + HZ;
+ force_sig(SIGKILL, selected[i]);
+ rem -= selected_tasksize[i];
}
}
#else
@@ -267,10 +323,229 @@ static struct shrinker lowmem_shrinker = {
.seeks = DEFAULT_SEEKS * 16
};
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+/*
+ * zone_id_shrink_pagelist() clear page flags,
+ * update the memory zone status, and swap pagelist
+ */
+
+static unsigned int shrink_pages(struct mm_struct *mm,
+ struct list_head *zone0_page_list,
+ struct list_head *zone1_page_list,
+ unsigned int num_to_scan)
+{
+ unsigned long addr;
+ unsigned int isolate_pages_countter = 0;
+
+ struct vm_area_struct *vma = mm->mmap;
+ while (vma != NULL) {
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ struct page *page;
+ /*get the page address from virtual memory address */
+ page = follow_page(vma, addr, FOLL_GET);
+
+ if (page && !IS_ERR(page)) {
+
+ put_page(page);
+ /* only moveable, anonymous and not dirty pages can be swapped */
+ if ((!PageUnevictable(page))
+ && (!PageDirty(page)) && ((PageAnon(page)))
+ && (0 == page_is_file_cache(page))) {
+ switch (page_zone_id(page)) {
+ case 0:
+ if (!isolate_lru_page_compcache(page)) {
+ /* isolate page from LRU and add to temp list */
+ /*create new page list, it will be used in shrink_page_list */
+ list_add_tail(&page->lru, zone0_page_list);
+ isolate_pages_countter++;
+ }
+ break;
+ case 1:
+ if (!isolate_lru_page_compcache(page)) {
+ /* isolate page from LRU and add to temp list */
+ /*create new page list, it will be used in shrink_page_list */
+ list_add_tail(&page->lru, zone1_page_list);
+ isolate_pages_countter++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (isolate_pages_countter >= num_to_scan) {
+ return isolate_pages_countter;
+ }
+ }
+
+ vma = vma->vm_next;
+ }
+
+ return isolate_pages_countter;
+}
+
+/*
+ * swap_application_pages() will search the
+ * pages which can be swapped, then call
+ * zone_id_shrink_pagelist to update zone
+ * status
+ */
+static unsigned int swap_pages(struct list_head *zone0_page_list,
+ struct list_head *zone1_page_list)
+{
+ struct zone *zone_id_0 = &NODE_DATA(0)->node_zones[0];
+ struct zone *zone_id_1 = &NODE_DATA(0)->node_zones[1];
+ unsigned int pages_counter = 0;
+
+ /*if the page list is not empty, call zone_id_shrink_pagelist to update zone status */
+ if ((zone_id_0) && (!list_empty(zone0_page_list))) {
+ pages_counter +=
+ zone_id_shrink_pagelist(zone_id_0, zone0_page_list);
+ }
+ if ((zone_id_1) && (!list_empty(zone1_page_list))) {
+ pages_counter +=
+ zone_id_shrink_pagelist(zone_id_1, zone1_page_list);
+ }
+ return pages_counter;
+}
+
+static ssize_t lmk_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d,%d\n", lmk_kill_pid, lmk_kill_ok);
+}
+
+/*
+ * lmk_state_store() will called by framework,
+ * the framework will send the pid of process that need to be swapped
+ */
+static ssize_t lmk_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ sscanf(buf, "%d,%d", &lmk_kill_pid, &lmk_kill_ok);
+
+ /* if the screen on, the optimized compcache will stop */
+ if (atomic_read(&optimize_comp_on) != 1)
+ return size;
+
+ if (lmk_kill_ok == 1) {
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ struct sysinfo ramzswap_info = { 0 };
+ struct mm_struct *mm_scan = NULL;
+
+ /*
+ * check the free RAM and swap area,
+ * stop the optimized compcache in cpu idle case;
+ * leave some swap area for using in low memory case
+ */
+ si_swapinfo(&ramzswap_info);
+ si_meminfo(&ramzswap_info);
+
+ if ((ramzswap_info.freeswap < CHECK_FREE_SWAPSPACE) ||
+ (ramzswap_info.freeram < check_free_memory)) {
+#if SWAP_PROCESS_DEBUG_LOG > 0
+ printk(KERN_INFO "idletime compcache is ignored : free RAM %lu, free swap %lu\n",
+ ramzswap_info.freeram, ramzswap_info.freeswap);
+#endif
+ lmk_kill_ok = 0;
+ return size;
+ }
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ if ((p->pid == lmk_kill_pid) &&
+ (__task_cred(p)->uid > 10000)) {
+ task_lock(p);
+ selected = p;
+ if (!selected->mm || !selected->signal) {
+ task_unlock(p);
+ selected = NULL;
+ break;
+ }
+ mm_scan = selected->mm;
+ if (mm_scan) {
+ if (selected->flags & PF_KTHREAD)
+ mm_scan = NULL;
+ else
+ atomic_inc(&mm_scan->mm_users);
+ }
+ task_unlock(selected);
+
+#if SWAP_PROCESS_DEBUG_LOG > 0
+ printk(KERN_INFO "idle time compcache: swap process pid %d, name %s, oom %d, task size %ld\n",
+ p->pid, p->comm,
+ p->signal->oom_adj,
+ get_mm_rss(p->mm));
+#endif
+ break;
+ }
+ }
+ read_unlock(&tasklist_lock);
+
+ if (mm_scan) {
+ LIST_HEAD(zone0_page_list);
+ LIST_HEAD(zone1_page_list);
+ int pages_tofree = 0, pages_freed = 0;
+
+ down_read(&mm_scan->mmap_sem);
+ pages_tofree =
+ shrink_pages(mm_scan, &zone0_page_list,
+ &zone1_page_list, 0x7FFFFFFF);
+ up_read(&mm_scan->mmap_sem);
+ mmput(mm_scan);
+ pages_freed =
+ swap_pages(&zone0_page_list,
+ &zone1_page_list);
+ lmk_kill_ok = 0;
+
+ }
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR(lmk_state, 0664, lmk_state_show, lmk_state_store);
+
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
static int __init lowmem_init(void)
{
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+ struct zone *zone;
+ unsigned int high_wmark = 0;
+#endif
task_free_register(&task_nb);
register_shrinker(&lowmem_shrinker);
+
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+ for_each_zone(zone) {
+ if (high_wmark < zone->watermark[WMARK_HIGH])
+ high_wmark = zone->watermark[WMARK_HIGH];
+ }
+ check_free_memory = (high_wmark != 0) ? high_wmark : CHECK_FREE_MEMORY;
+
+ lmk_class = class_create(THIS_MODULE, "lmk");
+ if (IS_ERR(lmk_class)) {
+ printk(KERN_ERR "Failed to create class(lmk)\n");
+ return 0;
+ }
+ lmk_dev = device_create(lmk_class, NULL, 0, NULL, "lowmemorykiller");
+ if (IS_ERR(lmk_dev)) {
+ printk(KERN_ERR
+ "Failed to create device(lowmemorykiller)!= %ld\n",
+ IS_ERR(lmk_dev));
+ return 0;
+ }
+ if (device_create_file(lmk_dev, &dev_attr_lmk_state) < 0)
+ printk(KERN_ERR "Failed to create device file(%s)!\n",
+ dev_attr_lmk_state.attr.name);
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
return 0;
}