android/lowmemorykiller: Only consider gfp_mask free pages

In certain memory configurations (for example, when CMA is enabled)
there can be a large number of free pages which are not suitable for
satisfying a given memory request.
Because the lowmemorykiller counts all free pages, this large number
of unsuitable pages can prevent it from killing any tasks.

To ensure that the lowmemorykiller evaluates free memory correctly,
count only the free pages which are suitable for satisfying the
memory request.
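
The effect can be illustrated with a small userspace sketch (not part
of the patch): the migrate types, the per-type free counts and the
fallback table below are simplified, made-up stand-ins for the real mm
structures, but the counting mirrors what the new helpers do.

  #include <stdio.h>

  /* Simplified stand-ins for the kernel's migrate types. */
  enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
         MIGRATE_CMA, MIGRATE_RESERVE, MIGRATE_TYPES };

  /* Made-up free-page counts per migrate type for a single zone. */
  static int free_pages[MIGRATE_TYPES] = {
      [MIGRATE_UNMOVABLE]   =  1000,
      [MIGRATE_RECLAIMABLE] =   500,
      [MIGRATE_MOVABLE]     =  2000,
      [MIGRATE_CMA]         = 50000, /* large CMA carve-out */
      [MIGRATE_RESERVE]     =   100,
  };

  /*
   * Fallback chains, each delimited by MIGRATE_RESERVE, mirroring the
   * shape of the kernel's fallbacks[] table; an unmovable allocation
   * never falls back to MIGRATE_CMA.
   */
  static int fallbacks[MIGRATE_TYPES][4] = {
      [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
                                MIGRATE_RESERVE },
      [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,
                                MIGRATE_RESERVE },
      [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,
                                MIGRATE_RESERVE },
      [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* never used */
  };

  /* Count only the pages an allocation of type mtype can use. */
  static int nr_suitable_pages(int mtype)
  {
      int sum = free_pages[mtype];
      int i;

      for (i = 0; ; i++) {
          int fb = fallbacks[mtype][i];

          sum += free_pages[fb];
          if (fb == MIGRATE_RESERVE)
              break;
      }
      return sum;
  }

  int main(void)
  {
      int total = 0;
      int t;

      for (t = 0; t < MIGRATE_TYPES; t++)
          total += free_pages[t];

      printf("all free pages:      %d\n", total);
      printf("usable by unmovable: %d\n",
             nr_suitable_pages(MIGRATE_UNMOVABLE));
      return 0;
  }

With these made-up numbers a naive count reports 53600 free pages,
while an unmovable allocation can actually use only 3600; the gap is
exactly the CMA region that such a request cannot fall back to.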

Change-Id: Iaf8c06bb6f34d61e02d2f80f9d7e90e762ee75e8
Signed-off-by: Liam Mark <lmark@codeaurora.org>
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 91f4964..432abe5 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -62,6 +62,75 @@
 			printk(x);			\
 	} while (0)
 
+/*
+ * Count the free pages on the buddy free lists of @zone for a single
+ * migrate type; an order-n free block contributes 2^n base pages.
+ */
+static int nr_free_zone_mtype_pages(struct zone *zone, int mtype)
+{
+	int order;
+	int sum = 0;
+
+	for (order = 0; order < MAX_ORDER; ++order) {
+		unsigned long freecount = 0;
+		struct free_area *area;
+		struct list_head *curr;
+
+		area = &(zone->free_area[order]);
+
+		list_for_each(curr, &area->free_list[mtype])
+			freecount++;
+
+		sum += freecount << order;
+	}
+	return sum;
+}
+
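+/*
+ * Count the free pages in @zone which an allocation with the given
+ * gfp_mask can use: its own migrate type plus all its fallback types.
+ */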
+static int nr_free_zone_pages(struct zone *zone, gfp_t gfp_mask)
+{
+	int sum = 0;
+	int mtype = allocflags_to_migratetype(gfp_mask);
+	int i = 0;
+	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+	sum = nr_free_zone_mtype_pages(zone, mtype);
+
+	/*
+	 * Also count the fallback pages
+	 */
+	for (i = 0;; i++) {
+		int fallbacktype = mtype_fallbacks[i];
+		sum += nr_free_zone_mtype_pages(zone, fallbacktype);
+
+		if (fallbacktype == MIGRATE_RESERVE)
+			break;
+	}
+
+	return sum;
+}
+
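+/*
+ * Count the free pages which are suitable for satisfying an
+ * allocation with the given gfp_mask across all usable zones.
+ */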
+static int nr_free_pages(gfp_t gfp_mask)
+{
+	struct zoneref *z;
+	struct zone *zone;
+	int sum = 0;
+
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
+		sum += nr_free_zone_pages(zone, gfp_mask);
+
+	return sum;
+}
+
 static int test_task_flag(struct task_struct *p, int flag)
 {
 	struct task_struct *t = p;
@@ -93,6 +162,15 @@
 	int other_file = global_page_state(NR_FILE_PAGES) -
 						global_page_state(NR_SHMEM);
 
+	if (sc->nr_to_scan > 0 && other_free > other_file) {
+		/*
+		 * If the number of free pages will affect the decision of
+		 * which process is selected, then ensure that only the free
+		 * pages which can satisfy the request are considered.
+		 */
+		other_free = nr_free_pages(sc->gfp_mask);
+	}
+
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f8a3a10..08f74e6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -61,6 +61,14 @@
 	MIGRATE_TYPES
 };
 
+/*
+ * Returns the list of migrate types to which an allocation of
+ * migrate type mtype falls back when the free list for that type
+ * is depleted.
+ * The end of the list is delimited by the type MIGRATE_RESERVE.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 #  define cma_wmark_pages(zone)	zone->min_cma_pages
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 831509c..c3142e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -913,6 +913,11 @@
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
+int *get_migratetype_fallbacks(int mtype)
+{
+	return fallbacks[mtype];
+}
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock