remove PAGE_GROUP_BY_MOBILITY

Grouping pages by mobility can currently be disabled at compile time
via CONFIG_PAGE_GROUP_BY_MOBILITY.  A number of reviewers considered
such a compile-time option undesirable, but in the current stack of
patches it is not a simple matter of dropping the patch that made the
feature configurable, as doing so would cause merge conflicts.  This
patch instead backs out the configuration option, leaving the grouping
code always built in.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7873a4..8aec4d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,7 +158,6 @@
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -718,7 +701,6 @@
 }
 
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -750,7 +732,7 @@
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
 	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
@@ -1033,7 +1008,6 @@
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1157,7 +1128,6 @@
 				goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
@@ -2525,7 +2492,6 @@
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
@@ -2579,11 +2545,7 @@
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is