// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.  The
		 * callback fires once every 256 nodes, each time the
		 * u8 counter wraps to zero.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements a stable "merge sort", which has O(n log(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b.  If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.  A hypothetical
 * usage sketch appears in a comment at the end of this file.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

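	/*
	 * Bottom-up merge: part[lev] is either NULL or a sorted sublist
	 * of 2^lev nodes (the topmost occupied slot can grow beyond that
	 * for lists longer than 2^MAX_LIST_LENGTH_BITS nodes).  Feeding
	 * in each node works like a binary-counter increment: occupied
	 * slots are merged into "cur" until an empty slot absorbs it.
	 */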
	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

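	/*
	 * Input exhausted: fold the remaining partial lists together,
	 * smallest level first, leaving the final merge to also restore
	 * the prev links and close the list back onto @head.
	 */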
	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
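
/*
 * Hypothetical usage sketch (illustrative only, not compiled as part of
 * this file; the names foo_entry, foo_cmp and foo_list are invented).
 * A caller embeds a struct list_head in its own structure, supplies a
 * cmp() callback obeying the contract documented above, and hands the
 * list head to list_sort().  Returning 0 for equal keys preserves
 * their original relative order:
 *
 *	struct foo_entry {
 *		struct list_head list;
 *		int key;
 *	};
 *
 *	static int foo_cmp(void *priv, struct list_head *a,
 *			   struct list_head *b)
 *	{
 *		struct foo_entry *fa = container_of(a, struct foo_entry, list);
 *		struct foo_entry *fb = container_of(b, struct foo_entry, list);
 *
 *		if (fa->key < fb->key)
 *			return -1;
 *		if (fa->key > fb->key)
 *			return 1;
 *		return 0;
 *	}
 *
 *	LIST_HEAD(foo_list);
 *	... populate foo_list with foo_entry nodes via list_add_tail() ...
 *	list_sort(NULL, &foo_list, foo_cmp);
 */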