#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
 * purpose.  The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller.  Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

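/*
 * Illustrative sketch (not part of this header), assuming a hypothetical
 * updater-side lock "foo_mutex" and a reader-visible list "foo_head":
 *
 *	mutex_lock(&foo_mutex);
 *	INIT_LIST_HEAD_RCU(&foo_head);
 *	mutex_unlock(&foo_mutex);
 *
 * New readers then see an empty list, while readers already traversing
 * the old elements still terminate correctly at the head.  This is the
 * pattern __list_splice_init_rcu() below relies on before it invokes
 * sync() to wait out those pre-existing readers.
 */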
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

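/*
 * Illustrative sketch (not part of this header): an updater publishing a
 * new element to a reader-visible list.  "struct foo", "foo_lock" and
 * "foo_head" are hypothetical names for this example only, and error
 * handling is omitted.
 *
 *	struct foo {
 *		int value;
 *		struct list_head list;
 *	};
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->value = 42;
 *	spin_lock(&foo_lock);
 *	list_add_rcu(&p->list, &foo_head);
 *	spin_unlock(&foo_lock);
 *
 * The spinlock excludes other updaters only; readers may observe the new
 * element as soon as rcu_assign_pointer() inside list_add_rcu() publishes
 * it, which is why all of @p's fields must be initialized beforehand.
 */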
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}

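/*
 * Illustrative sketch (not part of this header): removing an element and
 * deferring its freeing until a grace period has elapsed.  The names
 * continue the hypothetical "struct foo" example above.
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * If blocking in synchronize_rcu() is not acceptable, call_rcu() (or
 * kfree_rcu()) can be used instead to free @p asynchronously after the
 * grace period.
 */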
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node returns true after this.  It is
 * useful for RCU-based read-side lockfree traversal if the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}

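/*
 * Illustrative sketch (not part of this header): substituting an updated
 * copy for an existing element so that readers always see either the old
 * or the new version, never a partially updated one.  Names are
 * hypothetical and error handling is omitted.
 *
 *	struct foo *new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
 *
 *	*new_p = *old_p;
 *	new_p->value = new_value;
 *	spin_lock(&foo_lock);
 *	list_replace_rcu(&old_p->list, &new_p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_p);
 */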
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @prev:	points to the last element of the existing list
 * @next:	points to the first element of the existing list
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution.  If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" now track the source list's contents, so
	 * reinitialize the source list head itself.  RCU readers have
	 * access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 * designed for stacks.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}

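/*
 * Illustrative sketch (not part of this header): moving the entire
 * contents of one reader-visible list onto another.  "src_head",
 * "dst_head" and "foo_mutex" are hypothetical; a sleepable lock is used
 * because the sync argument (here synchronize_rcu()) blocks.
 *
 *	mutex_lock(&foo_mutex);
 *	list_splice_init_rcu(&src_head, &dst_head, synchronize_rcu);
 *	mutex_unlock(&foo_mutex);
 *
 * Afterwards src_head is empty, and readers traversing dst_head see the
 * spliced elements.
 */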
/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 * list, designed for queues.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(lockless_dereference(ptr), type, member)

/**
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 *	if (!list_empty_rcu(mylist)) {
 *		struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *		do_something(bar);
 *	}
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})

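/*
 * Illustrative sketch (not part of this header): the race-free
 * replacement for the "list_empty_rcu()/list_first_entry_rcu()" snippet
 * criticized above, reusing its hypothetical names.
 *
 *	struct foo *bar;
 *
 *	rcu_read_lock();
 *	bar = list_first_or_null_rcu(mylist, struct foo, list_member);
 *	if (bar)
 *		do_something(bar);
 *	rcu_read_unlock();
 */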
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:	the head for the list.
 * @ptr:	the list head to take the next element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_head within the struct.
 *
 * Note that if @ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

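/*
 * Illustrative sketch (not part of this header): stepping from a known
 * element "bar" to its successor, again with the hypothetical
 * "struct foo" from the comments above.
 *
 *	struct foo *next;
 *
 *	rcu_read_lock();
 *	next = list_next_or_null_rcu(mylist, &bar->list_member,
 *				     struct foo, list_member);
 *	if (next)
 *		do_something(next);
 *	rcu_read_unlock();
 */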
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

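/*
 * Illustrative sketch (not part of this header): the canonical reader-side
 * traversal, using the hypothetical "struct foo" list from the comments
 * above.
 *
 *	struct foo *pos;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(pos, mylist, list_member)
 *		do_something(pos);
 *	rcu_read_unlock();
 */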
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))lockless_dereference(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

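/*
 * Illustrative sketch (not part of this header): resuming a traversal.
 * Here "pos" is assumed to already point at a valid element, for example
 * one located by list_for_each_entry_rcu() earlier within the same RCU
 * read-side critical section; the other names remain hypothetical.
 *
 *	list_for_each_entry_continue_rcu(pos, mylist, list_member)
 *		do_something(pos);
 */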
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this;
 * the entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}

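/*
 * Illustrative sketch (not part of this header): inserting into one
 * bucket of a simple RCU-protected hash table.  All names (struct foo,
 * foo_hash, foo_lock, FOO_HASH_SIZE, hash_key()) are hypothetical.
 *
 *	struct foo {
 *		int key;
 *		struct hlist_node node;
 *	};
 *
 *	struct hlist_head foo_hash[FOO_HASH_SIZE];
 *
 *	spin_lock(&foo_lock);
 *	hlist_add_head_rcu(&p->node, &foo_hash[hash_key(p->key)]);
 *	spin_unlock(&foo_lock);
 */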
/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the end of the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
		last = i;

	if (last) {
		n->next = last->next;
		n->pprev = &last->next;
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

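/*
 * Illustrative sketch (not part of this header): a reader-side lookup in
 * the hypothetical hash table sketched after hlist_add_head_rcu() above.
 *
 *	struct foo *pos;
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(pos, &foo_hash[hash_key(key)], node)
 *		if (pos->key == key) {
 *			do_something(pos);
 *			break;
 *		}
 *	rcu_read_unlock();
 *
 * Note that @pos may not be dereferenced after rcu_read_unlock(), since
 * a concurrent hlist_del_rcu() plus a grace period could have freed it.
 */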
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif