/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

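/*
 * One handler instance wraps a single interval tree of registrations.
 * The spinlock protects the tree, the LRU list and the delete list;
 * nodes queued on del_list by the invalidate path are freed later by
 * del_work running on the caller-supplied workqueue.
 */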
struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root_cached root;
	void *ops_arg;
	spinlock_t lock; /* protect the RB tree */
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					struct mm_struct *,
					unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

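/*
 * The INTERVAL_TREE_DEFINE() invocation above instantiates the static
 * helpers used throughout this file: __mmu_int_rb_insert(),
 * __mmu_int_rb_remove(), __mmu_int_rb_iter_first() and
 * __mmu_int_rb_iter_next(), all keyed on the closed interval
 * [mmu_node_start(node), mmu_node_last(node)].
 */
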
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

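/*
 * The two helpers above round the interval keys outward to page
 * boundaries: the start is rounded down via PAGE_MASK and the inclusive
 * last byte is rounded up via PAGE_ALIGN(), so two nodes that merely
 * share a page are treated as overlapping by the interval tree.
 */
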
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT_CACHED;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	INIT_LIST_HEAD(&handlr->lru_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}

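/*
 * Example registration and teardown (a minimal sketch; "my_ops",
 * "my_arg" and "my_wq" are illustrative placeholders, not part of this
 * file; the callback names match the ops members used below):
 *
 *	static struct mmu_rb_ops my_ops = {
 *		.filter = my_filter,
 *		.insert = my_insert,
 *		.evict = my_evict,
 *		.remove = my_remove,
 *		.invalidate = my_invalidate,
 *	};
 *
 *	struct mmu_rb_handler *handler;
 *	int ret;
 *
 *	ret = hfi1_mmu_rb_register(my_arg, current->mm, &my_ops, my_wq,
 *				   &handler);
 *	if (ret)
 *		return ret;
 *	...
 *	hfi1_mmu_rb_unregister(handler);
 */
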
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running. It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

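/*
 * Search for a node matching [addr, addr + len) and remove it unless it
 * is an exact match. Returns true when a non-exact node was unlinked
 * from the tree and the LRU list (the caller receives it in *rb_node
 * and is expected to clean it up); returns false when nothing was
 * removed, with *rb_node pointing at the exact match, if any, which
 * stays in the tree.
 */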
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

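/*
 * Eviction walks the LRU list from its tail (nodes are list_add()ed at
 * the head on insert, so the tail holds the oldest entries) and lets
 * ops->evict() decide, per node, whether to evict it and whether to end
 * the walk via *stop. The remove callbacks run only after the handler
 * lock has been dropped, so they may sleep.
 */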
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, mm, start, end);
}

static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);
}

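/*
 * The invalidate path above runs in mmu notifier context and must not
 * sleep, so invalidated nodes are only unlinked and parked on del_list
 * under the lock; the actual (possibly sleeping) remove callbacks are
 * deferred to del_work on the caller-supplied workqueue.
 */
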
/*
 * Call the remove function for the given handler and the list. This
 * is expected to be called with a delete list extracted from the
 * handler. The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed. The key feature is that mm->mmap_sem is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}