/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/freezer.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/call.h>
#include "internal.h"

static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
static DECLARE_COMPLETION(rxrpc_krxiod_dead);

static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);

static LIST_HEAD(rxrpc_krxiod_transportq);
static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);

static LIST_HEAD(rxrpc_krxiod_callq);
static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);

static volatile int rxrpc_krxiod_die;
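
/* Overview of the state above: work is queued on two lists, one for
 * transports and one for calls, each protected by its own spinlock.
 * rxrpc_krxiod_qcount counts the entries on both lists combined, so the
 * daemon only sleeps on rxrpc_krxiod_sleepq while it is zero.  Shutdown is
 * requested by setting rxrpc_krxiod_die and waking the sleep queue; the
 * daemon acknowledges by completing rxrpc_krxiod_dead.
 */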

/*****************************************************************************/
/*
 * Rx I/O daemon
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod, current);

	printk("Started krxiod %d\n", current->pid);

	daemonize("krxiod");

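	/* The wait below is open-coded: the task state is set to
	 * TASK_INTERRUPTIBLE before the queue count, the die flag and pending
	 * signals are checked, so a wakeup arriving between the check and
	 * schedule() is not lost.
	 */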
	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if we've been given some to do */
		_debug("### Begin Work");

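		/* Each queue is probed with an unlocked list_empty() first; a
		 * non-empty result is rechecked under the queue lock before an
		 * item is removed.  The item's usage count is examined while
		 * still holding the lock: a reference is taken only if it is
		 * still positive, so an object part-way through destruction is
		 * simply dropped.  The same pattern is used for the call queue
		 * below.
		 */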
		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage) > 0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				} else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");

		try_to_freeze();

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */

/*****************************************************************************/
/*
 * start up a krxiod daemon
 */
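/* (kernel_thread() returns the new thread's PID on success or a negative
 * errno; the thread turns itself into a kernel daemon with daemonize()
 * above.)
 */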
int __init rxrpc_krxiod_init(void)
{
	return kernel_thread(rxrpc_krxiod, NULL, 0);

} /* end rxrpc_krxiod_init() */

/*****************************************************************************/
/*
 * kill the krxiod daemon and wait for it to complete
 */
void rxrpc_krxiod_kill(void)
{
	rxrpc_krxiod_die = 1;
	wake_up_all(&rxrpc_krxiod_sleepq);
	wait_for_completion(&rxrpc_krxiod_dead);

} /* end rxrpc_krxiod_kill() */

/*****************************************************************************/
/*
 * queue a transport for attention by krxiod
 */
void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

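	/* The transport is only added if it isn't already queued; the
	 * list_empty() test is repeated under the lock in case another CPU
	 * queued it in the meantime, and the usage count is checked so that a
	 * transport already being destroyed is never queued.  The wakeup is
	 * issued only when this caller saw the link empty.
	 */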
	if (list_empty(&trans->krxiodq_link)) {
		spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);

		if (list_empty(&trans->krxiodq_link)) {
			if (atomic_read(&trans->usage) > 0) {
				list_add_tail(&trans->krxiodq_link,
					      &rxrpc_krxiod_transportq);
				atomic_inc(&rxrpc_krxiod_qcount);
			}
		}

		spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
		wake_up_all(&rxrpc_krxiod_sleepq);
	}

	_leave("");

} /* end rxrpc_krxiod_queue_transport() */

/*****************************************************************************/
/*
 * dequeue a transport from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
	if (!list_empty(&trans->krxiodq_link)) {
		list_del_init(&trans->krxiodq_link);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);

	_leave("");

} /* end rxrpc_krxiod_dequeue_transport() */

/*****************************************************************************/
/*
 * queue a call for attention by krxiod
 */
void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
{
	unsigned long flags;

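	/* As with transports, the call is only added if its link is empty and
	 * its usage count is still positive; note that, unlike the transport
	 * variant, the list_empty() test is not repeated under the lock and
	 * the wakeup is issued unconditionally.
	 */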
	if (list_empty(&call->rcv_krxiodq_lk)) {
		spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
		if (atomic_read(&call->usage) > 0) {
			list_add_tail(&call->rcv_krxiodq_lk,
				      &rxrpc_krxiod_callq);
			atomic_inc(&rxrpc_krxiod_qcount);
		}
		spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
	}
	wake_up_all(&rxrpc_krxiod_sleepq);

} /* end rxrpc_krxiod_queue_call() */

/*****************************************************************************/
/*
 * dequeue a call from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
	if (!list_empty(&call->rcv_krxiodq_lk)) {
		list_del_init(&call->rcv_krxiodq_lk);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);

} /* end rxrpc_krxiod_dequeue_call() */