blob: b8268c0882a713f81d9002bd345a8f444f6434e3 [file] [log] [blame]
Ying Xuec5fa7b32013-06-17 10:54:39 -04001/*
2 * net/tipc/server.c: TIPC server infrastructure
3 *
4 * Copyright (c) 2012-2013, Wind River Systems
Jon Maloydf79d042018-02-15 10:40:44 +01005 * Copyright (c) 2017, Ericsson AB
Ying Xuec5fa7b32013-06-17 10:54:39 -04006 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
Jon Maloyc901d262018-02-15 10:40:43 +010037#include "subscr.h"
Ying Xuec5fa7b32013-06-17 10:54:39 -040038#include "server.h"
39#include "core.h"
Ying Xue859fc7c2015-01-09 15:27:01 +080040#include "socket.h"
Jon Maloy14c04492017-10-13 11:04:17 +020041#include "addr.h"
42#include "msg.h"
Ying Xuec5fa7b32013-06-17 10:54:39 -040043#include <net/sock.h>
Ying Xue76100a82015-03-18 09:32:57 +080044#include <linux/module.h>
Ying Xuec5fa7b32013-06-17 10:54:39 -040045
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
/* Number of messages to receive before rescheduling */
#define MAX_RECV_MSG_COUNT 25
/* Bit numbers in tipc_conn::flags */
#define CF_CONNECTED 1
#define CF_SERVER 2

/* Fetch the tipc_conn attached to a socket via sk_user_data */
#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
53
/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state (CF_CONNECTED, CF_SERVER)
 * @server: pointer to connected server
 * @sub_list: list of all subscriptions pertaining to this connection
 * @sub_lock: lock protecting the subscription list
 * @rwork: receive work item
 * @rx_action: what to do when connection socket is active
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct list_head sub_list;
	spinlock_t sub_lock; /* for subscription list */
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};
84
/* An entry waiting to be sent on a connection's outqueue */
struct outqueue_entry {
	u32 evt;                /* event type, e.g. TIPC_SUBSCR_TIMEOUT */
	struct list_head list;  /* link in tipc_conn::outqueue */
	struct kvec iov;        /* message buffer; owned, freed with entry */
};

/* Forward declarations */
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);
95
Jon Maloydf79d042018-02-15 10:40:44 +010096static bool connected(struct tipc_conn *con)
97{
98 return con && test_bit(CF_CONNECTED, &con->flags);
99}
100
101/**
102 * htohl - convert value to endianness used by destination
103 * @in: value to convert
104 * @swap: non-zero if endianness must be reversed
105 *
106 * Returns converted value
107 */
108static u32 htohl(u32 in, int swap)
109{
110 return swap ? swab32(in) : in;
111}
112
Ying Xuec5fa7b32013-06-17 10:54:39 -0400113static void tipc_conn_kref_release(struct kref *kref)
114{
115 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
Parthasarathy Bhuvaraganfc0adfc2017-01-24 13:00:45 +0100116 struct tipc_server *s = con->server;
Ying Xue76100a82015-03-18 09:32:57 +0800117 struct socket *sock = con->sock;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400118
Ying Xue76100a82015-03-18 09:32:57 +0800119 if (sock) {
Ying Xue76100a82015-03-18 09:32:57 +0800120 if (test_bit(CF_SERVER, &con->flags)) {
121 __module_get(sock->ops->owner);
Jon Maloydf79d042018-02-15 10:40:44 +0100122 __module_get(sock->sk->sk_prot_creator->owner);
Ying Xue76100a82015-03-18 09:32:57 +0800123 }
Ying Xuedef81f62015-04-23 09:37:38 -0400124 sock_release(sock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400125 con->sock = NULL;
126 }
Jon Maloy14c04492017-10-13 11:04:17 +0200127 spin_lock_bh(&s->idr_lock);
128 idr_remove(&s->conn_idr, con->conid);
129 s->idr_in_use--;
130 spin_unlock_bh(&s->idr_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400131 tipc_clean_outqueues(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400132 kfree(con);
133}
134
/* conn_put - drop a connection reference; the connection is destroyed
 * via tipc_conn_kref_release() when the count reaches zero.
 */
static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}
139
/* conn_get - take an additional reference on a connection; caller must
 * already hold a valid reference.
 */
static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}
144
145static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
146{
147 struct tipc_conn *con;
148
149 spin_lock_bh(&s->idr_lock);
150 con = idr_find(&s->conn_idr, conid);
Jon Maloydf79d042018-02-15 10:40:44 +0100151 if (!connected(con) || !kref_get_unless_zero(&con->kref))
152 con = NULL;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400153 spin_unlock_bh(&s->idr_lock);
154 return con;
155}
156
David S. Miller676d2362014-04-11 16:15:36 -0400157static void sock_data_ready(struct sock *sk)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400158{
159 struct tipc_conn *con;
160
Eric Dumazetb91083a2016-05-17 17:44:09 -0700161 read_lock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400162 con = sock2con(sk);
Jon Maloydf79d042018-02-15 10:40:44 +0100163 if (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400164 conn_get(con);
165 if (!queue_work(con->server->rcv_wq, &con->rwork))
166 conn_put(con);
167 }
Eric Dumazetb91083a2016-05-17 17:44:09 -0700168 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400169}
170
171static void sock_write_space(struct sock *sk)
172{
173 struct tipc_conn *con;
174
Eric Dumazetb91083a2016-05-17 17:44:09 -0700175 read_lock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400176 con = sock2con(sk);
Jon Maloydf79d042018-02-15 10:40:44 +0100177 if (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400178 conn_get(con);
179 if (!queue_work(con->server->send_wq, &con->swork))
180 conn_put(con);
181 }
Eric Dumazetb91083a2016-05-17 17:44:09 -0700182 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400183}
184
185static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
186{
187 struct sock *sk = sock->sk;
188
189 write_lock_bh(&sk->sk_callback_lock);
190
191 sk->sk_data_ready = sock_data_ready;
192 sk->sk_write_space = sock_write_space;
193 sk->sk_user_data = con;
194
195 con->sock = sock;
196
197 write_unlock_bh(&sk->sk_callback_lock);
198}
199
Jon Maloydf79d042018-02-15 10:40:44 +0100200/* tipc_con_delete_sub - delete a specific or all subscriptions
201 * for a given subscriber
202 */
203static void tipc_con_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
204{
205 struct list_head *sub_list = &con->sub_list;
206 struct tipc_subscription *sub, *tmp;
207
208 spin_lock_bh(&con->sub_lock);
209 list_for_each_entry_safe(sub, tmp, sub_list, subscrp_list) {
210 if (!s || !memcmp(s, &sub->evt.s, sizeof(*s)))
211 tipc_sub_delete(sub);
212 else if (s)
213 break;
214 }
215 spin_unlock_bh(&con->sub_lock);
216}
217
Parthasarathy Bhuvaragan9dc3abd2017-01-24 13:00:46 +0100218static void tipc_close_conn(struct tipc_conn *con)
Parthasarathy Bhuvaragan333f7962016-04-12 13:05:21 +0200219{
Jon Maloye88f2be2018-01-15 17:56:28 +0100220 struct sock *sk = con->sock->sk;
221 bool disconnect = false;
Parthasarathy Bhuvaragan333f7962016-04-12 13:05:21 +0200222
Jon Maloye88f2be2018-01-15 17:56:28 +0100223 write_lock_bh(&sk->sk_callback_lock);
224 disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
Jon Maloydf79d042018-02-15 10:40:44 +0100225
Jon Maloye88f2be2018-01-15 17:56:28 +0100226 if (disconnect) {
227 sk->sk_user_data = NULL;
Parthasarathy Bhuvaragan9dc3abd2017-01-24 13:00:46 +0100228 if (con->conid)
Jon Maloydf79d042018-02-15 10:40:44 +0100229 tipc_con_delete_sub(con, NULL);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400230 }
Jon Maloye88f2be2018-01-15 17:56:28 +0100231 write_unlock_bh(&sk->sk_callback_lock);
232
233 /* Handle concurrent calls from sending and receiving threads */
234 if (!disconnect)
235 return;
236
237 /* Don't flush pending works, -just let them expire */
238 kernel_sock_shutdown(con->sock, SHUT_RDWR);
239 conn_put(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400240}
241
242static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
243{
244 struct tipc_conn *con;
245 int ret;
246
247 con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
248 if (!con)
249 return ERR_PTR(-ENOMEM);
250
251 kref_init(&con->kref);
252 INIT_LIST_HEAD(&con->outqueue);
Jon Maloydf79d042018-02-15 10:40:44 +0100253 INIT_LIST_HEAD(&con->sub_list);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400254 spin_lock_init(&con->outqueue_lock);
Jon Maloydf79d042018-02-15 10:40:44 +0100255 spin_lock_init(&con->sub_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400256 INIT_WORK(&con->swork, tipc_send_work);
257 INIT_WORK(&con->rwork, tipc_recv_work);
258
259 spin_lock_bh(&s->idr_lock);
260 ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
261 if (ret < 0) {
262 kfree(con);
263 spin_unlock_bh(&s->idr_lock);
264 return ERR_PTR(-ENOMEM);
265 }
266 con->conid = ret;
267 s->idr_in_use++;
268 spin_unlock_bh(&s->idr_lock);
269
270 set_bit(CF_CONNECTED, &con->flags);
271 con->server = s;
272
273 return con;
274}
275
Jon Maloydf79d042018-02-15 10:40:44 +0100276int tipc_con_rcv_sub(struct net *net, int conid, struct tipc_conn *con,
277 void *buf, size_t len)
278{
279 struct tipc_subscr *s = (struct tipc_subscr *)buf;
280 struct tipc_subscription *sub;
281 bool status;
282 int swap;
283
284 /* Determine subscriber's endianness */
285 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
286 TIPC_SUB_CANCEL));
287
288 /* Detect & process a subscription cancellation request */
289 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
290 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
291 tipc_con_delete_sub(con, s);
292 return 0;
293 }
294 status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
295 sub = tipc_subscrp_subscribe(net, s, conid, swap, status);
296 if (!sub)
297 return -1;
298
299 spin_lock_bh(&con->sub_lock);
300 list_add(&sub->subscrp_list, &con->sub_list);
301 spin_unlock_bh(&con->sub_lock);
302 return 0;
303}
304
Ying Xuec5fa7b32013-06-17 10:54:39 -0400305static int tipc_receive_from_sock(struct tipc_conn *con)
306{
Ying Xuec5fa7b32013-06-17 10:54:39 -0400307 struct tipc_server *s = con->server;
Jon Maloye88f2be2018-01-15 17:56:28 +0100308 struct sock *sk = con->sock->sk;
Jon Maloye88f2be2018-01-15 17:56:28 +0100309 struct msghdr msg = {};
Ying Xuec5fa7b32013-06-17 10:54:39 -0400310 struct kvec iov;
311 void *buf;
312 int ret;
313
314 buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
315 if (!buf) {
316 ret = -ENOMEM;
317 goto out_close;
318 }
319
320 iov.iov_base = buf;
321 iov.iov_len = s->max_rcvbuf_size;
Jon Maloyc901d262018-02-15 10:40:43 +0100322 msg.msg_name = NULL;
Al Virobc480272017-09-20 22:08:04 -0400323 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
324 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400325 if (ret <= 0) {
326 kmem_cache_free(s->rcvbuf_cache, buf);
327 goto out_close;
328 }
329
Jon Maloye88f2be2018-01-15 17:56:28 +0100330 read_lock_bh(&sk->sk_callback_lock);
Jon Maloydf79d042018-02-15 10:40:44 +0100331 ret = tipc_con_rcv_sub(s->net, con->conid, con, buf, ret);
Jon Maloye88f2be2018-01-15 17:56:28 +0100332 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400333 kmem_cache_free(s->rcvbuf_cache, buf);
Jon Maloye88f2be2018-01-15 17:56:28 +0100334 if (ret < 0)
335 tipc_conn_terminate(s, con->conid);
336 return ret;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400337
338out_close:
339 if (ret != -EWOULDBLOCK)
340 tipc_close_conn(con);
341 else if (ret == 0)
342 /* Don't return success if we really got EOF */
343 ret = -EAGAIN;
344
345 return ret;
346}
347
348static int tipc_accept_from_sock(struct tipc_conn *con)
349{
Ying Xuec5fa7b32013-06-17 10:54:39 -0400350 struct socket *sock = con->sock;
351 struct socket *newsock;
352 struct tipc_conn *newcon;
353 int ret;
354
Ying Xue76100a82015-03-18 09:32:57 +0800355 ret = kernel_accept(sock, &newsock, O_NONBLOCK);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400356 if (ret < 0)
357 return ret;
358
359 newcon = tipc_alloc_conn(con->server);
360 if (IS_ERR(newcon)) {
361 ret = PTR_ERR(newcon);
362 sock_release(newsock);
363 return ret;
364 }
365
366 newcon->rx_action = tipc_receive_from_sock;
367 tipc_register_callbacks(newsock, newcon);
368
Ying Xuec5fa7b32013-06-17 10:54:39 -0400369 /* Wake up receive process in case of 'SYN+' message */
David S. Miller676d2362014-04-11 16:15:36 -0400370 newsock->sk->sk_data_ready(newsock->sk);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400371 return ret;
372}
373
/* tipc_create_listen_sock - create, configure, bind and listen on the
 * server's TIPC socket. Returns the listening socket on success, or
 * NULL on any failure.
 */
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int imp = TIPC_CRITICAL_IMPORTANCE;
	int ret;

	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return NULL;
	/* Server traffic is marked critical so it is dropped last */
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&imp, sizeof(imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	con->rx_action = tipc_accept_from_sock;
	ret = kernel_listen(sock, 0);
	if (ret < 0)
		goto create_err;

	/* As server's listening socket owner and creator is the same module,
	 * we have to decrease TIPC module reference count to guarantee that
	 * it remains zero after the server socket is created, otherwise,
	 * executing "rmmod" command is unable to make TIPC module deleted
	 * after TIPC module is inserted successfully.
	 *
	 * However, the reference count is ever increased twice in
	 * sock_create_kern(): one is to increase the reference count of owner
	 * of TIPC socket's proto_ops struct; another is to increment the
	 * reference count of owner of TIPC proto struct. Therefore, we must
	 * decrement the module reference count twice to ensure that it keeps
	 * zero after server's listening socket is created. Of course, we
	 * must bump the module reference count twice as well before the socket
	 * is closed.
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);
	/* CF_SERVER tells tipc_conn_kref_release() to re-take those refs */
	set_bit(CF_SERVER, &con->flags);

	return sock;

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
	return NULL;
}
423
424static int tipc_open_listening_sock(struct tipc_server *s)
425{
426 struct socket *sock;
427 struct tipc_conn *con;
428
429 con = tipc_alloc_conn(s);
430 if (IS_ERR(con))
431 return PTR_ERR(con);
432
433 sock = tipc_create_listen_sock(con);
Ying Xuec756891a2013-08-01 08:29:18 -0400434 if (!sock) {
435 idr_remove(&s->conn_idr, con->conid);
436 s->idr_in_use--;
437 kfree(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400438 return -EINVAL;
Ying Xuec756891a2013-08-01 08:29:18 -0400439 }
Ying Xuec5fa7b32013-06-17 10:54:39 -0400440
441 tipc_register_callbacks(sock, con);
442 return 0;
443}
444
445static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
446{
447 struct outqueue_entry *entry;
448 void *buf;
449
450 entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
451 if (!entry)
452 return NULL;
453
Amitoj Kaur Chawla810bf112016-06-23 10:19:37 +0530454 buf = kmemdup(data, len, GFP_ATOMIC);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400455 if (!buf) {
456 kfree(entry);
457 return NULL;
458 }
459
Ying Xuec5fa7b32013-06-17 10:54:39 -0400460 entry->iov.iov_base = buf;
461 entry->iov.iov_len = len;
462
463 return entry;
464}
465
/* tipc_free_entry - release an outqueue entry and the message buffer
 * it owns.
 */
static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}
471
472static void tipc_clean_outqueues(struct tipc_conn *con)
473{
474 struct outqueue_entry *e, *safe;
475
476 spin_lock_bh(&con->outqueue_lock);
477 list_for_each_entry_safe(e, safe, &con->outqueue, list) {
478 list_del(&e->list);
479 tipc_free_entry(e);
480 }
481 spin_unlock_bh(&con->outqueue_lock);
482}
483
484int tipc_conn_sendmsg(struct tipc_server *s, int conid,
Jon Maloydf79d042018-02-15 10:40:44 +0100485 u32 evt, void *data, size_t len)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400486{
487 struct outqueue_entry *e;
488 struct tipc_conn *con;
489
490 con = tipc_conn_lookup(s, conid);
491 if (!con)
492 return -EINVAL;
493
Jon Maloydf79d042018-02-15 10:40:44 +0100494 if (!connected(con)) {
Parthasarathy Bhuvaragan4c887aa2017-01-24 13:00:47 +0100495 conn_put(con);
496 return 0;
497 }
498
Ying Xuec5fa7b32013-06-17 10:54:39 -0400499 e = tipc_alloc_entry(data, len);
500 if (!e) {
501 conn_put(con);
502 return -ENOMEM;
503 }
Jon Maloydf79d042018-02-15 10:40:44 +0100504 e->evt = evt;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400505 spin_lock_bh(&con->outqueue_lock);
506 list_add_tail(&e->list, &con->outqueue);
507 spin_unlock_bh(&con->outqueue_lock);
508
Parthasarathy Bhuvaragan4c887aa2017-01-24 13:00:47 +0100509 if (!queue_work(s->send_wq, &con->swork))
Ying Xue4652edb2014-03-06 14:40:17 +0100510 conn_put(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400511 return 0;
512}
513
/* tipc_conn_terminate - close the connection identified by @conid,
 * if it still exists.
 */
void tipc_conn_terminate(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (!con)
		return;

	tipc_close_conn(con);
	conn_put(con);	/* release the lookup reference */
}
524
Jon Maloy232d07b2018-01-08 21:03:30 +0100525bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
526 u32 upper, u32 filter, int *conid)
Jon Maloy14c04492017-10-13 11:04:17 +0200527{
Jon Maloy14c04492017-10-13 11:04:17 +0200528 struct tipc_subscr sub;
Jon Maloy14c04492017-10-13 11:04:17 +0200529 struct tipc_conn *con;
Jon Maloydf79d042018-02-15 10:40:44 +0100530 int rc;
Jon Maloy14c04492017-10-13 11:04:17 +0200531
532 sub.seq.type = type;
533 sub.seq.lower = lower;
534 sub.seq.upper = upper;
535 sub.timeout = TIPC_WAIT_FOREVER;
Jon Maloy83485002018-01-08 21:03:29 +0100536 sub.filter = filter;
Jon Maloy14c04492017-10-13 11:04:17 +0200537 *(u32 *)&sub.usr_handle = port;
538
539 con = tipc_alloc_conn(tipc_topsrv(net));
Dan Carpenterc75e4272017-10-18 10:48:25 +0300540 if (IS_ERR(con))
Jon Maloy14c04492017-10-13 11:04:17 +0200541 return false;
542
543 *conid = con->conid;
Jon Maloy14c04492017-10-13 11:04:17 +0200544 con->sock = NULL;
Jon Maloydf79d042018-02-15 10:40:44 +0100545 rc = tipc_con_rcv_sub(net, *conid, con, &sub, sizeof(sub));
546 if (rc < 0)
547 tipc_close_conn(con);
548 return !rc;
Jon Maloy14c04492017-10-13 11:04:17 +0200549}
550
/* tipc_topsrv_kern_unsubscr - tear down a kernel-space subscriber
 * connection created by tipc_topsrv_kern_subscr(). The manual flag
 * clearing and the double conn_put() replace tipc_close_conn(), which
 * cannot be used here because these connections have no socket.
 */
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(tipc_topsrv(net), conid);
	if (!con)
		return;

	test_and_clear_bit(CF_CONNECTED, &con->flags);
	tipc_con_delete_sub(con, NULL);
	conn_put(con);	/* drop the reference taken by tipc_conn_lookup() */
	conn_put(con);	/* drop the initial ref from tipc_alloc_conn() */
}
564
565static void tipc_send_kern_top_evt(struct net *net, struct tipc_event *evt)
566{
567 u32 port = *(u32 *)&evt->s.usr_handle;
568 u32 self = tipc_own_addr(net);
569 struct sk_buff_head evtq;
570 struct sk_buff *skb;
571
572 skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
573 self, self, port, port, 0);
574 if (!skb)
575 return;
576 msg_set_dest_droppable(buf_msg(skb), true);
577 memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
578 skb_queue_head_init(&evtq);
579 __skb_queue_tail(&evtq, skb);
580 tipc_sk_rcv(net, &evtq);
581}
582
Ying Xuec5fa7b32013-06-17 10:54:39 -0400583static void tipc_send_to_sock(struct tipc_conn *con)
584{
Jon Maloydf79d042018-02-15 10:40:44 +0100585 struct list_head *queue = &con->outqueue;
586 struct tipc_server *srv = con->server;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400587 struct outqueue_entry *e;
Jon Maloy14c04492017-10-13 11:04:17 +0200588 struct tipc_event *evt;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400589 struct msghdr msg;
Jon Maloy14c04492017-10-13 11:04:17 +0200590 int count = 0;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400591 int ret;
592
593 spin_lock_bh(&con->outqueue_lock);
Jon Maloydf79d042018-02-15 10:40:44 +0100594
595 while (!list_empty(queue)) {
596 e = list_first_entry(queue, struct outqueue_entry, list);
Jon Maloy14c04492017-10-13 11:04:17 +0200597
Ying Xuec5fa7b32013-06-17 10:54:39 -0400598 spin_unlock_bh(&con->outqueue_lock);
599
Jon Maloydf79d042018-02-15 10:40:44 +0100600 if (e->evt == TIPC_SUBSCR_TIMEOUT) {
601 evt = (struct tipc_event *)e->iov.iov_base;
602 tipc_con_delete_sub(con, &evt->s);
603 }
604 memset(&msg, 0, sizeof(msg));
605 msg.msg_flags = MSG_DONTWAIT;
606
Jon Maloy14c04492017-10-13 11:04:17 +0200607 if (con->sock) {
Jon Maloy14c04492017-10-13 11:04:17 +0200608 ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
609 e->iov.iov_len);
610 if (ret == -EWOULDBLOCK || ret == 0) {
611 cond_resched();
612 goto out;
613 } else if (ret < 0) {
614 goto send_err;
615 }
616 } else {
617 evt = e->iov.iov_base;
Jon Maloydf79d042018-02-15 10:40:44 +0100618 tipc_send_kern_top_evt(srv->net, evt);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400619 }
Jon Maloy27469b72018-02-15 10:40:42 +0100620
Ying Xuec5fa7b32013-06-17 10:54:39 -0400621 /* Don't starve users filling buffers */
622 if (++count >= MAX_SEND_MSG_COUNT) {
623 cond_resched();
624 count = 0;
625 }
Ying Xuec5fa7b32013-06-17 10:54:39 -0400626 spin_lock_bh(&con->outqueue_lock);
627 list_del(&e->list);
628 tipc_free_entry(e);
629 }
630 spin_unlock_bh(&con->outqueue_lock);
631out:
632 return;
633
634send_err:
635 tipc_close_conn(con);
636}
637
638static void tipc_recv_work(struct work_struct *work)
639{
640 struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
641 int count = 0;
642
Jon Maloydf79d042018-02-15 10:40:44 +0100643 while (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400644 if (con->rx_action(con))
645 break;
646
647 /* Don't flood Rx machine */
648 if (++count >= MAX_RECV_MSG_COUNT) {
649 cond_resched();
650 count = 0;
651 }
652 }
653 conn_put(con);
654}
655
/* tipc_send_work - send worker: flush the connection's outqueue if the
 * connection is still up, then drop the reference taken when the work
 * was queued.
 */
static void tipc_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (connected(con))
		tipc_send_to_sock(con);

	conn_put(con);
}
665
/* tipc_work_stop - destroy both server workqueues; destroy_workqueue()
 * drains pending work items before returning.
 */
static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}
671
672static int tipc_work_start(struct tipc_server *s)
673{
Parthasarathy Bhuvaragan06c85812016-02-02 10:52:17 +0100674 s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400675 if (!s->rcv_wq) {
676 pr_err("can't start tipc receive workqueue\n");
677 return -ENOMEM;
678 }
679
Parthasarathy Bhuvaragan06c85812016-02-02 10:52:17 +0100680 s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400681 if (!s->send_wq) {
682 pr_err("can't start tipc send workqueue\n");
683 destroy_workqueue(s->rcv_wq);
684 return -ENOMEM;
685 }
686
687 return 0;
688}
689
690int tipc_server_start(struct tipc_server *s)
691{
692 int ret;
693
694 spin_lock_init(&s->idr_lock);
695 idr_init(&s->conn_idr);
696 s->idr_in_use = 0;
697
698 s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
699 0, SLAB_HWCACHE_ALIGN, NULL);
700 if (!s->rcvbuf_cache)
701 return -ENOMEM;
702
703 ret = tipc_work_start(s);
704 if (ret < 0) {
705 kmem_cache_destroy(s->rcvbuf_cache);
706 return ret;
707 }
Ying Xuec756891a2013-08-01 08:29:18 -0400708 ret = tipc_open_listening_sock(s);
709 if (ret < 0) {
710 tipc_work_stop(s);
711 kmem_cache_destroy(s->rcvbuf_cache);
712 return ret;
713 }
Ying Xuec756891a2013-08-01 08:29:18 -0400714 return ret;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400715}
716
/* tipc_server_stop - close every connection and release all server
 * resources (workqueues, buffer cache, idr).
 */
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int id;

	spin_lock_bh(&s->idr_lock);
	/* Walk ids until every allocated connection has been closed */
	for (id = 0; s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			/* Drop idr_lock across tipc_close_conn(): the final
			 * conn_put() ends in tipc_conn_kref_release(), which
			 * itself takes idr_lock.
			 */
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}