/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER 1

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

enum qrtr_pkt_type {
        QRTR_TYPE_DATA          = 1,
        QRTR_TYPE_HELLO         = 2,
        QRTR_TYPE_BYE           = 3,
        QRTR_TYPE_NEW_SERVER    = 4,
        QRTR_TYPE_DEL_SERVER    = 5,
        QRTR_TYPE_DEL_CLIENT    = 6,
        QRTR_TYPE_RESUME_TX     = 7,
        QRTR_TYPE_EXIT          = 8,
        QRTR_TYPE_PING          = 9,
};

/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr {
        __le32 version;
        __le32 type;
        __le32 src_node_id;
        __le32 src_port_id;
        __le32 confirm_rx;
        __le32 size;
        __le32 dst_node_id;
        __le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
#define QRTR_NODE_BCAST ((unsigned int)-1)
#define QRTR_PORT_CTRL ((unsigned int)-2)
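
/*
 * Illustrative sketch (not used by the driver): how a sender would fill
 * this header for a one-byte DATA payload. On the wire the payload is
 * padded up to a multiple of four bytes while @size keeps the unpadded
 * length, and all fields are little-endian. The node numbers below are
 * made up for the example:
 *
 *      struct qrtr_hdr hdr = {
 *              .version     = cpu_to_le32(QRTR_PROTO_VER),
 *              .type        = cpu_to_le32(QRTR_TYPE_DATA),
 *              .src_node_id = cpu_to_le32(1),
 *              .src_port_id = cpu_to_le32(QRTR_MIN_EPH_SOCKET),
 *              .confirm_rx  = cpu_to_le32(0),
 *              .size        = cpu_to_le32(1),
 *              .dst_node_id = cpu_to_le32(2),
 *              .dst_port_id = cpu_to_le32(QRTR_MIN_EPH_SOCKET + 1),
 *      };
 *
 * The complete frame is then QRTR_HDR_SIZE + 4 bytes long, which is the
 * layout qrtr_endpoint_post() below validates on receive.
 */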

struct qrtr_sock {
        /* WARNING: sk must be the first member */
        struct sock sk;
        struct sockaddr_qrtr us;
        struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
        BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
        return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
        struct mutex ep_lock;
        struct qrtr_endpoint *ep;
        struct kref ref;
        unsigned int nid;

        struct sk_buff_head rx_queue;
        struct work_struct work;
        struct list_head item;
};

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
        struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

        if (node->nid != QRTR_EP_NID_AUTO)
                radix_tree_delete(&qrtr_nodes, node->nid);

        list_del(&node->item);
        mutex_unlock(&qrtr_node_lock);

        cancel_work_sync(&node->work);
        skb_queue_purge(&node->rx_queue);
        kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
        if (node)
                kref_get(&node->ref);
        return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
        if (!node)
                return;
        kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
        int rc = -ENODEV;

        mutex_lock(&node->ep_lock);
        if (node->ep)
                rc = node->ep->xmit(node->ep, skb);
        else
                kfree_skb(skb);
        mutex_unlock(&node->ep_lock);

        return rc;
}

/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
        struct qrtr_node *node;

        mutex_lock(&qrtr_node_lock);
        node = radix_tree_lookup(&qrtr_nodes, nid);
        node = qrtr_node_acquire(node);
        mutex_unlock(&qrtr_node_lock);

        return node;
}
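
/*
 * Typical lookup/release pairing (illustrative): every node returned by
 * qrtr_node_lookup() holds a reference that the caller must drop with
 * qrtr_node_release() once done, e.g.:
 *
 *      struct qrtr_node *node;
 *
 *      node = qrtr_node_lookup(nid);
 *      if (!node)
 *              return -ECONNRESET;
 *      rc = qrtr_node_enqueue(node, skb);
 *      qrtr_node_release(node);
 *
 * qrtr_sendmsg() below follows exactly this pattern for remote nodes.
 */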

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
        if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
                return;

        mutex_lock(&qrtr_node_lock);
        radix_tree_insert(&qrtr_nodes, nid, node);
        node->nid = nid;
        mutex_unlock(&qrtr_node_lock);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
        struct qrtr_node *node = ep->node;
        const struct qrtr_hdr *phdr = data;
        struct sk_buff *skb;
        unsigned int psize;
        unsigned int size;
        unsigned int type;
        unsigned int ver;
        unsigned int dst;

        if (len < QRTR_HDR_SIZE || len & 3)
                return -EINVAL;

        ver = le32_to_cpu(phdr->version);
        size = le32_to_cpu(phdr->size);
        type = le32_to_cpu(phdr->type);
        dst = le32_to_cpu(phdr->dst_port_id);

        psize = (size + 3) & ~3;

        if (ver != QRTR_PROTO_VER)
                return -EINVAL;

        if (len != psize + QRTR_HDR_SIZE)
                return -EINVAL;

        if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
                return -EINVAL;

        skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
        if (!skb)
                return -ENOMEM;

        skb_reset_transport_header(skb);
        memcpy(skb_put(skb, len), data, len);

        skb_queue_tail(&node->rx_queue, skb);
        schedule_work(&node->work);

        return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
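
/*
 * A minimal sketch of an endpoint (transport) driver feeding received
 * frames into the router; the driver type and rx hook are hypothetical:
 *
 *      static void my_xprt_rx(struct my_xprt *xprt, const void *buf, size_t len)
 *      {
 *              if (qrtr_endpoint_post(&xprt->ep, buf, len))
 *                      dev_err(xprt->dev, "dropped malformed qrtr frame\n");
 *      }
 *
 * Each call must hand over one complete frame: a struct qrtr_hdr plus
 * the payload padded to a multiple of four bytes, as validated above.
 */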

/* Allocate and construct a resume-tx packet. */
static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
                                            u32 dst_node, u32 port)
{
        const int pkt_len = 20;
        struct qrtr_hdr *hdr;
        struct sk_buff *skb;
        __le32 *buf;

        skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
        if (!skb)
                return NULL;
        skb_reset_transport_header(skb);

        hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
        hdr->version = cpu_to_le32(QRTR_PROTO_VER);
        hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
        hdr->src_node_id = cpu_to_le32(src_node);
        hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
        hdr->confirm_rx = cpu_to_le32(0);
        hdr->size = cpu_to_le32(pkt_len);
        hdr->dst_node_id = cpu_to_le32(dst_node);
        hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);

        buf = (__le32 *)skb_put(skb, pkt_len);
        memset(buf, 0, pkt_len);
        buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
        buf[1] = cpu_to_le32(src_node);
        buf[2] = cpu_to_le32(port);

        return skb;
}
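
/*
 * For reference, the 20-byte resume-tx payload built above carries three
 * meaningful little-endian words (the trailing bytes stay zeroed by the
 * memset):
 *
 *      word 0: QRTR_TYPE_RESUME_TX
 *      word 1: node id confirming reception
 *      word 2: port id whose flow may resume
 */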

static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Handle and route a received packet.
 *
 * This will auto-reply with a resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
        struct qrtr_node *node = container_of(work, struct qrtr_node, work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
                const struct qrtr_hdr *phdr;
                u32 dst_node, dst_port;
                struct qrtr_sock *ipc;
                u32 src_node;
                int confirm;

                phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
                src_node = le32_to_cpu(phdr->src_node_id);
                dst_node = le32_to_cpu(phdr->dst_node_id);
                dst_port = le32_to_cpu(phdr->dst_port_id);
                confirm = !!phdr->confirm_rx;

                qrtr_node_assign(node, src_node);

                ipc = qrtr_port_lookup(dst_port);
                if (!ipc) {
                        kfree_skb(skb);
                } else {
                        if (sock_queue_rcv_skb(&ipc->sk, skb))
                                kfree_skb(skb);

                        qrtr_port_put(ipc);
                }

                if (confirm) {
                        skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
                        if (!skb)
                                break;
                        if (qrtr_node_enqueue(node, skb))
                                break;
                }
        }
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
        struct qrtr_node *node;

        if (!ep || !ep->xmit)
                return -EINVAL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        INIT_WORK(&node->work, qrtr_node_rx_work);
        kref_init(&node->ref);
        mutex_init(&node->ep_lock);
        skb_queue_head_init(&node->rx_queue);
        node->nid = QRTR_EP_NID_AUTO;
        node->ep = ep;

        qrtr_node_assign(node, nid);

        mutex_lock(&qrtr_node_lock);
        list_add(&node->item, &qrtr_all_nodes);
        mutex_unlock(&qrtr_node_lock);
        ep->node = node;

        return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
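
/*
 * A minimal registration sketch for a hypothetical transport driver;
 * only ->xmit needs to be populated before calling
 * qrtr_endpoint_register():
 *
 *      static int my_xprt_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
 *      {
 *              struct my_xprt *xprt = container_of(ep, struct my_xprt, ep);
 *
 *              return my_xprt_send_frame(xprt, skb);
 *      }
 *
 *      static int my_xprt_probe(struct my_xprt *xprt)
 *      {
 *              xprt->ep.xmit = my_xprt_xmit;
 *              return qrtr_endpoint_register(&xprt->ep, QRTR_EP_NID_AUTO);
 *      }
 *
 * With QRTR_EP_NID_AUTO the node id is learned later, from the source
 * node of the first packet posted by the endpoint (qrtr_node_assign()).
 */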

/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
        struct qrtr_node *node = ep->node;

        mutex_lock(&node->ep_lock);
        node->ep = NULL;
        mutex_unlock(&node->ep_lock);

        qrtr_node_release(node);
        ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);

/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
        struct qrtr_sock *ipc;

        if (port == QRTR_PORT_CTRL)
                port = 0;

        mutex_lock(&qrtr_port_lock);
        ipc = idr_find(&qrtr_ports, port);
        if (ipc)
                sock_hold(&ipc->sk);
        mutex_unlock(&qrtr_port_lock);

        return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
        sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
        int port = ipc->us.sq_port;

        if (port == QRTR_PORT_CTRL)
                port = 0;

        __sock_put(&ipc->sk);

        mutex_lock(&qrtr_port_lock);
        idr_remove(&qrtr_ports, port);
        mutex_unlock(&qrtr_port_lock);
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign an ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >=QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
        int rc;

        mutex_lock(&qrtr_port_lock);
        if (!*port) {
                rc = idr_alloc(&qrtr_ports, ipc,
                               QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
                               GFP_ATOMIC);
                if (rc >= 0)
                        *port = rc;
        } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
                rc = -EACCES;
        } else if (*port == QRTR_PORT_CTRL) {
                rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
        } else {
                rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
                if (rc >= 0)
                        *port = rc;
        }
        mutex_unlock(&qrtr_port_lock);

        if (rc == -ENOSPC)
                return -EADDRINUSE;
        else if (rc < 0)
                return rc;

        sock_hold(&ipc->sk);

        return 0;
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
                       const struct sockaddr_qrtr *addr, int zapped)
{
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sock *sk = sock->sk;
        int port;
        int rc;

        /* rebinding ok */
        if (!zapped && addr->sq_port == ipc->us.sq_port)
                return 0;

        port = addr->sq_port;
        rc = qrtr_port_assign(ipc, &port);
        if (rc)
                return rc;

        /* unbind previous, if any */
        if (!zapped)
                qrtr_port_remove(ipc);
        ipc->us.sq_port = port;

        sock_reset_flag(sk, SOCK_ZAPPED);

        return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct sockaddr_qrtr addr;

        if (!sock_flag(sk, SOCK_ZAPPED))
                return 0;

        addr.sq_family = AF_QIPCRTR;
        addr.sq_node = qrtr_local_nid;
        addr.sq_port = 0;

        return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sock *sk = sock->sk;
        int rc;

        if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
                return -EINVAL;

        if (addr->sq_node != ipc->us.sq_node)
                return -EINVAL;

        lock_sock(sk);
        rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
        release_sock(sk);

        return rc;
}
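
/*
 * Userspace view of binding (illustrative; node id made up): sq_node
 * must match the socket's local node, sq_port 0 requests an ephemeral
 * port from the auto-bind range, and ports below QRTR_MIN_EPH_SOCKET
 * require CAP_NET_ADMIN:
 *
 *      struct sockaddr_qrtr sq = {
 *              .sq_family = AF_QIPCRTR,
 *              .sq_node   = 1,
 *              .sq_port   = 0,
 *      };
 *      int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *
 *      if (bind(fd, (struct sockaddr *)&sq, sizeof(sq)) < 0)
 *              perror("bind");
 */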

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
        const struct qrtr_hdr *phdr;
        struct qrtr_sock *ipc;

        phdr = (const struct qrtr_hdr *)skb_transport_header(skb);

        ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
        if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
                if (ipc)
                        qrtr_port_put(ipc);
                kfree_skb(skb);
                return -ENODEV;
        }

        if (sock_queue_rcv_skb(&ipc->sk, skb)) {
                qrtr_port_put(ipc);
                kfree_skb(skb);
                return -ENOSPC;
        }

        qrtr_port_put(ipc);

        return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
        struct sk_buff *skbn;

        mutex_lock(&qrtr_node_lock);
        list_for_each_entry(node, &qrtr_all_nodes, item) {
                skbn = skb_clone(skb, GFP_KERNEL);
                if (!skbn)
                        break;
                skb_set_owner_w(skbn, skb->sk);
                qrtr_node_enqueue(node, skbn);
        }
        mutex_unlock(&qrtr_node_lock);

        qrtr_local_enqueue(NULL, skb);

        return 0;
}

/* Build the qrtr header around the payload and hand the packet to the
 * node, local or broadcast enqueue function.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
        int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sock *sk = sock->sk;
        struct qrtr_node *node;
        struct qrtr_hdr *hdr;
        struct sk_buff *skb;
        size_t plen;
        int rc;

        if (msg->msg_flags & ~(MSG_DONTWAIT))
                return -EINVAL;

        if (len > 65535)
                return -EMSGSIZE;

        lock_sock(sk);

        if (addr) {
                if (msg->msg_namelen < sizeof(*addr)) {
                        release_sock(sk);
                        return -EINVAL;
                }

                if (addr->sq_family != AF_QIPCRTR) {
                        release_sock(sk);
                        return -EINVAL;
                }

                rc = qrtr_autobind(sock);
                if (rc) {
                        release_sock(sk);
                        return rc;
                }
        } else if (sk->sk_state == TCP_ESTABLISHED) {
                addr = &ipc->peer;
        } else {
                release_sock(sk);
                return -ENOTCONN;
        }

        node = NULL;
        if (addr->sq_node == QRTR_NODE_BCAST) {
                if (addr->sq_port != QRTR_PORT_CTRL &&
                    qrtr_local_nid != QRTR_NODE_BCAST) {
                        release_sock(sk);
                        return -ENOTCONN;
                }
                enqueue_fn = qrtr_bcast_enqueue;
        } else if (addr->sq_node == ipc->us.sq_node) {
                enqueue_fn = qrtr_local_enqueue;
        } else {
                node = qrtr_node_lookup(addr->sq_node);
                if (!node) {
                        release_sock(sk);
                        return -ECONNRESET;
                }
                enqueue_fn = qrtr_node_enqueue;
        }

        plen = (len + 3) & ~3;
        skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
                                  msg->msg_flags & MSG_DONTWAIT, &rc);
        if (!skb)
                goto out_node;

        skb_reset_transport_header(skb);
        skb_put(skb, len + QRTR_HDR_SIZE);

        hdr = (struct qrtr_hdr *)skb_transport_header(skb);
        hdr->version = cpu_to_le32(QRTR_PROTO_VER);
        hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
        hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
        hdr->confirm_rx = cpu_to_le32(0);
        hdr->size = cpu_to_le32(len);
        hdr->dst_node_id = cpu_to_le32(addr->sq_node);
        hdr->dst_port_id = cpu_to_le32(addr->sq_port);

        rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
                                         &msg->msg_iter, len);
        if (rc) {
                kfree_skb(skb);
                goto out_node;
        }

        if (plen != len) {
                skb_pad(skb, plen - len);
                skb_put(skb, plen - len);
        }

        if (ipc->us.sq_port == QRTR_PORT_CTRL) {
                if (len < 4) {
                        rc = -EINVAL;
                        kfree_skb(skb);
                        goto out_node;
                }

                /* control messages already require the type as 'command' */
                skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
        } else {
                hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
        }

        rc = enqueue_fn(node, skb);
        if (rc >= 0)
                rc = len;

out_node:
        qrtr_node_release(node);
        release_sock(sk);

        return rc;
}
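
/*
 * Userspace send sketch (illustrative; addresses made up). The header is
 * constructed by the kernel, payloads above 65535 bytes are rejected
 * with EMSGSIZE, and sockets bound to QRTR_PORT_CTRL must start their
 * payload with the control command word copied into hdr->type above:
 *
 *      struct sockaddr_qrtr to = {
 *              .sq_family = AF_QIPCRTR,
 *              .sq_node   = 2,
 *              .sq_port   = 0x4001,
 *      };
 *
 *      sendto(fd, buf, len, 0, (struct sockaddr *)&to, sizeof(to));
 */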

/* Receive a single queued datagram, stripping the qrtr header. */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
                        size_t size, int flags)
{
        DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
        const struct qrtr_hdr *phdr;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, rc;

        lock_sock(sk);

        if (sock_flag(sk, SOCK_ZAPPED)) {
                release_sock(sk);
                return -EADDRNOTAVAIL;
        }

        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
        if (!skb) {
                release_sock(sk);
                return rc;
        }

        phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
        copied = le32_to_cpu(phdr->size);
        if (copied > size) {
                copied = size;
                msg->msg_flags |= MSG_TRUNC;
        }

        rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
        if (rc < 0)
                goto out;
        rc = copied;

        if (addr) {
                /* There is an anonymous 2-byte hole after sq_family,
                 * make sure to clear it.
                 */
                memset(addr, 0, sizeof(*addr));

                addr->sq_family = AF_QIPCRTR;
                addr->sq_node = le32_to_cpu(phdr->src_node_id);
                addr->sq_port = le32_to_cpu(phdr->src_port_id);
                msg->msg_namelen = sizeof(*addr);
        }

out:
        skb_free_datagram(sk, skb);
        release_sock(sk);

        return rc;
}
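
/*
 * Matching receive sketch (illustrative): the qrtr header is stripped
 * and, when msg_name is supplied, the sender's node and port are
 * reported back:
 *
 *      struct sockaddr_qrtr from;
 *      socklen_t fromlen = sizeof(from);
 *      ssize_t n;
 *
 *      n = recvfrom(fd, buf, sizeof(buf), 0,
 *                   (struct sockaddr *)&from, &fromlen);
 *
 * Oversized datagrams are truncated; recvmsg() callers additionally see
 * MSG_TRUNC in msg_flags, as set above.
 */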

/* Record a default remote address for subsequent sends. */
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
                        int len, int flags)
{
        DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sock *sk = sock->sk;
        int rc;

        if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
                return -EINVAL;

        lock_sock(sk);

        sk->sk_state = TCP_CLOSE;
        sock->state = SS_UNCONNECTED;

        rc = qrtr_autobind(sock);
        if (rc) {
                release_sock(sk);
                return rc;
        }

        ipc->peer = *addr;
        sock->state = SS_CONNECTED;
        sk->sk_state = TCP_ESTABLISHED;

        release_sock(sk);

        return 0;
}

/* Report the local or, for connected sockets, the peer address. */
static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
                        int *len, int peer)
{
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sockaddr_qrtr qaddr;
        struct sock *sk = sock->sk;

        lock_sock(sk);
        if (peer) {
                if (sk->sk_state != TCP_ESTABLISHED) {
                        release_sock(sk);
                        return -ENOTCONN;
                }

                qaddr = ipc->peer;
        } else {
                qaddr = ipc->us;
        }
        release_sock(sk);

        *len = sizeof(qaddr);
        qaddr.sq_family = AF_QIPCRTR;

        memcpy(saddr, &qaddr, sizeof(qaddr));

        return 0;
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct qrtr_sock *ipc = qrtr_sk(sock->sk);
        struct sock *sk = sock->sk;
        struct sockaddr_qrtr *sq;
        struct sk_buff *skb;
        struct ifreq ifr;
        long len = 0;
        int rc = 0;

        lock_sock(sk);

        switch (cmd) {
        case TIOCOUTQ:
                len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
                if (len < 0)
                        len = 0;
                rc = put_user(len, (int __user *)argp);
                break;
        case TIOCINQ:
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        len = skb->len - QRTR_HDR_SIZE;
                rc = put_user(len, (int __user *)argp);
                break;
        case SIOCGIFADDR:
                if (copy_from_user(&ifr, argp, sizeof(ifr))) {
                        rc = -EFAULT;
                        break;
                }

                sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
                *sq = ipc->us;
                if (copy_to_user(argp, &ifr, sizeof(ifr))) {
                        rc = -EFAULT;
                        break;
                }
                break;
        case SIOCGSTAMP:
                rc = sock_get_timestamp(sk, argp);
                break;
        case SIOCADDRT:
        case SIOCDELRT:
        case SIOCSIFADDR:
        case SIOCGIFDSTADDR:
        case SIOCSIFDSTADDR:
        case SIOCGIFBRDADDR:
        case SIOCSIFBRDADDR:
        case SIOCGIFNETMASK:
        case SIOCSIFNETMASK:
                rc = -EINVAL;
                break;
        default:
                rc = -ENOIOCTLCMD;
                break;
        }

        release_sock(sk);

        return rc;
}
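
/*
 * Example ioctl usage from userspace (illustrative): TIOCINQ reports
 * the payload size of the next queued datagram, excluding the qrtr
 * header:
 *
 *      int avail = 0;
 *
 *      if (ioctl(fd, TIOCINQ, &avail) == 0)
 *              printf("next datagram holds %d bytes\n", avail);
 */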

static int qrtr_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct qrtr_sock *ipc;

        if (!sk)
                return 0;

        lock_sock(sk);

        ipc = qrtr_sk(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);

        sock_set_flag(sk, SOCK_DEAD);
        sock->sk = NULL;

        if (!sock_flag(sk, SOCK_ZAPPED))
                qrtr_port_remove(ipc);

        skb_queue_purge(&sk->sk_receive_queue);

        release_sock(sk);
        sock_put(sk);

        return 0;
}

static const struct proto_ops qrtr_proto_ops = {
        .owner          = THIS_MODULE,
        .family         = AF_QIPCRTR,
        .bind           = qrtr_bind,
        .connect        = qrtr_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .listen         = sock_no_listen,
        .sendmsg        = qrtr_sendmsg,
        .recvmsg        = qrtr_recvmsg,
        .getname        = qrtr_getname,
        .ioctl          = qrtr_ioctl,
        .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
        .release        = qrtr_release,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

static struct proto qrtr_proto = {
        .name           = "QIPCRTR",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct qrtr_sock),
};

/* Create a new QIPCRTR socket; a port is assigned on bind or first use. */
static int qrtr_create(struct net *net, struct socket *sock,
                       int protocol, int kern)
{
        struct qrtr_sock *ipc;
        struct sock *sk;

        if (sock->type != SOCK_DGRAM)
                return -EPROTOTYPE;

        sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_set_flag(sk, SOCK_ZAPPED);

        sock_init_data(sock, sk);
        sock->ops = &qrtr_proto_ops;

        ipc = qrtr_sk(sk);
        ipc->us.sq_family = AF_QIPCRTR;
        ipc->us.sq_node = qrtr_local_nid;
        ipc->us.sq_port = 0;

        return 0;
}

static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
        [IFA_LOCAL] = { .type = NLA_U32 },
};

/* Set the local node id from an RTM_NEWADDR netlink request. */
static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct nlattr *tb[IFA_MAX + 1];
        struct ifaddrmsg *ifm;
        int rc;

        if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;

        ASSERT_RTNL();

        rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
        if (rc < 0)
                return rc;

        ifm = nlmsg_data(nlh);
        if (!tb[IFA_LOCAL])
                return -EINVAL;

        qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
        return 0;
}
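
/*
 * Illustrative sketch (error handling omitted, layout assumed to match
 * the parsing above): userspace sets the local node id by sending an
 * RTM_NEWADDR request with an IFA_LOCAL attribute over a NETLINK_ROUTE
 * socket; both CAP_NET_ADMIN and CAP_SYS_ADMIN are required:
 *
 *      struct {
 *              struct nlmsghdr nh;
 *              struct ifaddrmsg ifa;
 *              struct rtattr rta;
 *              uint32_t nid;
 *      } req = {
 *              .nh.nlmsg_len   = sizeof(req),
 *              .nh.nlmsg_type  = RTM_NEWADDR,
 *              .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *              .ifa.ifa_family = AF_QIPCRTR,
 *              .rta.rta_type   = IFA_LOCAL,
 *              .rta.rta_len    = RTA_LENGTH(sizeof(uint32_t)),
 *              .nid            = 1,
 *      };
 */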

static const struct net_proto_family qrtr_family = {
        .owner  = THIS_MODULE,
        .family = AF_QIPCRTR,
        .create = qrtr_create,
};

static int __init qrtr_proto_init(void)
{
        int rc;

        rc = proto_register(&qrtr_proto, 1);
        if (rc)
                return rc;

        rc = sock_register(&qrtr_family);
        if (rc) {
                proto_unregister(&qrtr_proto);
                return rc;
        }

        rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);

        return 0;
}
module_init(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
        rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
        sock_unregister(qrtr_family.family);
        proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");