/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/qrtr.h>
17#include <linux/termios.h> /* For TIOCINQ/OUTQ */
18
19#include <net/sock.h>
20
21#include "qrtr.h"
22
/* Wire protocol version carried in every qrtr_hdr; only version 1 is valid */
#define QRTR_PROTO_VER 1

/* auto-bind range: ephemeral ports handed out when a socket binds port 0 */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
28
/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version; must be QRTR_PROTO_VER
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * All fields are little-endian on the wire; the struct is __packed so it
 * can be overlaid directly on received buffers.
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
Courtney Cavinbdabad32016-05-06 07:09:08 -070052
/**
 * struct qrtr_sock - qrtr socket
 * @sk: base socket; MUST be first, qrtr_sk() casts depend on it
 * @us: local address of this socket
 * @peer: peer address; valid once sk_state == TCP_ESTABLISHED (see connect)
 */
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};
59
60static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
61{
62 BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
63 return container_of(sk, struct qrtr_sock, sk);
64}
65
/* id of the local node; set from userspace via netlink (qrtr_addr_doit),
 * starts out as -1 (unset)
 */
static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
78
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint; NULL once the endpoint has been unregistered
 * @ref: reference count for node (guarded by qrtr_node_lock on release)
 * @nid: node id; QRTR_EP_NID_AUTO until learned from traffic or caller
 * @rx_queue: receive queue, drained by @work
 * @work: scheduled work struct for recv work (qrtr_node_rx_work)
 * @item: list item for broadcast list (qrtr_all_nodes)
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};
99
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	/* Auto-id nodes were never inserted into the tree (qrtr_node_assign) */
	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	/* Drop off the broadcast list, then release the lock that
	 * kref_put_mutex() acquired on our behalf.
	 */
	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	/* Wait for any in-flight rx work before tearing down its queue.
	 * NOTE(review): assumes the endpoint no longer posts packets at
	 * this point (refcount is zero) -- confirm against callers.
	 */
	cancel_work_sync(&node->work);
	skb_queue_purge(&node->rx_queue);
	kfree(node);
}
119
120/* Increment reference to node. */
121static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
122{
123 if (node)
124 kref_get(&node->ref);
125 return node;
126}
127
128/* Decrement reference to node and release as necessary. */
129static void qrtr_node_release(struct qrtr_node *node)
130{
131 if (!node)
132 return;
133 kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
134}
135
136/* Pass an outgoing packet socket buffer to the endpoint driver. */
137static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
138{
139 int rc = -ENODEV;
140
141 mutex_lock(&node->ep_lock);
142 if (node->ep)
143 rc = node->ep->xmit(node->ep, skb);
144 else
145 kfree_skb(skb);
146 mutex_unlock(&node->ep_lock);
147
148 return rc;
149}
150
151/* Lookup node by id.
152 *
153 * callers must release with qrtr_node_release()
154 */
155static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
156{
157 struct qrtr_node *node;
158
159 mutex_lock(&qrtr_node_lock);
160 node = radix_tree_lookup(&qrtr_nodes, nid);
161 node = qrtr_node_acquire(node);
162 mutex_unlock(&qrtr_node_lock);
163
164 return node;
165}
166
167/* Assign node id to node.
168 *
169 * This is mostly useful for automatic node id assignment, based on
170 * the source id in the incoming packet.
171 */
172static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
173{
174 if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
175 return;
176
177 mutex_lock(&qrtr_node_lock);
178 radix_tree_insert(&qrtr_nodes, nid, node);
179 node->nid = nid;
180 mutex_unlock(&qrtr_node_lock);
181}
182
183/**
184 * qrtr_endpoint_post() - post incoming data
185 * @ep: endpoint handle
186 * @data: data pointer
187 * @len: size of data in bytes
188 *
189 * Return: 0 on success; negative error code on failure
190 */
191int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
192{
193 struct qrtr_node *node = ep->node;
194 const struct qrtr_hdr *phdr = data;
195 struct sk_buff *skb;
196 unsigned int psize;
197 unsigned int size;
198 unsigned int type;
199 unsigned int ver;
200 unsigned int dst;
201
202 if (len < QRTR_HDR_SIZE || len & 3)
203 return -EINVAL;
204
205 ver = le32_to_cpu(phdr->version);
206 size = le32_to_cpu(phdr->size);
207 type = le32_to_cpu(phdr->type);
208 dst = le32_to_cpu(phdr->dst_port_id);
209
210 psize = (size + 3) & ~3;
211
212 if (ver != QRTR_PROTO_VER)
213 return -EINVAL;
214
215 if (len != psize + QRTR_HDR_SIZE)
216 return -EINVAL;
217
218 if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
219 return -EINVAL;
220
221 skb = netdev_alloc_skb(NULL, len);
222 if (!skb)
223 return -ENOMEM;
224
225 skb_reset_transport_header(skb);
226 memcpy(skb_put(skb, len), data, len);
227
228 skb_queue_tail(&node->rx_queue, skb);
229 schedule_work(&node->work);
230
231 return 0;
232}
233EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
234
235/* Allocate and construct a resume-tx packet. */
236static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
237 u32 dst_node, u32 port)
238{
239 const int pkt_len = 20;
240 struct qrtr_hdr *hdr;
241 struct sk_buff *skb;
Stephen Boyd71ab8622017-01-09 14:31:58 -0800242 __le32 *buf;
Courtney Cavinbdabad32016-05-06 07:09:08 -0700243
244 skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
245 if (!skb)
246 return NULL;
247 skb_reset_transport_header(skb);
248
249 hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
250 hdr->version = cpu_to_le32(QRTR_PROTO_VER);
251 hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
252 hdr->src_node_id = cpu_to_le32(src_node);
253 hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
254 hdr->confirm_rx = cpu_to_le32(0);
255 hdr->size = cpu_to_le32(pkt_len);
256 hdr->dst_node_id = cpu_to_le32(dst_node);
257 hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
258
Stephen Boyd71ab8622017-01-09 14:31:58 -0800259 buf = (__le32 *)skb_put(skb, pkt_len);
Courtney Cavinbdabad32016-05-06 07:09:08 -0700260 memset(buf, 0, pkt_len);
261 buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
262 buf[1] = cpu_to_le32(src_node);
263 buf[2] = cpu_to_le32(port);
264
265 return skb;
266}
267
/* Forward declarations: the rx work below needs the port lookup/put
 * helpers, which live with the rest of the port management code further
 * down in this file.
 */
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
270
/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		const struct qrtr_hdr *phdr;
		u32 dst_node, dst_port;
		struct qrtr_sock *ipc;
		u32 src_node;
		int confirm;

		/* header was validated in qrtr_endpoint_post() and sits at
		 * the transport header offset
		 */
		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
		src_node = le32_to_cpu(phdr->src_node_id);
		dst_node = le32_to_cpu(phdr->dst_node_id);
		dst_port = le32_to_cpu(phdr->dst_port_id);
		/* any non-zero value requests a resume-tx reply; no
		 * byte-swap needed for a boolean test
		 */
		confirm = !!phdr->confirm_rx;

		/* learn the node id of an auto-id endpoint from its traffic */
		qrtr_node_assign(node, src_node);

		ipc = qrtr_port_lookup(dst_port);
		if (!ipc) {
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			/* skb was consumed above; the variable is reused
			 * here for the resume-tx reply
			 */
			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
			if (!skb)
				break;
			if (qrtr_node_enqueue(node, skb))
				break;
		}
	}
}
314
315/**
316 * qrtr_endpoint_register() - register a new endpoint
317 * @ep: endpoint to register
318 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
319 * Return: 0 on success; negative error code on failure
320 *
321 * The specified endpoint must have the xmit function pointer set on call.
322 */
323int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
324{
325 struct qrtr_node *node;
326
327 if (!ep || !ep->xmit)
328 return -EINVAL;
329
330 node = kzalloc(sizeof(*node), GFP_KERNEL);
331 if (!node)
332 return -ENOMEM;
333
334 INIT_WORK(&node->work, qrtr_node_rx_work);
335 kref_init(&node->ref);
336 mutex_init(&node->ep_lock);
337 skb_queue_head_init(&node->rx_queue);
338 node->nid = QRTR_EP_NID_AUTO;
339 node->ep = ep;
340
341 qrtr_node_assign(node, nid);
342
343 mutex_lock(&qrtr_node_lock);
344 list_add(&node->item, &qrtr_all_nodes);
345 mutex_unlock(&qrtr_node_lock);
346 ep->node = node;
347
348 return 0;
349}
350EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
351
352/**
353 * qrtr_endpoint_unregister - unregister endpoint
354 * @ep: endpoint to unregister
355 */
356void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
357{
358 struct qrtr_node *node = ep->node;
359
360 mutex_lock(&node->ep_lock);
361 node->ep = NULL;
362 mutex_unlock(&node->ep_lock);
363
364 qrtr_node_release(node);
365 ep->node = NULL;
366}
367EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
368
369/* Lookup socket by port.
370 *
371 * Callers must release with qrtr_port_put()
372 */
373static struct qrtr_sock *qrtr_port_lookup(int port)
374{
375 struct qrtr_sock *ipc;
376
377 if (port == QRTR_PORT_CTRL)
378 port = 0;
379
380 mutex_lock(&qrtr_port_lock);
381 ipc = idr_find(&qrtr_ports, port);
382 if (ipc)
383 sock_hold(&ipc->sk);
384 mutex_unlock(&qrtr_port_lock);
385
386 return ipc;
387}
388
/* Release acquired socket (drops the reference taken by qrtr_port_lookup). */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
394
395/* Remove port assignment. */
396static void qrtr_port_remove(struct qrtr_sock *ipc)
397{
398 int port = ipc->us.sq_port;
399
400 if (port == QRTR_PORT_CTRL)
401 port = 0;
402
403 __sock_put(&ipc->sk);
404
405 mutex_lock(&qrtr_port_lock);
406 idr_remove(&qrtr_ports, port);
407 mutex_unlock(&qrtr_port_lock);
408}
409
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 * >QRTR_MIN_EPH_SOCKET: Specified; available to all
 *
 * On success the idr entry holds a reference on the socket (sock_hold),
 * dropped again in qrtr_port_remove().
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		/* ephemeral: any free port in the auto-bind range */
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		/* low ports are privileged. NOTE(review): QRTR_PORT_CTRL
		 * (0xfffffffe) stored in an int is negative, so requests for
		 * the control port also land here first and require
		 * CAP_NET_ADMIN -- presumably intended; confirm.
		 */
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		/* the control port lives at idr index 0 */
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		/* specific port requested; fails if already taken */
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	/* idr reports a taken port as -ENOSPC; translate for userspace */
	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
451
452/* Bind socket to address.
453 *
454 * Socket should be locked upon call.
455 */
456static int __qrtr_bind(struct socket *sock,
457 const struct sockaddr_qrtr *addr, int zapped)
458{
459 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
460 struct sock *sk = sock->sk;
461 int port;
462 int rc;
463
464 /* rebinding ok */
465 if (!zapped && addr->sq_port == ipc->us.sq_port)
466 return 0;
467
468 port = addr->sq_port;
469 rc = qrtr_port_assign(ipc, &port);
470 if (rc)
471 return rc;
472
473 /* unbind previous, if any */
474 if (!zapped)
475 qrtr_port_remove(ipc);
476 ipc->us.sq_port = port;
477
478 sock_reset_flag(sk, SOCK_ZAPPED);
479
480 return 0;
481}
482
483/* Auto bind to an ephemeral port. */
484static int qrtr_autobind(struct socket *sock)
485{
486 struct sock *sk = sock->sk;
487 struct sockaddr_qrtr addr;
488
489 if (!sock_flag(sk, SOCK_ZAPPED))
490 return 0;
491
492 addr.sq_family = AF_QIPCRTR;
493 addr.sq_node = qrtr_local_nid;
494 addr.sq_port = 0;
495
496 return __qrtr_bind(sock, &addr, 1);
497}
498
499/* Bind socket to specified sockaddr. */
500static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
501{
502 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
503 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
504 struct sock *sk = sock->sk;
505 int rc;
506
507 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
508 return -EINVAL;
509
510 if (addr->sq_node != ipc->us.sq_node)
511 return -EINVAL;
512
513 lock_sock(sk);
514 rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
515 release_sock(sk);
516
517 return rc;
518}
519
520/* Queue packet to local peer socket. */
521static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
522{
523 const struct qrtr_hdr *phdr;
524 struct qrtr_sock *ipc;
525
526 phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
527
528 ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
529 if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
530 kfree_skb(skb);
531 return -ENODEV;
532 }
533
534 if (sock_queue_rcv_skb(&ipc->sk, skb)) {
535 qrtr_port_put(ipc);
536 kfree_skb(skb);
537 return -ENOSPC;
538 }
539
540 qrtr_port_put(ipc);
541
542 return 0;
543}
544
545/* Queue packet for broadcast. */
546static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
547{
548 struct sk_buff *skbn;
549
550 mutex_lock(&qrtr_node_lock);
551 list_for_each_entry(node, &qrtr_all_nodes, item) {
552 skbn = skb_clone(skb, GFP_KERNEL);
553 if (!skbn)
554 break;
555 skb_set_owner_w(skbn, skb->sk);
556 qrtr_node_enqueue(node, skbn);
557 }
558 mutex_unlock(&qrtr_node_lock);
559
560 qrtr_local_enqueue(node, skb);
561
562 return 0;
563}
564
/* Send a datagram: build the routing header, copy in the payload and hand
 * the skb to the broadcast, local or per-node enqueue function.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* hdr->size is 32 bits but the protocol caps payloads at 64k */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		/* sending may implicitly bind an unbound socket */
		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		/* no address given: fall back to the connected peer */
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* pick the delivery path; only node-directed sends hold a node ref */
	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
		/* broadcast is reserved for control messages */
		if (addr->sq_port != QRTR_PORT_CTRL) {
			release_sock(sk);
			return -ENOTCONN;
		}
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	/* allocate for the 4-byte-aligned length so the padding below
	 * always fits in the tailroom (skb_pad cannot fail/free here)
	 */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	/* zero-pad the payload up to the aligned length */
	if (plen != len) {
		skb_pad(skb, plen - len);
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	/* enqueue_fn consumes the skb regardless of outcome */
	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
679
/* Receive one datagram, stripping the routing header and optionally
 * reporting the sender's address.
 */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	/* an unbound socket cannot have received anything */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	/* payload length comes from the validated routing header */
	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
733
734static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
735 int len, int flags)
736{
737 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
738 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
739 struct sock *sk = sock->sk;
740 int rc;
741
742 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
743 return -EINVAL;
744
745 lock_sock(sk);
746
747 sk->sk_state = TCP_CLOSE;
748 sock->state = SS_UNCONNECTED;
749
750 rc = qrtr_autobind(sock);
751 if (rc) {
752 release_sock(sk);
753 return rc;
754 }
755
756 ipc->peer = *addr;
757 sock->state = SS_CONNECTED;
758 sk->sk_state = TCP_ESTABLISHED;
759
760 release_sock(sk);
761
762 return 0;
763}
764
765static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
766 int *len, int peer)
767{
768 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
769 struct sockaddr_qrtr qaddr;
770 struct sock *sk = sock->sk;
771
772 lock_sock(sk);
773 if (peer) {
774 if (sk->sk_state != TCP_ESTABLISHED) {
775 release_sock(sk);
776 return -ENOTCONN;
777 }
778
779 qaddr = ipc->peer;
780 } else {
781 qaddr = ipc->us;
782 }
783 release_sock(sk);
784
785 *len = sizeof(qaddr);
786 qaddr.sq_family = AF_QIPCRTR;
787
788 memcpy(saddr, &qaddr, sizeof(qaddr));
789
790 return 0;
791}
792
/* Socket ioctl handler: queue occupancy, local address and timestamp
 * queries; routing/interface ioctls are rejected.
 */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* bytes of send-buffer space still available */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* payload size of the next pending datagram (0 if none) */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		/* report our qrtr address through the ifreq interface */
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* recognized but unsupported for this address family */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
855
/* Tear down a socket on close(): mark it dead, drop its port assignment
 * and release the final reference.
 */
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	/* wake anyone sleeping on this socket before it goes away */
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	/* only bound sockets hold a port table entry */
	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}
884
/* Datagram-only proto_ops: qrtr has no listen/accept, socket options or
 * sendpage/mmap support.
 */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
905
/* Protocol descriptor; obj_size makes sk_alloc() allocate a qrtr_sock */
static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
911
912static int qrtr_create(struct net *net, struct socket *sock,
913 int protocol, int kern)
914{
915 struct qrtr_sock *ipc;
916 struct sock *sk;
917
918 if (sock->type != SOCK_DGRAM)
919 return -EPROTOTYPE;
920
921 sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
922 if (!sk)
923 return -ENOMEM;
924
925 sock_set_flag(sk, SOCK_ZAPPED);
926
927 sock_init_data(sock, sk);
928 sock->ops = &qrtr_proto_ops;
929
930 ipc = qrtr_sk(sk);
931 ipc->us.sq_family = AF_QIPCRTR;
932 ipc->us.sq_node = qrtr_local_nid;
933 ipc->us.sq_port = 0;
934
935 return 0;
936}
937
/* Netlink attribute policy for RTM_NEWADDR: only the local node id */
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};
941
942static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
943{
944 struct nlattr *tb[IFA_MAX + 1];
945 struct ifaddrmsg *ifm;
946 int rc;
947
948 if (!netlink_capable(skb, CAP_NET_ADMIN))
949 return -EPERM;
950
951 if (!netlink_capable(skb, CAP_SYS_ADMIN))
952 return -EPERM;
953
954 ASSERT_RTNL();
955
956 rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
957 if (rc < 0)
958 return rc;
959
960 ifm = nlmsg_data(nlh);
961 if (!tb[IFA_LOCAL])
962 return -EINVAL;
963
964 qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
965 return 0;
966}
967
/* Address family registration; hooks qrtr_create into socket(2) */
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
973
974static int __init qrtr_proto_init(void)
975{
976 int rc;
977
978 rc = proto_register(&qrtr_proto, 1);
979 if (rc)
980 return rc;
981
982 rc = sock_register(&qrtr_family);
983 if (rc) {
984 proto_unregister(&qrtr_proto);
985 return rc;
986 }
987
988 rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);
989
990 return 0;
991}
992module_init(qrtr_proto_init);
993
static void __exit qrtr_proto_fini(void)
{
	/* teardown mirrors qrtr_proto_init() in reverse order */
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");