blob: a419201a5cf4a0e9a298e4b6e3ed6b7c20efd688 [file] [log] [blame]
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/qrtr.h>
17#include <linux/termios.h> /* For TIOCINQ/OUTQ */
18
19#include <net/sock.h>
20
21#include "qrtr.h"
22
/* Wire-protocol version understood by this router implementation. */
#define QRTR_PROTO_VER 1

/* auto-bind range: ports handed out when a socket binds to port 0 */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
28
/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * All fields are little-endian on the wire; the struct is __packed so it
 * can be overlaid directly on a received buffer.  Do not reorder fields.
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;
50
/* Size of the on-wire header prepended to every qrtr packet. */
#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
Courtney Cavinbdabad32016-05-06 07:09:08 -070052
/**
 * struct qrtr_sock - qrtr socket state
 * @sk: common socket data; MUST stay the first member so qrtr_sk() can
 *      convert between the two pointer types
 * @us: address this socket is bound to
 * @peer: connected remote address (valid while sk_state == TCP_ESTABLISHED)
 */
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};
59
/* Convert a struct sock pointer into its enclosing qrtr socket. */
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	/* Valid only because sk is the first member of struct qrtr_sock. */
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
65
/* Node id of the local endpoint; settable via RTM_NEWADDR (qrtr_addr_doit). */
static unsigned int qrtr_local_nid = -1;

/* for node ids -> struct qrtr_node lookup */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list of all registered nodes */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
78
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint (NULL once the endpoint has been unregistered)
 * @ref: reference count for node
 * @nid: node id (QRTR_EP_NID_AUTO until assigned or learned from traffic)
 * @rx_queue: receive queue, drained by qrtr_node_rx_work()
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list (qrtr_all_nodes)
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};
99
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	/* Auto-assigned ids were never inserted into the radix tree. */
	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	/* kref_put_mutex() acquired qrtr_node_lock for us; drop it here. */
	mutex_unlock(&qrtr_node_lock);

	/* Ensure the rx worker has finished before tearing down its queue. */
	cancel_work_sync(&node->work);
	skb_queue_purge(&node->rx_queue);
	kfree(node);
}
119
120/* Increment reference to node. */
121static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
122{
123 if (node)
124 kref_get(&node->ref);
125 return node;
126}
127
128/* Decrement reference to node and release as necessary. */
129static void qrtr_node_release(struct qrtr_node *node)
130{
131 if (!node)
132 return;
133 kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
134}
135
136/* Pass an outgoing packet socket buffer to the endpoint driver. */
137static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
138{
139 int rc = -ENODEV;
140
141 mutex_lock(&node->ep_lock);
142 if (node->ep)
143 rc = node->ep->xmit(node->ep, skb);
144 else
145 kfree_skb(skb);
146 mutex_unlock(&node->ep_lock);
147
148 return rc;
149}
150
151/* Lookup node by id.
152 *
153 * callers must release with qrtr_node_release()
154 */
155static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
156{
157 struct qrtr_node *node;
158
159 mutex_lock(&qrtr_node_lock);
160 node = radix_tree_lookup(&qrtr_nodes, nid);
161 node = qrtr_node_acquire(node);
162 mutex_unlock(&qrtr_node_lock);
163
164 return node;
165}
166
167/* Assign node id to node.
168 *
169 * This is mostly useful for automatic node id assignment, based on
170 * the source id in the incoming packet.
171 */
172static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
173{
174 if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
175 return;
176
177 mutex_lock(&qrtr_node_lock);
178 radix_tree_insert(&qrtr_nodes, nid, node);
179 node->nid = nid;
180 mutex_unlock(&qrtr_node_lock);
181}
182
183/**
184 * qrtr_endpoint_post() - post incoming data
185 * @ep: endpoint handle
186 * @data: data pointer
187 * @len: size of data in bytes
188 *
189 * Return: 0 on success; negative error code on failure
190 */
191int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
192{
193 struct qrtr_node *node = ep->node;
194 const struct qrtr_hdr *phdr = data;
195 struct sk_buff *skb;
196 unsigned int psize;
197 unsigned int size;
198 unsigned int type;
199 unsigned int ver;
200 unsigned int dst;
201
202 if (len < QRTR_HDR_SIZE || len & 3)
203 return -EINVAL;
204
205 ver = le32_to_cpu(phdr->version);
206 size = le32_to_cpu(phdr->size);
207 type = le32_to_cpu(phdr->type);
208 dst = le32_to_cpu(phdr->dst_port_id);
209
210 psize = (size + 3) & ~3;
211
212 if (ver != QRTR_PROTO_VER)
213 return -EINVAL;
214
215 if (len != psize + QRTR_HDR_SIZE)
216 return -EINVAL;
217
218 if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
219 return -EINVAL;
220
221 skb = netdev_alloc_skb(NULL, len);
222 if (!skb)
223 return -ENOMEM;
224
225 skb_reset_transport_header(skb);
226 memcpy(skb_put(skb, len), data, len);
227
228 skb_queue_tail(&node->rx_queue, skb);
229 schedule_work(&node->work);
230
231 return 0;
232}
233EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
234
/* Allocate and construct a resume-tx control packet.
 *
 * The packet is addressed from our control port to the control port on
 * @dst_node and carries { cmd, node, port } as its payload.
 */
static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
					    u32 dst_node, u32 port)
{
	const int pkt_len = 20;	/* 5 LE32 words: cmd, node, port + 2 pad */
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	__le32 *buf;

	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_reset_transport_header(skb);

	/* Routing header. */
	hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	hdr->src_node_id = cpu_to_le32(src_node);
	hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(pkt_len);
	hdr->dst_node_id = cpu_to_le32(dst_node);
	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);

	/* Payload; trailing words stay zero from the memset. */
	buf = (__le32 *)skb_put(skb, pkt_len);
	memset(buf, 0, pkt_len);
	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	buf[1] = cpu_to_le32(src_node);
	buf[2] = cpu_to_le32(port);

	return skb;
}
267
268static struct qrtr_sock *qrtr_port_lookup(int port);
269static void qrtr_port_put(struct qrtr_sock *ipc);
270
/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		const struct qrtr_hdr *phdr;
		u32 dst_node, dst_port;
		struct qrtr_sock *ipc;
		u32 src_node;
		int confirm;

		/* Header was validated in qrtr_endpoint_post(). */
		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
		src_node = le32_to_cpu(phdr->src_node_id);
		dst_node = le32_to_cpu(phdr->dst_node_id);
		dst_port = le32_to_cpu(phdr->dst_port_id);
		confirm = !!phdr->confirm_rx;

		/* Learn our peer's node id if this endpoint was auto. */
		qrtr_node_assign(node, src_node);

		ipc = qrtr_port_lookup(dst_port);
		if (!ipc) {
			/* No local socket bound to this port: drop. */
			kfree_skb(skb);
		} else {
			/* On queueing failure the skb is still ours to free. */
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			/* Sender asked for a resume-tx reply; note that the
			 * skb variable is reused for the reply packet here.
			 */
			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
			if (!skb)
				break;
			if (qrtr_node_enqueue(node, skb))
				break;
		}
	}
}
314
315/**
316 * qrtr_endpoint_register() - register a new endpoint
317 * @ep: endpoint to register
318 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
319 * Return: 0 on success; negative error code on failure
320 *
321 * The specified endpoint must have the xmit function pointer set on call.
322 */
323int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
324{
325 struct qrtr_node *node;
326
327 if (!ep || !ep->xmit)
328 return -EINVAL;
329
330 node = kzalloc(sizeof(*node), GFP_KERNEL);
331 if (!node)
332 return -ENOMEM;
333
334 INIT_WORK(&node->work, qrtr_node_rx_work);
335 kref_init(&node->ref);
336 mutex_init(&node->ep_lock);
337 skb_queue_head_init(&node->rx_queue);
338 node->nid = QRTR_EP_NID_AUTO;
339 node->ep = ep;
340
341 qrtr_node_assign(node, nid);
342
343 mutex_lock(&qrtr_node_lock);
344 list_add(&node->item, &qrtr_all_nodes);
345 mutex_unlock(&qrtr_node_lock);
346 ep->node = node;
347
348 return 0;
349}
350EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
351
352/**
353 * qrtr_endpoint_unregister - unregister endpoint
354 * @ep: endpoint to unregister
355 */
356void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
357{
358 struct qrtr_node *node = ep->node;
359
360 mutex_lock(&node->ep_lock);
361 node->ep = NULL;
362 mutex_unlock(&node->ep_lock);
363
364 qrtr_node_release(node);
365 ep->node = NULL;
366}
367EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
368
/* Lookup socket by port.
 *
 * Takes a hold on the found socket. Callers must release with
 * qrtr_port_put().
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	/* The control port is stored under index 0 in the idr. */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}
388
/* Release a socket hold acquired via qrtr_port_lookup(). */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
394
/* Remove port assignment and drop the hold taken by qrtr_port_assign(). */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	int port = ipc->us.sq_port;

	/* The control port is stored under index 0 in the idr. */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}
409
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 *
 * On success a hold is taken on the socket; qrtr_port_remove() drops it.
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		/* Ephemeral request: let the idr pick a free port. */
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		/* The control port is stored under index 0 in the idr. */
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	/* idr_alloc() reports an exhausted/taken range as -ENOSPC; map it
	 * to the address-in-use error userspace expects from bind().
	 */
	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
451
/* Bind socket to address.
 *
 * Socket should be locked upon call.  @zapped indicates the socket has
 * never been bound before (SOCK_ZAPPED still set).
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	/* Socket now has a port: mark it usable for send/recv. */
	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}
482
483/* Auto bind to an ephemeral port. */
484static int qrtr_autobind(struct socket *sock)
485{
486 struct sock *sk = sock->sk;
487 struct sockaddr_qrtr addr;
488
489 if (!sock_flag(sk, SOCK_ZAPPED))
490 return 0;
491
492 addr.sq_family = AF_QIPCRTR;
493 addr.sq_node = qrtr_local_nid;
494 addr.sq_port = 0;
495
496 return __qrtr_bind(sock, &addr, 1);
497}
498
/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	/* Binding is only permitted to the local node id. */
	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}
519
520/* Queue packet to local peer socket. */
521static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
522{
523 const struct qrtr_hdr *phdr;
524 struct qrtr_sock *ipc;
525
526 phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
527
528 ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
529 if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
530 kfree_skb(skb);
531 return -ENODEV;
532 }
533
534 if (sock_queue_rcv_skb(&ipc->sk, skb)) {
535 qrtr_port_put(ipc);
536 kfree_skb(skb);
537 return -ENOSPC;
538 }
539
540 qrtr_port_put(ipc);
541
542 return 0;
543}
544
545/* Queue packet for broadcast. */
546static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
547{
548 struct sk_buff *skbn;
549
550 mutex_lock(&qrtr_node_lock);
551 list_for_each_entry(node, &qrtr_all_nodes, item) {
552 skbn = skb_clone(skb, GFP_KERNEL);
553 if (!skbn)
554 break;
555 skb_set_owner_w(skbn, skb->sk);
556 qrtr_node_enqueue(node, skbn);
557 }
558 mutex_unlock(&qrtr_node_lock);
559
560 qrtr_local_enqueue(node, skb);
561
562 return 0;
563}
564
/* Transmit a datagram to the address in msg_name, or to the connected
 * peer when no address is supplied.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* Protocol caps a single datagram at 64k of payload. */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		/* Make sure we have a source port for the header. */
		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* Pick the enqueue strategy for the destination node. */
	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
		/* Only control messages may be broadcast. */
		if (addr->sq_port != QRTR_PORT_CTRL) {
			release_sock(sk);
			return -ENOTCONN;
		}
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	/* Payload is padded to a 4-byte boundary on the wire. */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (plen != len) {
		/* Zero the pad bytes and extend the skb over them. */
		skb_pad(skb, plen - len);
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
679
/* Receive a single datagram, stripping the qrtr header. */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	/* An unbound socket can never have queued data. */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	/* Use the payload size from the header, truncating to the
	 * caller's buffer if necessary.
	 */
	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* Report the sender's address back to the caller. */
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
728
729static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
730 int len, int flags)
731{
732 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
733 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
734 struct sock *sk = sock->sk;
735 int rc;
736
737 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
738 return -EINVAL;
739
740 lock_sock(sk);
741
742 sk->sk_state = TCP_CLOSE;
743 sock->state = SS_UNCONNECTED;
744
745 rc = qrtr_autobind(sock);
746 if (rc) {
747 release_sock(sk);
748 return rc;
749 }
750
751 ipc->peer = *addr;
752 sock->state = SS_CONNECTED;
753 sk->sk_state = TCP_ESTABLISHED;
754
755 release_sock(sk);
756
757 return 0;
758}
759
760static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
761 int *len, int peer)
762{
763 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
764 struct sockaddr_qrtr qaddr;
765 struct sock *sk = sock->sk;
766
767 lock_sock(sk);
768 if (peer) {
769 if (sk->sk_state != TCP_ESTABLISHED) {
770 release_sock(sk);
771 return -ENOTCONN;
772 }
773
774 qaddr = ipc->peer;
775 } else {
776 qaddr = ipc->us;
777 }
778 release_sock(sk);
779
780 *len = sizeof(qaddr);
781 qaddr.sq_family = AF_QIPCRTR;
782
783 memcpy(saddr, &qaddr, sizeof(qaddr));
784
785 return 0;
786}
787
/* Socket ioctl handler: queue probes, address queries and timestamps. */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* Remaining send-buffer space, clamped at zero. */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* Payload size of the next queued datagram, if any. */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		/* Return our bound address in the ifreq address slot. */
		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* Interface-style ioctls that do not apply to qrtr. */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
850
/* Tear down a socket on close(). */
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	/* Wake anyone sleeping on this socket before it goes away. */
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	/* Release the port (and its hold) unless we never bound one. */
	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}
879
/* Operations backing every AF_QIPCRTR socket; unsupported operations
 * (accept/listen/etc.) are routed to the sock_no_* stubs.
 */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
900
/* Protocol descriptor; obj_size makes sk_alloc() allocate a qrtr_sock. */
static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
906
/* Create a new AF_QIPCRTR socket. */
static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	/* Only connectionless datagram sockets are supported. */
	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* ZAPPED == not yet bound to a port; cleared by __qrtr_bind(). */
	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}
932
/* Netlink attribute policy for RTM_NEWADDR: only the local node id. */
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};
936
/* RTM_NEWADDR handler: set the node id used for newly created sockets. */
static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	/* NOTE(review): both CAP_NET_ADMIN and CAP_SYS_ADMIN are required
	 * here — confirm the double capability check is intentional.
	 */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
	return 0;
}
962
/* AF_QIPCRTR address-family registration. */
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
968
969static int __init qrtr_proto_init(void)
970{
971 int rc;
972
973 rc = proto_register(&qrtr_proto, 1);
974 if (rc)
975 return rc;
976
977 rc = sock_register(&qrtr_family);
978 if (rc) {
979 proto_unregister(&qrtr_proto);
980 return rc;
981 }
982
983 rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);
984
985 return 0;
986}
987module_init(qrtr_proto_init);
988
/* Module unload: unwind registrations in reverse order of init. */
static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);
996
997MODULE_DESCRIPTION("Qualcomm IPC-router driver");
998MODULE_LICENSE("GPL v2");