// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. - Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check
 * the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection. If it does, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation).
 * Note this function will also cleanup rejected sockets, those that reach the
 * connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *	lock_sock(listener);
 *	lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
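
/* Illustrative userspace sketch (not part of this file, values are examples
 * only): a vsock stream client uses the ordinary socket calls, and only the
 * address family and sockaddr type differ from AF_INET:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,	(e.g. connect to the host)
 *		.svm_port = 1234,		(example port above the reserved range)
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The kernel side of that connect(2) is vsock_stream_connect() below.
 */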

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);

/**** EXPORTS ****/

/* Get the ID of the local context. This is transport dependent. */

int vm_sockets_get_local_cid(void)
{
	return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES 24

#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static int __init vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
	return 0;
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
		if (addr->svm_port == vsk->local_addr.svm_port)
			return sk_vsock(vsk);

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

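/* Pop the first connected child off the listener's accept queue, or return
 * NULL if the queue is empty.  The listener reference taken in
 * vsock_enqueue_accept() is dropped here; the child's reference is kept and
 * handed to the caller, which is expected to call sock_put() on it.
 */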
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	return transport->shutdown(vsock_sk(sk), mode);
}

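/* Delayed work that cleans up a pending socket which was never accepted (or
 * was rejected); see the implementation notes at the top of this file.
 */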
static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		listener->sk_ack_backlog--;
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = LAST_RESERVED_PORT + 1 +
			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the hash
	 * table for easy lookup by its address.  The unbound list is simply an
	 * extra entry at the end of the hash table, a trick used by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	u32 cid;
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to the local CID.
	 */
	cid = transport->get_local_cid();
	if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

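/* Allocate and initialize a new vsock socket.  When @parent (a listener) is
 * given, the child inherits its trust, credentials and connect timeout;
 * otherwise defaults are taken from the current task.
 */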
struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type,
			    int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);

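/* Common release path for both user-created sockets and unaccepted children
 * still sitting on a listener's accept queue.
 */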
static void __vsock_release(struct sock *sk, int level)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		/* The release call is supposed to use lock_sock_nested()
		 * rather than lock_sock(), if a sock lock should be acquired.
		 */
		transport->release(vsk);

		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
		 * version to avoid the warning "possible recursive locking
		 * detected". When "level" is 0, lock_sock_nested(sk, level)
		 * is the same as lock_sock(sk).
		 */
		lock_sock_nested(sk, level);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending, SINGLE_DEPTH_NESTING);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	transport->destruct(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

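/* sk_backlog_rcv callback: queue the skb on the socket's receive queue,
 * freeing it if queueing fails.
 */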
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		lock_sock(sk);

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;

			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;

				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;

		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;


	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t len, int flags)
{
	return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

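/* Ask the transport to cancel packets queued for @vsk; returns -EOPNOTSUPP
 * if the transport does not implement cancel_pkt.
 */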
static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
	if (!transport->cancel_pkt)
		return -EOPNOTSUPP;

	return transport->cancel_pkt(vsk);
}

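/* Delayed work armed by a non-blocking connect(); if the connection is still
 * in TCP_SYN_SENT when it fires, fail the socket with ETIMEDOUT.
 */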
Andy Kingd021c342013-02-06 14:23:56 +00001101static void vsock_connect_timeout(struct work_struct *work)
1102{
1103 struct sock *sk;
1104 struct vsock_sock *vsk;
1105
Cong Wang455f05e2018-08-06 11:06:02 -07001106 vsk = container_of(work, struct vsock_sock, connect_work.work);
Andy Kingd021c342013-02-06 14:23:56 +00001107 sk = sk_vsock(vsk);
1108
1109 lock_sock(sk);
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001110 if (sk->sk_state == TCP_SYN_SENT &&
Andy Kingd021c342013-02-06 14:23:56 +00001111 (sk->sk_shutdown != SHUTDOWN_MASK)) {
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001112 sk->sk_state = TCP_CLOSE;
Andy Kingd021c342013-02-06 14:23:56 +00001113 sk->sk_err = ETIMEDOUT;
1114 sk->sk_error_report(sk);
Norbert Slusareka5f0b6f2021-02-05 13:14:05 +01001115 vsock_transport_cancel_pkt(vsk);
Andy Kingd021c342013-02-06 14:23:56 +00001116 }
1117 release_sock(sk);
1118
1119 sock_put(sk);
1120}
1121
1122static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1123 int addr_len, int flags)
1124{
1125 int err;
1126 struct sock *sk;
1127 struct vsock_sock *vsk;
1128 struct sockaddr_vm *remote_addr;
1129 long timeout;
1130 DEFINE_WAIT(wait);
1131
1132 err = 0;
1133 sk = sock->sk;
1134 vsk = vsock_sk(sk);
1135
1136 lock_sock(sk);
1137
1138 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1139 switch (sock->state) {
1140 case SS_CONNECTED:
1141 err = -EISCONN;
1142 goto out;
1143 case SS_DISCONNECTING:
1144 err = -EINVAL;
1145 goto out;
1146 case SS_CONNECTING:
1147 /* This continues on so we can move sock into the SS_CONNECTED
1148 * state once the connection has completed (at which point err
1149 * will be set to zero also). Otherwise, we will either wait
1150 * for the connection or return -EALREADY should this be a
1151 * non-blocking call.
1152 */
1153 err = -EALREADY;
1154 break;
1155 default:
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001156 if ((sk->sk_state == TCP_LISTEN) ||
Andy Kingd021c342013-02-06 14:23:56 +00001157 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1158 err = -EINVAL;
1159 goto out;
1160 }
1161
1162 /* The hypervisor and well-known contexts do not have socket
1163 * endpoints.
1164 */
1165 if (!transport->stream_allow(remote_addr->svm_cid,
1166 remote_addr->svm_port)) {
1167 err = -ENETUNREACH;
1168 goto out;
1169 }
1170
1171 /* Set the remote address that we are connecting to. */
1172 memcpy(&vsk->remote_addr, remote_addr,
1173 sizeof(vsk->remote_addr));
1174
Asias Heb3a6dfe2013-06-20 17:20:30 +08001175 err = vsock_auto_bind(vsk);
1176 if (err)
1177 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001178
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001179 sk->sk_state = TCP_SYN_SENT;
Andy Kingd021c342013-02-06 14:23:56 +00001180
1181 err = transport->connect(vsk);
1182 if (err < 0)
1183 goto out;
1184
1185 /* Mark sock as connecting and set the error code to in
1186 * progress in case this is a non-blocking connect.
1187 */
1188 sock->state = SS_CONNECTING;
1189 err = -EINPROGRESS;
1190 }
1191
1192 /* The receive path will handle all communication until we are able to
1193 * enter the connected state. Here we wait for the connection to be
1194 * completed or a notification of an error.
1195 */
1196 timeout = vsk->connect_timeout;
1197 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1198
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001199 while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
Andy Kingd021c342013-02-06 14:23:56 +00001200 if (flags & O_NONBLOCK) {
1201 /* If we're not going to block, we schedule a timeout
1202 * function to generate a timeout on the connection
1203 * attempt, in case the peer doesn't respond in a
1204 * timely manner. We hold on to the socket until the
1205 * timeout fires.
1206 */
1207 sock_hold(sk);
Cong Wang455f05e2018-08-06 11:06:02 -07001208 schedule_delayed_work(&vsk->connect_work, timeout);
Andy Kingd021c342013-02-06 14:23:56 +00001209
1210 /* Skip ahead to preserve error code set above. */
1211 goto out_wait;
1212 }
1213
1214 release_sock(sk);
1215 timeout = schedule_timeout(timeout);
1216 lock_sock(sk);
1217
1218 if (signal_pending(current)) {
1219 err = sock_intr_errno(timeout);
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001220 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001221 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001222 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001223 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001224 } else if (timeout == 0) {
1225 err = -ETIMEDOUT;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001226 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001227 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001228 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001229 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001230 }
1231
1232 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1233 }
1234
1235 if (sk->sk_err) {
1236 err = -sk->sk_err;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001237 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001238 sock->state = SS_UNCONNECTED;
1239 } else {
Andy Kingd021c342013-02-06 14:23:56 +00001240 err = 0;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001241 }
Andy Kingd021c342013-02-06 14:23:56 +00001242
1243out_wait:
1244 finish_wait(sk_sleep(sk), &wait);
1245out:
1246 release_sock(sk);
1247 return err;
Andy Kingd021c342013-02-06 14:23:56 +00001248}
1249
David Howellscdfbabf2017-03-09 08:09:05 +00001250static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1251 bool kern)
Andy Kingd021c342013-02-06 14:23:56 +00001252{
1253 struct sock *listener;
1254 int err;
1255 struct sock *connected;
1256 struct vsock_sock *vconnected;
1257 long timeout;
1258 DEFINE_WAIT(wait);
1259
1260 err = 0;
1261 listener = sock->sk;
1262
1263 lock_sock(listener);
1264
1265 if (sock->type != SOCK_STREAM) {
1266 err = -EOPNOTSUPP;
1267 goto out;
1268 }
1269
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001270 if (listener->sk_state != TCP_LISTEN) {
Andy Kingd021c342013-02-06 14:23:56 +00001271 err = -EINVAL;
1272 goto out;
1273 }
1274
1275 /* Wait for children sockets to appear; these are the new sockets
1276 * created upon connection establishment.
1277 */
Stefano Garzarellaa0220332020-05-27 09:56:55 +02001278 timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
Andy Kingd021c342013-02-06 14:23:56 +00001279 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1280
1281 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1282 listener->sk_err == 0) {
1283 release_sock(listener);
1284 timeout = schedule_timeout(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001285 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001286 lock_sock(listener);
1287
1288 if (signal_pending(current)) {
1289 err = sock_intr_errno(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001290 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001291 } else if (timeout == 0) {
1292 err = -EAGAIN;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001293 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001294 }
1295
1296 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1297 }
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001298 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001299
1300 if (listener->sk_err)
1301 err = -listener->sk_err;
1302
1303 if (connected) {
1304 listener->sk_ack_backlog--;
1305
Stefan Hajnoczi4192f672016-06-23 16:28:58 +01001306 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +00001307 vconnected = vsock_sk(connected);
1308
1309 /* If the listener socket has received an error, then we should
1310 * reject this socket and return. Note that we simply mark the
1311 * socket rejected, drop our reference, and let the cleanup
1312 * function handle the cleanup; the fact that we found it in
1313 * the listener's accept queue guarantees that the cleanup
1314 * function hasn't run yet.
1315 */
1316 if (err) {
1317 vconnected->rejected = true;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001318 } else {
1319 newsock->state = SS_CONNECTED;
1320 sock_graft(connected, newsock);
Andy Kingd021c342013-02-06 14:23:56 +00001321 }
1322
Andy Kingd021c342013-02-06 14:23:56 +00001323 release_sock(connected);
1324 sock_put(connected);
1325 }
1326
Andy Kingd021c342013-02-06 14:23:56 +00001327out:
1328 release_sock(listener);
1329 return err;
1330}
1331
1332static int vsock_listen(struct socket *sock, int backlog)
1333{
1334 int err;
1335 struct sock *sk;
1336 struct vsock_sock *vsk;
1337
1338 sk = sock->sk;
1339
1340 lock_sock(sk);
1341
1342 if (sock->type != SOCK_STREAM) {
1343 err = -EOPNOTSUPP;
1344 goto out;
1345 }
1346
1347 if (sock->state != SS_UNCONNECTED) {
1348 err = -EINVAL;
1349 goto out;
1350 }
1351
1352 vsk = vsock_sk(sk);
1353
1354 if (!vsock_addr_bound(&vsk->local_addr)) {
1355 err = -EINVAL;
1356 goto out;
1357 }
1358
1359 sk->sk_max_ack_backlog = backlog;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001360 sk->sk_state = TCP_LISTEN;
Andy Kingd021c342013-02-06 14:23:56 +00001361
1362 err = 0;
1363
1364out:
1365 release_sock(sk);
1366 return err;
1367}
1368
1369static int vsock_stream_setsockopt(struct socket *sock,
1370 int level,
1371 int optname,
1372 char __user *optval,
1373 unsigned int optlen)
1374{
1375 int err;
1376 struct sock *sk;
1377 struct vsock_sock *vsk;
1378 u64 val;
1379
1380 if (level != AF_VSOCK)
1381 return -ENOPROTOOPT;
1382
1383#define COPY_IN(_v) \
1384 do { \
1385 if (optlen < sizeof(_v)) { \
1386 err = -EINVAL; \
1387 goto exit; \
1388 } \
1389 if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
1390 err = -EFAULT; \
1391 goto exit; \
1392 } \
1393 } while (0)
1394
1395 err = 0;
1396 sk = sock->sk;
1397 vsk = vsock_sk(sk);
1398
1399 lock_sock(sk);
1400
1401 switch (optname) {
1402 case SO_VM_SOCKETS_BUFFER_SIZE:
1403 COPY_IN(val);
1404 transport->set_buffer_size(vsk, val);
1405 break;
1406
1407 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1408 COPY_IN(val);
1409 transport->set_max_buffer_size(vsk, val);
1410 break;
1411
1412 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1413 COPY_IN(val);
1414 transport->set_min_buffer_size(vsk, val);
1415 break;
1416
1417 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
Arnd Bergmannfe0c72f2019-02-02 07:34:44 -08001418 struct __kernel_old_timeval tv;
Andy Kingd021c342013-02-06 14:23:56 +00001419 COPY_IN(tv);
1420 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1421 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1422 vsk->connect_timeout = tv.tv_sec * HZ +
1423 DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
1424 if (vsk->connect_timeout == 0)
1425 vsk->connect_timeout =
1426 VSOCK_DEFAULT_CONNECT_TIMEOUT;
1427
1428 } else {
1429 err = -ERANGE;
1430 }
1431 break;
1432 }
1433
1434 default:
1435 err = -ENOPROTOOPT;
1436 break;
1437 }
1438
1439#undef COPY_IN
1440
1441exit:
1442 release_sock(sk);
1443 return err;
1444}
1445
1446static int vsock_stream_getsockopt(struct socket *sock,
1447 int level, int optname,
1448 char __user *optval,
1449 int __user *optlen)
1450{
1451 int err;
1452 int len;
1453 struct sock *sk;
1454 struct vsock_sock *vsk;
1455 u64 val;
1456
1457 if (level != AF_VSOCK)
1458 return -ENOPROTOOPT;
1459
1460 err = get_user(len, optlen);
1461 if (err != 0)
1462 return err;
1463
1464#define COPY_OUT(_v) \
1465 do { \
1466 if (len < sizeof(_v)) \
1467 return -EINVAL; \
1468 \
1469 len = sizeof(_v); \
1470 if (copy_to_user(optval, &_v, len) != 0) \
1471 return -EFAULT; \
1472 \
1473 } while (0)
1474
1475 err = 0;
1476 sk = sock->sk;
1477 vsk = vsock_sk(sk);
1478
1479 switch (optname) {
1480 case SO_VM_SOCKETS_BUFFER_SIZE:
1481 val = transport->get_buffer_size(vsk);
1482 COPY_OUT(val);
1483 break;
1484
1485 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1486 val = transport->get_max_buffer_size(vsk);
1487 COPY_OUT(val);
1488 break;
1489
1490 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1491 val = transport->get_min_buffer_size(vsk);
1492 COPY_OUT(val);
1493 break;
1494
1495 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
Arnd Bergmannfe0c72f2019-02-02 07:34:44 -08001496 struct __kernel_old_timeval tv;
Andy Kingd021c342013-02-06 14:23:56 +00001497 tv.tv_sec = vsk->connect_timeout / HZ;
1498 tv.tv_usec =
1499 (vsk->connect_timeout -
1500 tv.tv_sec * HZ) * (1000000 / HZ);
1501 COPY_OUT(tv);
1502 break;
1503 }
1504 default:
1505 return -ENOPROTOOPT;
1506 }
1507
1508 err = put_user(len, optlen);
1509 if (err != 0)
1510 return -EFAULT;
1511
1512#undef COPY_OUT
1513
1514 return 0;
1515}
1516
Ying Xue1b784142015-03-02 15:37:48 +08001517static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1518 size_t len)
Andy Kingd021c342013-02-06 14:23:56 +00001519{
1520 struct sock *sk;
1521 struct vsock_sock *vsk;
1522 ssize_t total_written;
1523 long timeout;
1524 int err;
1525 struct vsock_transport_send_notify_data send_data;
WANG Cong499fde62017-05-19 11:21:59 -07001526 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Andy Kingd021c342013-02-06 14:23:56 +00001527
1528 sk = sock->sk;
1529 vsk = vsock_sk(sk);
1530 total_written = 0;
1531 err = 0;
1532
1533 if (msg->msg_flags & MSG_OOB)
1534 return -EOPNOTSUPP;
1535
1536 lock_sock(sk);
1537
1538 /* Callers should not provide a destination with stream sockets. */
1539 if (msg->msg_namelen) {
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001540 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
Andy Kingd021c342013-02-06 14:23:56 +00001541 goto out;
1542 }
1543
1544 /* Send data only if both sides are not shutdown in the direction. */
1545 if (sk->sk_shutdown & SEND_SHUTDOWN ||
1546 vsk->peer_shutdown & RCV_SHUTDOWN) {
1547 err = -EPIPE;
1548 goto out;
1549 }
1550
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001551 if (sk->sk_state != TCP_ESTABLISHED ||
Andy Kingd021c342013-02-06 14:23:56 +00001552 !vsock_addr_bound(&vsk->local_addr)) {
1553 err = -ENOTCONN;
1554 goto out;
1555 }
1556
1557 if (!vsock_addr_bound(&vsk->remote_addr)) {
1558 err = -EDESTADDRREQ;
1559 goto out;
1560 }
1561
1562 /* Wait for room in the produce queue to enqueue our user's data. */
1563 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1564
1565 err = transport->notify_send_init(vsk, &send_data);
1566 if (err < 0)
1567 goto out;
1568
Andy Kingd021c342013-02-06 14:23:56 +00001569 while (total_written < len) {
1570 ssize_t written;
1571
WANG Cong499fde62017-05-19 11:21:59 -07001572 add_wait_queue(sk_sleep(sk), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001573 while (vsock_stream_has_space(vsk) == 0 &&
1574 sk->sk_err == 0 &&
1575 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1576 !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1577
1578 /* Don't wait for non-blocking sockets. */
1579 if (timeout == 0) {
1580 err = -EAGAIN;
WANG Cong499fde62017-05-19 11:21:59 -07001581 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001582 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001583 }
1584
1585 err = transport->notify_send_pre_block(vsk, &send_data);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001586 if (err < 0) {
WANG Cong499fde62017-05-19 11:21:59 -07001587 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001588 goto out_err;
1589 }
Andy Kingd021c342013-02-06 14:23:56 +00001590
1591 release_sock(sk);
WANG Cong499fde62017-05-19 11:21:59 -07001592 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
Andy Kingd021c342013-02-06 14:23:56 +00001593 lock_sock(sk);
1594 if (signal_pending(current)) {
1595 err = sock_intr_errno(timeout);
WANG Cong499fde62017-05-19 11:21:59 -07001596 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001597 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001598 } else if (timeout == 0) {
1599 err = -EAGAIN;
WANG Cong499fde62017-05-19 11:21:59 -07001600 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001601 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001602 }
Andy Kingd021c342013-02-06 14:23:56 +00001603 }
WANG Cong499fde62017-05-19 11:21:59 -07001604 remove_wait_queue(sk_sleep(sk), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001605
1606 /* These checks occur both as part of and after the loop
1607 * conditional since we need to check before and after
1608 * sleeping.
1609 */
1610 if (sk->sk_err) {
1611 err = -sk->sk_err;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001612 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001613 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1614 (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1615 err = -EPIPE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001616 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001617 }
1618
1619 err = transport->notify_send_pre_enqueue(vsk, &send_data);
1620 if (err < 0)
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001621 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001622
1623 /* Note that enqueue will only write as many bytes as are free
1624 * in the produce queue, so we don't need to ensure len is
1625 * smaller than the queue size. It is the caller's
1626 * responsibility to check how many bytes we were able to send.
1627 */

		written = transport->stream_enqueue(
				vsk, msg,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;

	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}


static int
vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		     int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer. This
	 * is not an error. We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing. Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
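	/* Illustrative note (assumption about sock_rcvlowat() behaviour, not
	 * in the original comment): with the default SO_RCVLOWAT of 1 we can
	 * return as soon as any data arrives, while MSG_WAITALL raises target
	 * to the full request, which is why the stream_rcvhiwat() check below
	 * rejects requests that could never be satisfied by the queue.
	 */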
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;


	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content. XXX This should
				 * be changed to a connection reset in a later
				 * change.
				 */

				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}

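/* Illustrative userspace sketch (not part of this file): a stream connection
 * uses the usual BSD socket calls with struct sockaddr_vm from
 * <linux/vm_sockets.h>, e.g.:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 1234,
 *	};
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, sizeof(buf), 0);
 *	recv(fd, buf, sizeof(buf), 0);
 *
 * The CID and port above are placeholders; send() and recv() reach
 * vsock_stream_sendmsg() and vsock_stream_recvmsg() through the proto_ops
 * table below.
 */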
static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_stream_setsockopt,
	.getsockopt = vsock_stream_getsockopt,
	.sendmsg = vsock_stream_sendmsg,
	.recvmsg = vsock_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
}
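
/* Illustrative note (not part of this file): socket(AF_VSOCK, SOCK_STREAM, 0)
 * and socket(AF_VSOCK, SOCK_DGRAM, 0) both land here through
 * vsock_family_ops.create below; any other type, e.g. SOCK_SEQPACKET, is
 * rejected with -ESOCKTNOSUPPORT.
 */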

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		if (put_user(transport->get_local_cid(), p) != 0)
			retval = -EFAULT;
		break;

	default:
		pr_err("Unknown ioctl %d\n", cmd);
		retval = -EINVAL;
	}

	return retval;
}
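
/* Illustrative userspace sketch (not part of this file): the local CID is
 * typically read through the misc device registered below, e.g.:
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 *
 * IOCTL_VM_SOCKETS_GET_LOCAL_CID comes from <linux/vm_sockets.h>; error
 * handling is omitted for brevity.
 */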

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vsock_dev_compat_ioctl,
#endif
	.open = nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name = "vsock",
	.fops = &vsock_device_ops,
};

int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
{
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	if (transport) {
		err = -EBUSY;
		goto err_busy;
	}

	/* Transport must be the owner of the protocol so that it can't
	 * unload while there are open sockets.
	 */
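	/* Illustrative note (assumption, not in the original comment): the
	 * core socket layer takes a module reference on vsock_proto.owner
	 * when each socket is allocated, so pointing it at the transport
	 * module keeps that module loaded while any vsock socket exists.
	 */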
	vsock_proto.owner = owner;
	transport = t;

	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	mutex_unlock(&vsock_register_mutex);
	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	transport = NULL;
err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__vsock_core_init);
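
/* Illustrative note (assumption based on include/net/af_vsock.h): transports
 * normally register through the vsock_core_init() wrapper, which passes
 * THIS_MODULE as @owner, e.g. from a transport module's init function:
 *
 *	err = vsock_core_init(&vmci_transport);
 *
 * The transport name above is only an example.
 */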

void vsock_core_exit(void)
{
	mutex_lock(&vsock_register_mutex);

	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);

	/* We do not want the assignment below re-ordered. */
	mb();
	transport = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);

const struct vsock_transport *vsock_core_get_transport(void)
{
	/* vsock_register_mutex not taken since only the transport uses this
	 * function and only while registered.
	 */
	return transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

static void __exit vsock_exit(void)
{
	/* Do nothing. This function makes this module removable. */
}

module_init(vsock_init_tables);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");