blob: f195d9316e55d3d7ef6dc5285606ea9de0f52a51 [file] [log] [blame]
Jerry Chu10467162012-08-31 12:29:11 +00001#include <linux/err.h>
Yuchung Cheng2100c8d2012-07-19 06:43:05 +00002#include <linux/init.h>
3#include <linux/kernel.h>
Jerry Chu10467162012-08-31 12:29:11 +00004#include <linux/list.h>
5#include <linux/tcp.h>
6#include <linux/rcupdate.h>
7#include <linux/rculist.h>
8#include <net/inetpeer.h>
9#include <net/tcp.h>
Yuchung Cheng2100c8d2012-07-19 06:43:05 +000010
/* TFO mode: client-side Fast Open enabled by default; server side opt-in
 * via this sysctl's TFO_SERVER_* bits.
 */
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

/* Current AES cipher context used to generate/validate TFO cookies.
 * Published with rcu_assign_pointer(); readers access it under RCU,
 * writers serialize on tcp_fastopen_ctx_lock below.
 */
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

/* Serializes replacement of tcp_fastopen_ctx (writers only). */
static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
16
Hannes Frederic Sowa222e83d2013-10-19 21:48:58 +020017void tcp_fastopen_init_key_once(bool publish)
18{
19 static u8 key[TCP_FASTOPEN_KEY_LENGTH];
20
21 /* tcp_fastopen_reset_cipher publishes the new context
22 * atomically, so we allow this race happening here.
23 *
24 * All call sites of tcp_fastopen_cookie_gen also check
25 * for a valid cookie, so this is an acceptable risk.
26 */
27 if (net_get_random_once(key, sizeof(key)) && publish)
28 tcp_fastopen_reset_cipher(key, sizeof(key));
29}
30
Jerry Chu10467162012-08-31 12:29:11 +000031static void tcp_fastopen_ctx_free(struct rcu_head *head)
32{
33 struct tcp_fastopen_context *ctx =
34 container_of(head, struct tcp_fastopen_context, rcu);
35 crypto_free_cipher(ctx->tfm);
36 kfree(ctx);
37}
38
39int tcp_fastopen_reset_cipher(void *key, unsigned int len)
40{
41 int err;
42 struct tcp_fastopen_context *ctx, *octx;
43
44 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
45 if (!ctx)
46 return -ENOMEM;
47 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
48
49 if (IS_ERR(ctx->tfm)) {
50 err = PTR_ERR(ctx->tfm);
51error: kfree(ctx);
52 pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
53 return err;
54 }
55 err = crypto_cipher_setkey(ctx->tfm, key, len);
56 if (err) {
57 pr_err("TCP: TFO cipher key error: %d\n", err);
58 crypto_free_cipher(ctx->tfm);
59 goto error;
60 }
61 memcpy(ctx->key, key, len);
62
63 spin_lock(&tcp_fastopen_ctx_lock);
64
65 octx = rcu_dereference_protected(tcp_fastopen_ctx,
66 lockdep_is_held(&tcp_fastopen_ctx_lock));
67 rcu_assign_pointer(tcp_fastopen_ctx, ctx);
68 spin_unlock(&tcp_fastopen_ctx_lock);
69
70 if (octx)
71 call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
72 return err;
73}
74
Yuchung Cheng149479d2013-08-08 14:06:22 -070075/* Computes the fastopen cookie for the IP path.
76 * The path is a 128 bits long (pad with zeros for IPv4).
Jerry Chu10467162012-08-31 12:29:11 +000077 *
78 * The caller must check foc->len to determine if a valid cookie
79 * has been generated successfully.
80*/
Yuchung Cheng149479d2013-08-08 14:06:22 -070081void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
82 struct tcp_fastopen_cookie *foc)
Jerry Chu10467162012-08-31 12:29:11 +000083{
Yuchung Cheng149479d2013-08-08 14:06:22 -070084 __be32 path[4] = { src, dst, 0, 0 };
Jerry Chu10467162012-08-31 12:29:11 +000085 struct tcp_fastopen_context *ctx;
86
Hannes Frederic Sowa222e83d2013-10-19 21:48:58 +020087 tcp_fastopen_init_key_once(true);
88
Jerry Chu10467162012-08-31 12:29:11 +000089 rcu_read_lock();
90 ctx = rcu_dereference(tcp_fastopen_ctx);
91 if (ctx) {
Yuchung Cheng149479d2013-08-08 14:06:22 -070092 crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
Jerry Chu10467162012-08-31 12:29:11 +000093 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
94 }
95 rcu_read_unlock();
96}