// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	rcu_read_unlock();
	if (ctxt)
		return;

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race happening here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
		lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *key, unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	/* Prefer a listener-specific key; fall back to the net-wide key. */
	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(sk, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(sk, buf, foc);
		}
	}
#endif
	return false;
}
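
/* Illustration of the scheme above (a restatement of the code, not extra
 * logic):
 *
 *   IPv4:  cookie = AES_K(saddr || daddr || 0 || 0)    one-block encryption
 *   IPv6:  tmp    = AES_K(saddr)
 *          cookie = AES_K(tmp ^ daddr)                 two-block CBC-MAC
 *
 * The server recomputes this value from the addresses in the incoming SYN
 * and compares it with the cookie echoed by the client, so no per-client
 * state needs to be kept between the two SYNs.
 */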

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	/* The SYN consumed one sequence number; step past it and clear the
	 * flag so the payload queues as ordinary in-sequence data.
	 */
	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
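
/* Summary of the outcomes above, restated for reference:
 *
 *   foc->len < 0  (no cookie):   child created only if the SYN carries data
 *                                and TFO_SERVER_COOKIE_NOT_REQD applies
 *   foc->len == 0 (request):     no child; the SYN-ACK carries a fresh cookie
 *   valid cookie:                child created; any SYN data is queued on it
 *   invalid cookie:              no child; the SYN-ACK carries the valid cookie
 */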

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);
	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
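
/* Userspace usage sketch (illustrative only; error handling omitted):
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	write(fd, buf, len);
 *
 * When tcp_fastopen_defer_connect() returns true, connect() completes
 * without emitting a SYN; the first write() then sends the SYN carrying
 * the cached cookie and the data. Otherwise the SYN goes out at connect()
 * time with a cookie request.
 */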

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * 3. client side TFO socket has timed out three times consecutively during
 *    or after the handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
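
/* Worked example of the backoff above, assuming the default
 * sysctl_tcp_fastopen_blackhole_timeout of 3600 seconds (1 hour):
 *
 *   tfo_da_times:   1    2    3    4    5    6    7    8 ...
 *   multiplier:     1    2    4    8   16   32   64   64 (capped at 2^6)
 *   disabled for:  1h   2h   4h   8h  16h  32h  64h  64h
 */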

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on marked active TFO sockets opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}
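
/* Note: this function is invoked from the retransmission-timeout path
 * (tcp_write_timeout() in tcp_timer.c), so "timeouts" above counts
 * consecutive unanswered (re)transmissions on a socket that attempted
 * Fast Open.
 */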