/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>
/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);
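
/* Offloaded tls_context structures live on tls_device_list; tls_device_lock
 * protects both lists above.  A context that has to be torn down
 * asynchronously is moved to tls_device_gc_list and released by
 * tls_device_gc_work.
 */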
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	kfree(ctx);
}
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}
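
/* Register the context on tls_device_list, take a reference on the netdev,
 * and hook the socket destructor so the offload state is released when the
 * socket goes away.  Called with device_offload_lock held for reading.
 */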
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
		goto unlock;

	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);
unlock:
	spin_unlock_irqrestore(&tls_device_lock, flags);
}
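
/* Destruction is deferred to the workqueue because tls_dev_del is a driver
 * callback that may sleep, while the final refcount drop can come from the
 * socket destructor, which may run in atomic context.
 */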
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}
static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}
static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}
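
/* clean_acked_data hook: once the peer has acknowledged data up to acked_seq,
 * the corresponding offload records can no longer be needed for
 * retransmission, so release them and advance unacked_record_sn.
 */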
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}
/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL(tls_device_sk_destruct);
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}
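
/* Close the currently open record: write the TLS header into the first frag,
 * account for the (device-filled) auth tag, queue the record on the records
 * list for retransmission handling, and hand the data to the TCP layer.
 */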
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - ctx->tx.prepend_size,
			 record_type);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	tls_advance_record_sn(sk, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}
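
/* Main TX path: copy user data into page frags of the currently open record
 * and push a record out whenever it is complete (end of message without
 * MSG_MORE), reaches the maximum record length, or runs out of frags.
 */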
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	bool more = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (rc < 0)
		return rc;

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      tls_ctx->tx.prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       tls_ctx->tx.prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > tls_ctx->tx.prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					       pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}
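
/* Look up the offload record that contains TCP sequence number @seq and
 * report its record sequence number.  Used by the fallback path and by
 * drivers when handling retransmissions of already-pushed records.
 */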
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);

		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 *  And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);
static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
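
/* RX resync: when the device loses track of the record stream (e.g. after
 * packet loss or reordering), it posts a resync request in resync_req.  Once
 * the software path reaches the record header at the requested TCP sequence
 * number, the (seq, record sequence number) pair is reported back to the
 * device so hardware decryption can resume.
 */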
static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	resync_req = atomic64_read(&rx_ctx->resync_req);
	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
	is_req_pending = resync_req;

	if (unlikely(is_req_pending) && req_seq == seq &&
	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
		seq += TLS_HEADER_SIZE - 1;
		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
	}
}
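
/* Handle a record that the device only partially decrypted: running the
 * software AEAD decrypt over the mixed data yields the original ciphertext
 * for the already-decrypted regions (GCM payload encryption is an XOR with
 * the keystream), so those bytes are copied back into the skb and the
 * regular software path can then decrypt and authenticate the whole record.
 */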
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	skb_copy_bits(skb, offset, buf,
		      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted)
			skb_store_bits(skb, offset, buf, copy);

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted)
			skb_store_bits(skb_iter, frag_pos, buf, copy);

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Skip if it is already decrypted */
	if (ctx->sw.decrypted)
		return 0;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	return (is_encrypted || is_decrypted) ? 0 :
		tls_device_reencrypt(sk, skb);
}
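
/* Set up TLS TX offload for @sk: validate the cipher parameters, build the
 * offload context (record list, start marker, SW fallback AEAD), and hand the
 * crypto state to the netdev via tls_dev_add.  An error return lets the
 * caller fall back to the software TX path.
 */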
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tx.tag_size = tag_size;
	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
	ctx->tx.iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	ctx->tx.rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
				   __func__, netdev->name);
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc) {
		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
				   __func__);
		goto free_sw_resources;
	}

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}
void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}
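
/* NETDEV_DOWN handler: with new offload attempts blocked by the write lock,
 * detach every context bound to this netdev, call tls_dev_del for the
 * directions still offloaded, and wait for any in-flight RX resync and for
 * the garbage-collection work before the device goes away.
 */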
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync_rx)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}
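
/* A driver advertises kTLS offload by setting NETIF_F_HW_TLS_TX/RX and
 * populating dev->tlsdev_ops, which the notifier above sanity-checks.
 * Illustrative sketch only (the mydrv_* callback names are made up):
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add		= mydrv_tls_add,
 *		.tls_dev_del		= mydrv_tls_del,
 *		.tls_dev_resync_rx	= mydrv_tls_resync_rx,
 *	};
 *	...
 *	netdev->tlsdev_ops = &mydrv_tls_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */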
static struct notifier_block tls_dev_notifier = {
	.notifier_call	= tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
}