/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "en.h"
#include "accel/ipsec.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"

struct mlx5e_ipsec_sa_entry {
        struct hlist_node hlist; /* Item in SADB_RX hashtable */
        unsigned int handle; /* Handle in SADB_RX */
        struct xfrm_state *x;
        struct mlx5e_ipsec *ipsec;
        void *context;
};

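/* Called from the RX path to map the SA handle reported by the hardware back
 * to its xfrm_state; takes a reference on the returned state.
 */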
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
                                              unsigned int handle)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *ret = NULL;

        rcu_read_lock();
        hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
                if (sa_entry->handle == handle) {
                        ret = sa_entry->x;
                        xfrm_state_hold(ret);
                        break;
                }
        rcu_read_unlock();

        return ret;
}

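/* Allocate a handle for an RX SA and publish it in the SADB_RX hashtable so
 * the RX path can find it.
 */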
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        /* Atomic allocation: sadb_rx_lock is held with IRQs disabled */
        ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_ATOMIC);
        if (ret < 0)
                goto out;

        sa_entry->handle = ret;
        hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
        ret = 0;

out:
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
        return ret;
}

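/* Remove the SA from the SADB_RX hashtable; the handle itself is released
 * later in mlx5e_ipsec_sadb_rx_free(), after an RCU grace period.
 */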
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        hash_del_rcu(&sa_entry->hlist);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;

        /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
        synchronize_rcu();
        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        ida_simple_remove(&ipsec->halloc, sa_entry->handle);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

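/* Map the AEAD key length (minus the trailing 4-byte salt) to the device
 * encryption mode.
 */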
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
{
        unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;

        switch (key_len) {
        case 16:
                return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
        case 32:
                return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
        default:
                netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
                            key_len, x->aead->alg_name);
                return -1;
        }
}

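/* Fill a mlx5_accel_ipsec_sa command from the xfrm state: key and salt for
 * ADD_SA, plus addresses, SPI, protocol and direction flags.
 */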
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
                                    struct mlx5_accel_ipsec_sa *hw_sa)
{
        struct xfrm_state *x = sa_entry->x;
        struct aead_geniv_ctx *geniv_ctx;
        unsigned int crypto_data_len;
        struct crypto_aead *aead;
        unsigned int key_len;
        int ivsize;

        memset(hw_sa, 0, sizeof(*hw_sa));

        if (op == MLX5_IPSEC_CMD_ADD_SA) {
                crypto_data_len = (x->aead->alg_key_len + 7) / 8;
                key_len = crypto_data_len - 4; /* 4 bytes salt at end */
                aead = x->data;
                geniv_ctx = crypto_aead_ctx(aead);
                ivsize = crypto_aead_ivsize(aead);

                memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
                /* Duplicate 128 bit key twice according to HW layout */
                if (key_len == 16)
                        memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
                memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
                hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
        }

        hw_sa->cmd = htonl(op);
        hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
        if (x->props.family == AF_INET) {
                hw_sa->sip[3] = x->props.saddr.a4;
                hw_sa->dip[3] = x->id.daddr.a4;
                hw_sa->sip_masklen = 32;
                hw_sa->dip_masklen = 32;
        } else {
                memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
                memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
                hw_sa->sip_masklen = 128;
                hw_sa->dip_masklen = 128;
                hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
        }
        hw_sa->spi = x->id.spi;
        hw_sa->sw_sa_handle = htonl(sa_entry->handle);
        switch (x->id.proto) {
        case IPPROTO_ESP:
                hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
                break;
        case IPPROTO_AH:
                hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
                break;
        default:
                break;
        }
        hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
        if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
                hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
}

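/* Reject xfrm states the hardware cannot offload: only ESP with
 * AES-GCM-128/256 (128-bit ICV), no ESN, no encapsulation and no TFC padding
 * are supported.
 */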
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
        struct net_device *netdev = x->xso.dev;
        struct mlx5e_priv *priv;

        priv = netdev_priv(netdev);

        if (x->props.aalgo != SADB_AALG_NONE) {
                netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
                netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                netdev_info(netdev, "Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.flags & XFRM_STATE_ESN) {
                netdev_info(netdev, "Cannot offload ESN xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                netdev_info(netdev, "Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }
        if (x->props.family == AF_INET6 &&
            !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
                netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
                return -EINVAL;
        }

        return 0;
}

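/* .xdo_dev_state_add: allocate a SW SA entry and program the SA into the
 * device, waiting for the ADD_SA command to complete.
 */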
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
        struct net_device *netdev = x->xso.dev;
        struct mlx5_accel_ipsec_sa hw_sa;
        struct mlx5e_priv *priv;
        void *context;
        int err;

        priv = netdev_priv(netdev);

        err = mlx5e_xfrm_validate_state(x);
        if (err)
                return err;

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                err = -ENOMEM;
                goto out;
        }

        sa_entry->x = x;
        sa_entry->ipsec = priv->ipsec;

        /* Add the SA to handle processed incoming packets before the add SA
         * completion was received
         */
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                err = mlx5e_ipsec_sadb_rx_add(sa_entry);
                if (err) {
                        netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
                        goto err_entry;
                }
        }

        mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
        context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
        if (IS_ERR(context)) {
                err = PTR_ERR(context);
                goto err_sadb_rx;
        }

        err = mlx5_accel_ipsec_sa_cmd_wait(context);
        if (err)
                goto err_sadb_rx;

        x->xso.offload_handle = (unsigned long)sa_entry;
        goto out;

err_sadb_rx:
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                mlx5e_ipsec_sadb_rx_del(sa_entry);
                mlx5e_ipsec_sadb_rx_free(sa_entry);
        }
err_entry:
        kfree(sa_entry);
out:
        return err;
}

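/* .xdo_dev_state_delete: issue the DEL_SA command; its completion is awaited
 * later in mlx5e_xfrm_free_state().
 */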
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct mlx5_accel_ipsec_sa hw_sa;
        void *context;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        WARN_ON(sa_entry->x != x);

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_del(sa_entry);

        mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
        context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
        if (IS_ERR(context))
                return;

        sa_entry->context = context;
}

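/* .xdo_dev_state_free: wait for the DEL_SA completion and release the SW SA
 * entry (and, for RX SAs, its SADB_RX handle).
 */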
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        int res;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        WARN_ON(sa_entry->x != x);

        res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
        sa_entry->context = NULL;
        if (res) {
                /* Leftover object will leak */
                return;
        }

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_free(sa_entry);

        kfree(sa_entry);
}

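/* Set up the per-netdev IPsec context: the SADB_RX hashtable, its lock and
 * the handle allocator.
 */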
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = NULL;

        if (!MLX5_IPSEC_DEV(priv->mdev)) {
                netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
                return 0;
        }

        ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
        if (!ipsec)
                return -ENOMEM;

        hash_init(ipsec->sadb_rx);
        spin_lock_init(&ipsec->sadb_rx_lock);
        ida_init(&ipsec->halloc);
        ipsec->en_priv = priv;
        ipsec->en_priv->ipsec = ipsec;
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return 0;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = priv->ipsec;

        if (!ipsec)
                return;

        ida_destroy(&ipsec->halloc);
        kfree(ipsec);
        priv->ipsec = NULL;
}

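/* .xdo_dev_offload_ok: per-packet check; IPv4 packets with options and IPv6
 * packets with extension headers fall back to the SW path.
 */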
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IPv4 options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }

        return true;
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add      = mlx5e_xfrm_add_state,
        .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
        .xdo_dev_state_free     = mlx5e_xfrm_free_state,
        .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
};

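/* Advertise ESP offload features on the netdev according to device
 * capabilities (SWP, SWP checksum, SWP LSO).
 */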
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *netdev = priv->netdev;

        if (!priv->ipsec)
                return;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
            !MLX5_CAP_ETH(mdev, swp)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
                return;
        }

        mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
        netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;

        if (!MLX5_CAP_ETH(mdev, swp_csum)) {
                mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
                return;
        }

        netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
        netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
            !MLX5_CAP_ETH(mdev, swp_lso)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
                return;
        }

        mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
        netdev->features |= NETIF_F_GSO_ESP;
        netdev->hw_features |= NETIF_F_GSO_ESP;
        netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}