Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/crypto')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/crypto.h |  65
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h     |  92
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/ipsec.c  | 613
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c    | 602
4 files changed, 1372 insertions, 0 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
new file mode 100644
index 000000000..1df73d658
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_H
+#define NFP_CRYPTO_H 1
+
+struct net_device;
+struct nfp_net;
+struct nfp_net_tls_resync_req;
+
+struct nfp_net_tls_offload_ctx {
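+ /* Connection handle returned by the FW in the add reply */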
+ __be32 fw_handle[2];
+
+ u8 rx_end[0];
+ /* Tx only fields follow - Rx side does not have enough driver state
+ * to fit these
+ */
+
+ u32 next_seq;
+};
+
+#ifdef CONFIG_TLS_DEVICE
+int nfp_net_tls_init(struct nfp_net *nn);
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len);
+#else
+static inline int nfp_net_tls_init(struct nfp_net *nn)
+{
+ return 0;
+}
+
+static inline int
+nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/* IPsec related structures and functions */
+struct nfp_ipsec_offload {
+ u32 seq_hi;
+ u32 seq_low;
+ u32 handle;
+};
+
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+}
+
+static inline void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+}
+#else
+void nfp_net_ipsec_init(struct nfp_net *nn);
+void nfp_net_ipsec_clean(struct nfp_net *nn);
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info);
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb);
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
new file mode 100644
index 000000000..dcb67c2b5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_FW_H
+#define NFP_CRYPTO_FW_H 1
+
+#include "../ccm.h"
+
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+
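+/* FW request asking the host to resync an RX flow; the l3/l4 offsets
+ * index into the packet data that accompanies the request.
+ */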
+struct nfp_net_tls_resync_req {
+ __be32 fw_handle[2];
+ __be32 tcp_seq;
+ u8 l3_offset;
+ u8 l4_offset;
+ u8 resv[2];
+};
+
+struct nfp_crypto_reply_simple {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+};
+
+struct nfp_crypto_req_reset {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+};
+
+#define NFP_NET_TLS_IPVER GENMASK(15, 12)
+#define NFP_NET_TLS_VLAN GENMASK(11, 0)
+#define NFP_NET_TLS_VLAN_UNUSED 4095
+
+struct nfp_crypto_req_add_front {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ u8 key_len;
+ __be16 ipver_vlan __packed;
+ u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
+ u8 l3_addrs[];
+};
+
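+/* Key material and sequence state; the driver wipes this part of the
+ * request once the FW has consumed it.
+ */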
+struct nfp_crypto_req_add_back {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 key[8];
+ __be32 salt;
+ __be32 iv[2];
+ __be32 counter;
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+
+struct nfp_crypto_req_add_v4 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip;
+ __be32 dst_ip;
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_req_add_v6 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_reply_add {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_del {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_update {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ __be32 handle[2];
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
new file mode 100644
index 000000000..c0dcce8ae
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <asm/unaligned.h>
+#include <linux/ktime.h>
+#include <net/xfrm.h>
+
+#include "../nfpcore/nfp_dev.h"
+#include "../nfp_net_ctrl.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+
+#define NFP_NET_IPSEC_MAX_SA_CNT (16 * 1024) /* Firmware supports a maximum of 16K SA offloads */
+
+/* IPsec config message cmd codes */
+enum nfp_ipsec_cfg_mssg_cmd_codes {
+ NFP_IPSEC_CFG_MSSG_ADD_SA, /* Add a new SA */
+ NFP_IPSEC_CFG_MSSG_INV_SA /* Invalidate an existing SA */
+};
+
+/* IPsec config message response codes */
+enum nfp_ipsec_cfg_mssg_rsp_codes {
+ NFP_IPSEC_CFG_MSSG_OK,
+ NFP_IPSEC_CFG_MSSG_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_VALID,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD
+};
+
+/* Protocol */
+enum nfp_ipsec_sa_prot {
+ NFP_IPSEC_PROTOCOL_AH = 0,
+ NFP_IPSEC_PROTOCOL_ESP = 1
+};
+
+/* Mode */
+enum nfp_ipsec_sa_mode {
+ NFP_IPSEC_PROTMODE_TRANSPORT = 0,
+ NFP_IPSEC_PROTMODE_TUNNEL = 1
+};
+
+/* Cipher types */
+enum nfp_ipsec_sa_cipher {
+ NFP_IPSEC_CIPHER_NULL,
+ NFP_IPSEC_CIPHER_3DES,
+ NFP_IPSEC_CIPHER_AES128,
+ NFP_IPSEC_CIPHER_AES192,
+ NFP_IPSEC_CIPHER_AES256,
+ NFP_IPSEC_CIPHER_AES128_NULL,
+ NFP_IPSEC_CIPHER_AES192_NULL,
+ NFP_IPSEC_CIPHER_AES256_NULL,
+ NFP_IPSEC_CIPHER_CHACHA20
+};
+
+/* Cipher modes */
+enum nfp_ipsec_sa_cipher_mode {
+ NFP_IPSEC_CIMODE_ECB,
+ NFP_IPSEC_CIMODE_CBC,
+ NFP_IPSEC_CIMODE_CFB,
+ NFP_IPSEC_CIMODE_OFB,
+ NFP_IPSEC_CIMODE_CTR
+};
+
+/* Hash types */
+enum nfp_ipsec_sa_hash_type {
+ NFP_IPSEC_HASH_NONE,
+ NFP_IPSEC_HASH_MD5_96,
+ NFP_IPSEC_HASH_SHA1_96,
+ NFP_IPSEC_HASH_SHA256_96,
+ NFP_IPSEC_HASH_SHA384_96,
+ NFP_IPSEC_HASH_SHA512_96,
+ NFP_IPSEC_HASH_MD5_128,
+ NFP_IPSEC_HASH_SHA1_80,
+ NFP_IPSEC_HASH_SHA256_128,
+ NFP_IPSEC_HASH_SHA384_192,
+ NFP_IPSEC_HASH_SHA512_256,
+ NFP_IPSEC_HASH_GF128_128,
+ NFP_IPSEC_HASH_POLY1305_128
+};
+
+/* IPSEC_CFG_MSSG_ADD_SA */
+struct nfp_ipsec_cfg_add_sa {
+ u32 ciph_key[8]; /* Cipher Key */
+ union {
+ u32 auth_key[16]; /* Authentication Key */
+ struct nfp_ipsec_aesgcm { /* AES-GCM-ESP fields */
+ u32 salt; /* Initialized with SA */
+ u32 resv[15];
+ } aesgcm_fields;
+ };
+ struct sa_ctrl_word {
+ uint32_t hash :4; /* From nfp_ipsec_sa_hash_type */
+ uint32_t cimode :4; /* From nfp_ipsec_sa_cipher_mode */
+ uint32_t cipher :4; /* From nfp_ipsec_sa_cipher */
+ uint32_t mode :2; /* From nfp_ipsec_sa_mode */
+ uint32_t proto :2; /* From nfp_ipsec_sa_prot */
+ uint32_t dir :1; /* SA direction */
+ uint32_t resv0 :12;
+ uint32_t encap_dsbl:1; /* Encap/Decap disable */
+ uint32_t resv1 :2; /* Must be set to 0 */
+ } ctrl_word;
+ u32 spi; /* SPI Value */
+ uint32_t pmtu_limit :16; /* PMTU Limit */
+ uint32_t resv0 :5;
+ uint32_t ipv6 :1; /* Outbound IPv6 addr format */
+ uint32_t resv1 :10;
+ u32 resv2[2];
+ u32 src_ip[4]; /* Src IP addr */
+ u32 dst_ip[4]; /* Dst IP addr */
+ u32 resv3[6];
+};
+
+/* IPSEC_CFG_MSSG */
+struct nfp_ipsec_cfg_mssg {
+ union {
+ struct {
+ uint32_t cmd:16; /* One of nfp_ipsec_cfg_mssg_cmd_codes */
+ uint32_t rsp:16; /* One of nfp_ipsec_cfg_mssg_rsp_codes */
+ uint32_t sa_idx:16; /* SA table index */
+ uint32_t spare0:16;
+ struct nfp_ipsec_cfg_add_sa cfg_add_sa;
+ };
+ u32 raw[64];
+ };
+};
+
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
+{
+ unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
+ int i, msg_size, ret;
+
+ ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+ if (ret)
+ return ret;
+
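+ /* Copy the request into the mailbox one 32-bit word at a time */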
+ msg_size = ARRAY_SIZE(msg->raw);
+ for (i = 0; i < msg_size; i++)
+ nn_writel(nn, offset + 4 * i, msg->raw[i]);
+
+ ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+ if (ret < 0) {
+ nn_ctrl_bar_unlock(nn);
+ return ret;
+ }
+
+ /* For now we always read the whole message response back */
+ for (i = 0; i < msg_size; i++)
+ msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+ nn_ctrl_bar_unlock(nn);
+
+ switch (msg->rsp) {
+ case NFP_IPSEC_CFG_MSSG_OK:
+ return 0;
+ case NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD:
+ return -EINVAL;
+ case NFP_IPSEC_CFG_MSSG_SA_VALID:
+ return -EEXIST;
+ case NFP_IPSEC_CFG_MSSG_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int set_aes_keylen(struct nfp_ipsec_cfg_add_sa *cfg, int alg, int keylen)
+{
+ bool aes_gmac = (alg == SADB_X_EALG_NULL_AES_GMAC);
+
+ switch (keylen) {
+ case 128:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES128_NULL :
+ NFP_IPSEC_CIPHER_AES128;
+ break;
+ case 192:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES192_NULL :
+ NFP_IPSEC_CIPHER_AES192;
+ break;
+ case 256:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES256_NULL :
+ NFP_IPSEC_CIPHER_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void set_md5hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha1hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_96;
+ break;
+ case 80:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_80;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_256hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_384hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_96;
+ break;
+ case 192:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_192;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_96;
+ break;
+ case 256:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_256;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_ipsec_cfg_mssg msg = {};
+ int i, key_len, trunc_len, err = 0;
+ struct nfp_ipsec_cfg_add_sa *cfg;
+ struct nfp_net *nn;
+ unsigned int saidx;
+
+ nn = netdev_priv(netdev);
+ cfg = &msg.cfg_add_sa;
+
+ /* General */
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TUNNEL;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TRANSPORT;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for xfrm offload");
+ return -EINVAL;
+ }
+
+ switch (x->id.proto) {
+ case IPPROTO_ESP:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_ESP;
+ break;
+ case IPPROTO_AH:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for xfrm offload");
+ return -EINVAL;
+ }
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload");
+ return -EINVAL;
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
+ return -EINVAL;
+ }
+
+ cfg->spi = ntohl(x->id.spi);
+
+ /* Hash/Authentication */
+ if (x->aalg)
+ trunc_len = x->aalg->alg_trunc_len;
+ else
+ trunc_len = 0;
+
+ switch (x->props.aalgo) {
+ case SADB_AALG_NONE:
+ if (x->aead) {
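+ /* AEADs authenticate internally; any non-zero trunc_len passes the check below */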
+ trunc_len = -1;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_AALG_NULL:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_NONE;
+ trunc_len = -1;
+ break;
+ case SADB_AALG_MD5HMAC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+ set_md5hmac(cfg, &trunc_len);
+ break;
+ case SADB_AALG_SHA1HMAC:
+ set_sha1hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_256HMAC:
+ set_sha2_256hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_384HMAC:
+ set_sha2_384hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_512HMAC:
+ set_sha2_512hmac(cfg, &trunc_len);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+
+ if (!trunc_len) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm trunc length");
+ return -EINVAL;
+ }
+
+ if (x->aalg) {
+ key_len = DIV_ROUND_UP(x->aalg->alg_key_len, BITS_PER_BYTE);
+ if (key_len > sizeof(cfg->auth_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "Insufficient space for offloaded auth key");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->auth_key[0]); i++)
+ cfg->auth_key[i] = get_unaligned_be32(x->aalg->alg_key +
+ sizeof(cfg->auth_key[0]) * i);
+ }
+
+ /* Encryption */
+ switch (x->props.ealgo) {
+ case SADB_EALG_NONE:
+ case SADB_EALG_NULL:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
+ break;
+ case SADB_EALG_3DESCBC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
+ break;
+ case SADB_X_EALG_AES_GCM_ICV16:
+ case SADB_X_EALG_NULL_AES_GMAC:
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16");
+ return -EINVAL;
+ }
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR;
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_GF128_128;
+
+ /* aead->alg_key_len includes the 32-bit salt */
+ if (set_aes_keylen(cfg, x->props.ealgo, x->aead->alg_key_len - 32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_EALG_AESCBC:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ if (!x->ealg) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
+ return -EINVAL;
+ }
+ if (set_aes_keylen(cfg, x->props.ealgo, x->ealg->alg_key_len) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
+ return -EINVAL;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
+
+ if (x->aead) {
+ int salt_len = 4;
+
+ key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE);
+ key_len -= salt_len;
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "aead: Insufficient space for offloaded key");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+
+ /* Load up the salt */
+ cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len);
+ }
+
+ if (x->ealg) {
+ key_len = DIV_ROUND_UP(x->ealg->alg_key_len, BITS_PER_BYTE);
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "ealg: Insufficient space for offloaded key");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->ealg->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+ }
+
+ /* IP related info */
+ switch (x->props.family) {
+ case AF_INET:
+ cfg->ipv6 = 0;
+ cfg->src_ip[0] = ntohl(x->props.saddr.a4);
+ cfg->dst_ip[0] = ntohl(x->id.daddr.a4);
+ break;
+ case AF_INET6:
+ cfg->ipv6 = 1;
+ for (i = 0; i < 4; i++) {
+ cfg->src_ip[i] = ntohl(x->props.saddr.a6[i]);
+ cfg->dst_ip[i] = ntohl(x->id.daddr.a6[i]);
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported address family");
+ return -EINVAL;
+ }
+
+ /* Maximum PMTU the NIC IPsec code can handle; other limits may apply */
+ cfg->pmtu_limit = 0xffff;
+ cfg->ctrl_word.encap_dsbl = 1;
+
+ /* SA direction */
+ cfg->ctrl_word.dir = x->xso.dir;
+
+ /* Find unused SA data */
+ err = xa_alloc(&nn->xa_ipsec, &saidx, x,
+ XA_LIMIT(0, NFP_NET_IPSEC_MAX_SA_CNT - 1), GFP_KERNEL);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to get sa_data number for IPsec");
+ return err;
+ }
+
+ /* Allocate saidx and commit the SA */
+ msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+ msg.sa_idx = saidx;
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
+ if (err) {
+ xa_erase(&nn->xa_ipsec, saidx);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");
+ return err;
+ }
+
+ /* 0 is an invalid offload_handle for the kernel */
+ x->xso.offload_handle = saidx + 1;
+ return 0;
+}
+
+static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+{
+ struct nfp_ipsec_cfg_mssg msg = {
+ .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ .sa_idx = x->xso.offload_handle - 1,
+ };
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_net *nn;
+ int err;
+
+ nn = netdev_priv(netdev);
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
+ if (err)
+ nn_warn(nn, "Failed to invalidate SA in hardware\n");
+
+ xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
+}
+
+static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET)
+ /* Offload with IPv4 options is not supported yet */
+ return ip_hdr(skb)->ihl == 5;
+
+ /* Offload with IPv6 extension headers is not supported yet */
+ return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
+}
+
+static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = nfp_net_xfrm_add_state,
+ .xdo_dev_state_delete = nfp_net_xfrm_del_state,
+ .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
+};
+
+void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ xa_init_flags(&nn->xa_ipsec, XA_FLAGS_ALLOC);
+ nn->dp.netdev->xfrmdev_ops = &nfp_net_ipsec_xfrmdev_ops;
+}
+
+void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ WARN_ON(!xa_empty(&nn->xa_ipsec));
+ xa_destroy(&nn->xa_ipsec);
+}
+
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_state *x;
+
+ x = xfrm_input_state(skb);
+ if (!x)
+ return false;
+
+ offload_info->seq_hi = xo->seq.hi;
+ offload_info->seq_low = xo->seq.low;
+ offload_info->handle = x->xso.offload_handle;
+
+ return true;
+}
+
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ struct net_device *netdev = skb->dev;
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ struct sec_path *sp;
+ struct nfp_net *nn;
+ u32 saidx;
+
+ nn = netdev_priv(netdev);
+
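+ /* RX metadata carries the SA index offset by one; convert back to the table index */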
+ saidx = meta->ipsec_saidx - 1;
+ if (saidx >= NFP_NET_IPSEC_MAX_SA_CNT)
+ return -EINVAL;
+
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
+ return -ENOMEM;
+
+ xa_lock(&nn->xa_ipsec);
+ x = xa_load(&nn->xa_ipsec, saidx);
+ xa_unlock(&nn->xa_ipsec);
+ if (!x)
+ return -EINVAL;
+
+ xfrm_state_hold(x);
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
new file mode 100644
index 000000000..f80f1a695
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <net/inet6_hashtables.h>
+#include <net/tls.h>
+
+#include "../ccm.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+#include "fw.h"
+
+#define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
+ (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
+
+#define NFP_NET_TLS_OPCODE_MASK_RX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
+
+#define NFP_NET_TLS_OPCODE_MASK_TX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
+
+#define NFP_NET_TLS_OPCODE_MASK \
+ (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
+
+static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
+{
+ u32 off, val;
+
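+ /* Each opcode maps to one bit in the enable words; locate its 32-bit word first */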
+ off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
+
+ val = nn_readl(nn, off);
+ if (on)
+ val |= BIT(opcode & 31);
+ else
+ val &= ~BIT(opcode & 31);
+ nn_writel(nn, off, val);
+}
+
+static bool
+__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 opcode;
+ int cnt;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ nn->ktls_tx_conn_cnt += add;
+ cnt = nn->ktls_tx_conn_cnt;
+ nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
+ } else {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ nn->ktls_rx_conn_cnt += add;
+ cnt = nn->ktls_rx_conn_cnt;
+ }
+
+ /* Care only about 0 -> 1 and 1 -> 0 transitions */
+ if (cnt > 1)
+ return false;
+
+ nfp_net_crypto_set_op(nn, opcode, cnt);
+ return true;
+}
+
+static int
+nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ int ret = 0;
+
+ /* Use the BAR lock to protect the connection counts */
+ nn_ctrl_bar_lock(nn);
+ if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ /* Undo the cnt adjustment if failed */
+ if (ret)
+ __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
+ }
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+static int
+nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
+}
+
+static int
+nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
+}
+
+static struct sk_buff *
+nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
+{
+ return nfp_ccm_mbox_msg_alloc(nn, req_sz,
+ sizeof(struct nfp_crypto_reply_simple),
+ flags);
+}
+
+static int
+nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
+ const char *name, enum nfp_ccm_type type)
+{
+ struct nfp_crypto_reply_simple *reply;
+ int err;
+
+ err = __nfp_ccm_mbox_communicate(nn, skb, type,
+ sizeof(*reply), sizeof(*reply),
+ type == NFP_CCM_TYPE_CRYPTO_DEL);
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
+ return err;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err)
+ nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
+ name, err);
+ dev_consume_skb_any(skb);
+
+ return err;
+}
+
+static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
+{
+ struct nfp_crypto_req_del *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ memcpy(req->handle, fw_handle, sizeof(req->handle));
+
+ nfp_net_tls_communicate_simple(nn, skb, "delete",
+ NFP_CCM_TYPE_CRYPTO_DEL);
+}
+
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+ front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+ struct nfp_crypto_req_add_front *front)
+{
+ u32 len;
+ u64 id;
+
+ id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+ len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
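+ /* TX flows are matched by FW handle rather than address, so the
+ * address bytes are free to carry a unique connection id instead.
+ */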
+ memcpy(front->l3_addrs, &id, sizeof(id));
+ memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+ struct sock *sk, int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ req->front.key_len += sizeof(__be32) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
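+ /* RX matches packets as received: the remote address is the source */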
+ req->src_ip = inet->inet_daddr;
+ req->dst_ip = inet->inet_saddr;
+ }
+
+ return &req->back;
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+ struct sock *sk, int direction)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ req->front.key_len += sizeof(struct in6_addr) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
+ }
+
+#endif
+ return &req->back;
+}
+
+static void
+nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
+ struct nfp_crypto_req_add_back *back, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ front->l4_proto = IPPROTO_TCP;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ back->src_port = 0;
+ back->dst_port = 0;
+ } else {
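+ /* Same wire-view swap as for addresses: the remote port is the source */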
+ back->src_port = inet->inet_dport;
+ back->dst_port = inet->inet_sport;
+ }
+}
+
+static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
+{
+ switch (direction) {
+ case TLS_OFFLOAD_CTX_DIR_TX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ case TLS_OFFLOAD_CTX_DIR_RX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static bool
+nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 bit;
+
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ else
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ break;
+ default:
+ return false;
+ }
+
+ return nn->tlv_caps.crypto_ops & BIT(bit);
+}
+
+static int
+nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *tls_ci;
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_crypto_req_add_front *front;
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_add_back *back;
+ struct nfp_crypto_reply_add *reply;
+ struct sk_buff *skb;
+ size_t req_sz;
+ void *req;
+ bool ipv6;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
+ TLS_DRIVER_STATE_SIZE_TX);
+ BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
+ TLS_DRIVER_STATE_SIZE_RX);
+
+ if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
+ return -EOPNOTSUPP;
+
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (ipv6_only_sock(sk) ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ req_sz = sizeof(struct nfp_crypto_req_add_v6);
+ ipv6 = true;
+ break;
+ }
+ fallthrough;
+#endif
+ case AF_INET:
+ req_sz = sizeof(struct nfp_crypto_req_add_v4);
+ ipv6 = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_net_tls_conn_add(nn, direction);
+ if (err)
+ return err;
+
+ skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_conn_remove;
+ }
+
+ front = (void *)skb->data;
+ front->ep_id = 0;
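+ /* key_len starts with the non-address fields; the per-family helpers below add the address bytes */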
+ front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
+ front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(front->resv, 0, sizeof(front->resv));
+
+ nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+ req = (void *)skb->data;
+ if (ipv6)
+ back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
+ else
+ back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
+
+ nfp_net_tls_set_l4(front, back, sk, direction);
+
+ back->counter = 0;
+ back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
+
+ tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
+ sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
+
+ /* Get an extra ref on the skb so we can wipe the key after */
+ skb_get(skb);
+
+ err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
+ sizeof(*reply), sizeof(*reply));
+ reply = (void *)skb->data;
+
+ /* We depend on the CCM MBOX code not reallocating the skb we sent,
+ * so we can clear the key material out of the memory.
+ */
+ if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+ (u8 *)back > skb_end_pointer(skb)) &&
+ !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+ memzero_explicit(back, sizeof(*back));
+ dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+ err, direction == TLS_OFFLOAD_CTX_DIR_TX);
+ /* communicate frees skb on error */
+ goto err_conn_remove;
+ }
+
+ err = -be32_to_cpu(reply->error);
+ if (err) {
+ if (err == -ENOSPC) {
+ if (!atomic_fetch_inc(&nn->ktls_no_space))
+ nn_info(nn, "HW TLS table full\n");
+ } else {
+ nn_dp_warn(&nn->dp,
+ "failed to add TLS, FW replied: %d\n", err);
+ }
+ goto err_free_skb;
+ }
+
+ if (!reply->handle[0] && !reply->handle[1]) {
+ nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
+ err = -EINVAL;
+ goto err_fw_remove;
+ }
+
+ ntls = tls_driver_ctx(sk, direction);
+ memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ ntls->next_seq = start_offload_tcp_sn;
+ dev_consume_skb_any(skb);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return 0;
+
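+ /* Without stream-scan resync in FW, have the TLS core provide next-record hints */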
+ if (!nn->tlv_caps.tls_resync_ss)
+ tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+
+ return 0;
+
+err_fw_remove:
+ nfp_net_tls_del_fw(nn, reply->handle);
+err_free_skb:
+ dev_consume_skb_any(skb);
+err_conn_remove:
+ nfp_net_tls_conn_remove(nn, direction);
+ return err;
+}
+
+static void
+nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+
+ nfp_net_tls_conn_remove(nn, direction);
+
+ ntls = __tls_driver_ctx(tls_ctx, direction);
+ nfp_net_tls_del_fw(nn, ntls->fw_handle);
+}
+
+static int
+nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_update *req;
+ enum nfp_ccm_type type;
+ struct sk_buff *skb;
+ gfp_t flags;
+ int err;
+
+ flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
+ if (!skb)
+ return -ENOMEM;
+
+ ntls = tls_driver_ctx(sk, direction);
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(req->resv, 0, sizeof(req->resv));
+ memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ req->tcp_seq = cpu_to_be32(seq);
+ memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+
+ type = NFP_CCM_TYPE_CRYPTO_UPDATE;
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
+ if (err)
+ return err;
+ ntls->next_seq = seq;
+ } else {
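+ /* RX resync may run in atomic context; post the message without waiting for a reply */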
+ if (nn->tlv_caps.tls_resync_ss)
+ type = NFP_CCM_TYPE_CRYPTO_RESYNC;
+ nfp_ccm_mbox_post(nn, skb, type,
+ sizeof(struct nfp_crypto_reply_simple));
+ atomic_inc(&nn->ktls_rx_resync_sent);
+ }
+
+ return 0;
+}
+
+static const struct tlsdev_ops nfp_net_tls_ops = {
+ .tls_dev_add = nfp_net_tls_add,
+ .tls_dev_del = nfp_net_tls_del,
+ .tls_dev_resync = nfp_net_tls_resync,
+};
+
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct net *net = dev_net(netdev);
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be32 tcp_seq;
+ int err;
+
+ iph = pkt + req->l3_offset;
+ ipv6h = pkt + req->l3_offset;
+ th = pkt + req->l4_offset;
+
+ if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
+ req->l3_offset, req->l4_offset, pkt_len);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ switch (ipv6h->version) {
+ case 4:
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+ break;
+#endif
+ default:
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
+ req->l3_offset, req->l4_offset, iph->version);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ err = 0;
+ if (!sk)
+ goto err_cnt_ign;
+ if (!tls_is_sk_rx_device_offloaded(sk) ||
+ sk->sk_shutdown & RCV_SHUTDOWN)
+ goto err_put_sock;
+
+ ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+ /* some FW versions can't report the handle and report 0s */
+ if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
+ memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
+ goto err_put_sock;
+
+ /* copy to ensure alignment */
+ memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
+ tls_offload_rx_resync_request(sk, tcp_seq);
+ atomic_inc(&nn->ktls_rx_resync_req);
+
+ sock_gen_put(sk);
+ return 0;
+
+err_put_sock:
+ sock_gen_put(sk);
+err_cnt_ign:
+ atomic_inc(&nn->ktls_rx_resync_ign);
+ return err;
+}
+
+static int nfp_net_tls_reset(struct nfp_net *nn)
+{
+ struct nfp_crypto_req_reset *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+
+ return nfp_net_tls_communicate_simple(nn, skb, "reset",
+ NFP_CCM_TYPE_CRYPTO_RESET);
+}
+
+int nfp_net_tls_init(struct nfp_net *nn)
+{
+ struct net_device *netdev = nn->dp.netdev;
+ int err;
+
+ if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
+ return 0;
+
+ if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
+ NFP_NET_TLS_CCM_MBOX_OPS_MASK)
+ return 0;
+
+ if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
+ nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
+ nn->tlv_caps.mbox_len);
+ return 0;
+ }
+
+ err = nfp_net_tls_reset(nn);
+ if (err)
+ return err;
+
+ nn_ctrl_bar_lock(nn);
+ nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
+ err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ nn_ctrl_bar_unlock(nn);
+ if (err)
+ return err;
+
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ }
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ }
+
+ netdev->tlsdev_ops = &nfp_net_tls_ops;
+
+ return 0;
+}