From: Ralf Lici <ralf@mandelbit.com>
Currently, ovpn uses three separate dynamically allocated structures to set up cryptographic operations for both encryption and decryption. This adds overhead in performance-critical paths and contributes to memory fragmentation.
This commit consolidates those allocations into a single temporary blob, similar to what esp_alloc_tmp() does.
The resulting performance gain for UDP is +7.7% with AES and +4.3% with ChaChaPoly; for TCP it is +4.3%.
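The blob is laid out as IV first, then the AEAD request, then the scatterlist array, with padding as dictated by the crypto context and scatterlist alignment requirements. The following minimal userspace sketch mirrors that size computation; it is illustrative only, and the constants (CTX_ALIGN, AEAD_ALIGNMASK, AEAD_REQSIZE, SG_ALIGN, SG_ENTRY_SIZE) are made-up stand-ins for crypto_tfm_ctx_alignment(), crypto_aead_alignmask(), crypto_aead_reqsize() and struct scatterlist:

#include <stdio.h>
#include <stddef.h>

#define OVPN_NONCE_SIZE 12   /* 4-byte packet ID + 8-byte nonce tail */
#define CTX_ALIGN       8    /* stand-in: crypto_tfm_ctx_alignment() */
#define AEAD_ALIGNMASK  3    /* stand-in: crypto_aead_alignmask() */
#define AEAD_REQSIZE    64   /* stand-in: sizeof(struct aead_request) + reqsize */
#define SG_ALIGN        8    /* stand-in: __alignof__(struct scatterlist) */
#define SG_ENTRY_SIZE   16   /* stand-in: sizeof(struct scatterlist) */

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t tmp_size(unsigned int nfrags)
{
	size_t len = OVPN_NONCE_SIZE;

	/* worst-case padding so the IV can be bumped to the AEAD alignment */
	len += AEAD_ALIGNMASK & ~(CTX_ALIGN - 1);
	/* the request must start on a crypto ctx boundary */
	len = ALIGN_UP(len, CTX_ALIGN);
	len += AEAD_REQSIZE;
	/* the scatterlist array must start on its natural boundary */
	len = ALIGN_UP(len, SG_ALIGN);
	/* nfrags + 2 entries: one extra for the AD, one for the auth tag */
	len += (size_t)SG_ENTRY_SIZE * (nfrags + 2);
	return len;
}

int main(void)
{
	printf("tmp blob for 3 frags: %zu bytes\n", tmp_size(3));
	return 0;
}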
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Ralf Lici <ralf@mandelbit.com>
Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
---
 drivers/net/ovpn/crypto_aead.c | 160 +++++++++++++++++++++++++--------
 drivers/net/ovpn/io.c          |   8 +-
 drivers/net/ovpn/skb.h         |  13 ++-
 3 files changed, 135 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c
index cb6cdf8ec317..cd723140c8d0 100644
--- a/drivers/net/ovpn/crypto_aead.c
+++ b/drivers/net/ovpn/crypto_aead.c
@@ -36,6 +36,104 @@ static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
 	       crypto_aead_authsize(ks->encrypt);	/* Auth Tag */
 }
 
+/**
+ * ovpn_aead_crypto_tmp_size - compute the size of a temporary object
+ *                             containing an AEAD request structure with extra
+ *                             space for SG and IV
+ * @tfm: the AEAD cipher handle
+ * @nfrags: the number of fragments in the skb
+ *
+ * This function calculates the size of a contiguous memory block that
+ * includes the initialization vector (IV), the AEAD request, and an array of
+ * scatterlist entries. For alignment considerations, the IV is placed first,
+ * followed by the request, and then the scatterlist.
+ * Additional alignment is applied according to the requirements of the
+ * underlying structures.
+ *
+ * Return: the size of the temporary memory that needs to be allocated
+ */
+static unsigned int ovpn_aead_crypto_tmp_size(struct crypto_aead *tfm,
+					      const unsigned int nfrags)
+{
+	unsigned int len = OVPN_NONCE_SIZE;
+
+	DEBUG_NET_WARN_ON_ONCE(OVPN_NONCE_SIZE != crypto_aead_ivsize(tfm));
+
+	/* min size for a buffer of ivsize, aligned to alignmask */
+	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+	/* round up to the next multiple of the crypto ctx alignment */
+	len = ALIGN(len, crypto_tfm_ctx_alignment());
+
+	/* reserve space for the AEAD request */
+	len += sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+	/* round up to the next multiple of the scatterlist alignment */
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	/* add enough space for nfrags + 2 scatterlist entries */
+	len += array_size(sizeof(struct scatterlist), nfrags + 2);
+	return len;
+}
+
+/**
+ * ovpn_aead_crypto_tmp_iv - retrieve the pointer to the IV within a temporary
+ *                           buffer allocated using ovpn_aead_crypto_tmp_size
+ * @aead: the AEAD cipher handle
+ * @tmp: a pointer to the beginning of the temporary buffer
+ *
+ * This function retrieves a pointer to the initialization vector (IV) in the
+ * temporary buffer. If the AEAD cipher specifies an IV size, the pointer is
+ * adjusted using the AEAD's alignment mask to ensure proper alignment.
+ *
+ * Return: a pointer to the IV within the temporary buffer
+ */
+static u8 *ovpn_aead_crypto_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return likely(crypto_aead_ivsize(aead)) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) :
+	       tmp;
+}
+
+/**
+ * ovpn_aead_crypto_tmp_req - retrieve the pointer to the AEAD request
+ *                            structure within a temporary buffer allocated
+ *                            using ovpn_aead_crypto_tmp_size
+ * @aead: the AEAD cipher handle
+ * @iv: a pointer to the initialization vector in the temporary buffer
+ *
+ * This function computes the location of the AEAD request structure that
+ * immediately follows the IV in the temporary buffer, and it ensures the
+ * request is aligned to the crypto transform context alignment.
+ *
+ * Return: a pointer to the AEAD request structure
+ */
+static struct aead_request *ovpn_aead_crypto_tmp_req(struct crypto_aead *aead,
+						     const u8 *iv)
+{
+	return (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				 crypto_tfm_ctx_alignment());
+}
+
+/**
+ * ovpn_aead_crypto_req_sg - locate the scatterlist following the AEAD request
+ *                           within a temporary buffer allocated using
+ *                           ovpn_aead_crypto_tmp_size
+ * @aead: the AEAD cipher handle
+ * @req: a pointer to the AEAD request structure in the temporary buffer
+ *
+ * This function computes the starting address of the scatterlist that is
+ * allocated immediately after the AEAD request structure. It aligns the
+ * pointer based on the alignment requirements of the scatterlist structure.
+ *
+ * Return: a pointer to the scatterlist
+ */
+static struct scatterlist *ovpn_aead_crypto_req_sg(struct crypto_aead *aead,
+						   struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
 int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 		      struct sk_buff *skb)
 {
@@ -45,6 +143,7 @@ int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	struct scatterlist *sg;
 	int nfrags, ret;
 	u32 pktid, op;
+	void *tmp;
 	u8 *iv;
 
 	ovpn_skb_cb(skb)->peer = peer;
@@ -71,13 +170,17 @@ int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
 		return -ENOSPC;
 
-	/* sg may be required by async crypto */
-	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
-				       (nfrags + 2), GFP_ATOMIC);
-	if (unlikely(!ovpn_skb_cb(skb)->sg))
+	/* allocate temporary memory for iv, sg and req */
+	tmp = kmalloc(ovpn_aead_crypto_tmp_size(ks->encrypt, nfrags),
+		      GFP_ATOMIC);
+	if (unlikely(!tmp))
 		return -ENOMEM;
 
-	sg = ovpn_skb_cb(skb)->sg;
+	ovpn_skb_cb(skb)->crypto_tmp = tmp;
+
+	iv = ovpn_aead_crypto_tmp_iv(ks->encrypt, tmp);
+	req = ovpn_aead_crypto_tmp_req(ks->encrypt, iv);
+	sg = ovpn_aead_crypto_req_sg(ks->encrypt, req);
 
 	/* sg table:
 	 * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
@@ -105,13 +208,6 @@ int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	if (unlikely(ret < 0))
 		return ret;
 
-	/* iv may be required by async crypto */
-	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
-	if (unlikely(!ovpn_skb_cb(skb)->iv))
-		return -ENOMEM;
-
-	iv = ovpn_skb_cb(skb)->iv;
-
 	/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
 	 * nonce
 	 */
@@ -130,12 +226,6 @@ int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	/* AEAD Additional data */
 	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
 
-	req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
-	if (unlikely(!req))
-		return -ENOMEM;
-
-	ovpn_skb_cb(skb)->req = req;
-
 	/* setup async crypto operation */
 	aead_request_set_tfm(req, ks->encrypt);
 	aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
@@ -156,6 +246,7 @@ int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	struct aead_request *req;
 	struct sk_buff *trailer;
 	struct scatterlist *sg;
+	void *tmp;
 	u8 *iv;
 
 	payload_offset = OVPN_AAD_SIZE + tag_size;
@@ -184,13 +275,17 @@ int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
 		return -ENOSPC;
 
-	/* sg may be required by async crypto */
-	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
-				       (nfrags + 2), GFP_ATOMIC);
-	if (unlikely(!ovpn_skb_cb(skb)->sg))
+	/* allocate temporary memory for iv, sg and req */
+	tmp = kmalloc(ovpn_aead_crypto_tmp_size(ks->decrypt, nfrags),
+		      GFP_ATOMIC);
+	if (unlikely(!tmp))
 		return -ENOMEM;
 
-	sg = ovpn_skb_cb(skb)->sg;
+	ovpn_skb_cb(skb)->crypto_tmp = tmp;
+
+	iv = ovpn_aead_crypto_tmp_iv(ks->decrypt, tmp);
+	req = ovpn_aead_crypto_tmp_req(ks->decrypt, iv);
+	sg = ovpn_aead_crypto_req_sg(ks->decrypt, req);
 
 	/* sg table:
 	 * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
@@ -213,24 +308,11 @@ int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
 	/* append auth_tag onto scatterlist */
 	sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);
 
-	/* iv may be required by async crypto */
-	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
-	if (unlikely(!ovpn_skb_cb(skb)->iv))
-		return -ENOMEM;
-
-	iv = ovpn_skb_cb(skb)->iv;
-
 	/* copy nonce into IV buffer */
 	memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
 	memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
 	       OVPN_NONCE_TAIL_SIZE);
 
-	req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
-	if (unlikely(!req))
-		return -ENOMEM;
-
-	ovpn_skb_cb(skb)->req = req;
-
 	/* setup async crypto operation */
 	aead_request_set_tfm(req, ks->decrypt);
 	aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
@@ -273,7 +355,11 @@ static struct crypto_aead *ovpn_aead_init(const char *title,
 		goto error;
 	}
 
-	/* basic AEAD assumption */
+	/* basic AEAD assumption:
+	 * all current algorithms use OVPN_NONCE_SIZE.
+	 * ovpn_aead_crypto_tmp_size and ovpn_aead_encrypt/decrypt
+	 * expect this.
+	 */
 	if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
 		pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
 		ret = -EINVAL;
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index 3e9e7f8444b3..2721ee8268b2 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -119,9 +119,7 @@ void ovpn_decrypt_post(void *data, int ret)
 	peer = ovpn_skb_cb(skb)->peer;
 
 	/* crypto is done, cleanup skb CB and its members */
-	kfree(ovpn_skb_cb(skb)->iv);
-	kfree(ovpn_skb_cb(skb)->sg);
-	aead_request_free(ovpn_skb_cb(skb)->req);
+	kfree(ovpn_skb_cb(skb)->crypto_tmp);
 
 	if (unlikely(ret < 0))
 		goto drop;
 
@@ -248,9 +246,7 @@ void ovpn_encrypt_post(void *data, int ret)
 	peer = ovpn_skb_cb(skb)->peer;
 
 	/* crypto is done, cleanup skb CB and its members */
-	kfree(ovpn_skb_cb(skb)->iv);
-	kfree(ovpn_skb_cb(skb)->sg);
-	aead_request_free(ovpn_skb_cb(skb)->req);
+	kfree(ovpn_skb_cb(skb)->crypto_tmp);
 
 	if (unlikely(ret == -ERANGE)) {
 		/* we ran out of IVs and we must kill the key as it can't be
diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h
index 64430880f1da..4fb7ea025426 100644
--- a/drivers/net/ovpn/skb.h
+++ b/drivers/net/ovpn/skb.h
@@ -18,12 +18,19 @@
 #include <linux/socket.h>
 #include <linux/types.h>
 
+/**
+ * struct ovpn_cb - ovpn skb control block
+ * @peer: the peer this skb was received from/sent to
+ * @ks: the crypto key slot used to encrypt/decrypt this skb
+ * @crypto_tmp: pointer to temporary memory used for crypto operations,
+ *              containing the IV, the scatter-gather list and the AEAD
+ *              request
+ * @payload_offset: offset in the skb where the payload starts
+ * @nosignal: whether this skb should be sent with the MSG_NOSIGNAL flag (TCP)
+ */
 struct ovpn_cb {
 	struct ovpn_peer *peer;
 	struct ovpn_crypto_key_slot *ks;
-	struct aead_request *req;
-	struct scatterlist *sg;
-	u8 *iv;
+	void *crypto_tmp;
 	unsigned int payload_offset;
 	bool nosignal;
 };
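As a closing illustration of why the cleanup in ovpn_decrypt_post()/ovpn_encrypt_post() collapses to a single kfree(): all three objects live in one allocation and are carved out by pointer arithmetic. The sketch below is userspace-only with simplified stand-in types and alignments, not the kernel crypto API:

#include <stdint.h>
#include <stdlib.h>

#define PTR_ALIGN_UP(p, a) \
	((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

struct fake_req { unsigned char opaque[64]; };     /* stand-in: aead_request */
struct fake_sg  { void *page; unsigned int len; }; /* stand-in: scatterlist */

int main(void)
{
	/* one blob, cf. ovpn_aead_crypto_tmp_size() */
	unsigned char *tmp = malloc(512);
	if (!tmp)
		return 1;

	unsigned char *iv = PTR_ALIGN_UP(tmp, 4);        /* IV placed first */
	struct fake_req *req = PTR_ALIGN_UP(iv + 12, 8); /* request after IV */
	struct fake_sg *sg = PTR_ALIGN_UP(req + 1, 8);   /* sg array last */
	(void)iv; (void)req; (void)sg;

	/* previously: kfree(iv); kfree(sg); aead_request_free(req);
	 * now the whole blob goes away in one call
	 */
	free(tmp);
	return 0;
}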