crypto: x86/aes-xts - wire up VAES + AVX2 implementation
Add an AES-XTS implementation "xts-aes-vaes-avx2" for x86_64 CPUs with
the VAES, VPCLMULQDQ, and AVX2 extensions, but not AVX512 or AVX10.
This implementation uses ymm registers to operate on two AES blocks at a
time.  The assembly code is instantiated using a macro so that most of
the source code is shared with other implementations.
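
As an illustration of the two-blocks-per-register idea, here is a minimal
user-space sketch (not the kernel code) that applies one AES round to two
128-bit blocks packed into a single ymm register via the VAES intrinsic
_mm256_aesenc_epi128.  The helper name and data are made up for the example,
and it assumes a compiler invoked with -mavx2 -mvaes:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* One middle AES round on both 128-bit lanes of 'blocks'; the same
 * round key is broadcast to both lanes. */
static __m256i aes_round_x2(__m256i blocks, __m256i round_key)
{
	return _mm256_aesenc_epi128(blocks, round_key);
}

int main(void)
{
	uint8_t buf[32] = { 1, 2, 3 };	/* two adjacent 16-byte blocks */
	uint8_t rk[16] = { 0 };		/* dummy round key, illustration only */
	__m256i blocks = _mm256_loadu_si256((const __m256i *)buf);
	__m256i key = _mm256_broadcastsi128_si256(
				_mm_loadu_si128((const __m128i *)rk));

	blocks = aes_round_x2(blocks, key);
	_mm256_storeu_si256((__m256i *)buf, blocks);
	printf("first byte after one round: %02x\n", buf[0]);
	return 0;
}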

This is the optimal implementation on AMD Zen 3.  It should also be the
optimal implementation on Intel Alder Lake, which similarly supports
VAES but not AVX512.  Compared with xts-aes-aesni-avx on Zen 3,
xts-aes-vaes-avx2 provides 70% higher AES-256-XTS decryption throughput
with 4096-byte messages, or 23% higher with 512-byte messages.

A large improvement is also seen with CPUs that do support AVX512 (e.g.,
98% higher AES-256-XTS decryption throughput on Ice Lake with 4096-byte
messages), though the following patches add AVX512-optimized
implementations to get a bit more performance on those CPUs.

Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
ebiggers authored and herbertx committed Apr 5, 2024
1 parent 996f4dc commit e787060
Showing 2 changed files with 31 additions and 0 deletions.
arch/x86/crypto/aes-xts-avx-x86_64.S: 11 additions, 0 deletions
@@ -807,3 +807,14 @@ SYM_FUNC_END(aes_xts_encrypt_aesni_avx)
 SYM_TYPED_FUNC_START(aes_xts_decrypt_aesni_avx)
 	_aes_xts_crypt	0
 SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
+
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+.set	VL, 32
+.set	USE_AVX10, 0
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
+	_aes_xts_crypt	1
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx2)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2)
+	_aes_xts_crypt	0
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx2)
+#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
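
For reference, the XTS tweak update that this kind of implementation
accelerates (in the assembly, via the vpclmulqdq carryless-multiply
instruction) is multiplication of the 128-bit tweak by x in GF(2^128).
A scalar C sketch, assuming the little-endian tweak layout XTS specifies;
the helper name is hypothetical:

#include <stdint.h>

/* Multiply the 128-bit XTS tweak by x in GF(2^128), reducing modulo
 * x^128 + x^7 + x^2 + x + 1.  The tweak is stored little-endian,
 * i.e. t[0] holds the lowest-order coefficients. */
static void xts_next_tweak(uint8_t t[16])
{
	uint8_t carry = 0;

	for (int i = 0; i < 16; i++) {
		uint8_t msb = t[i] >> 7;

		t[i] = (uint8_t)((t[i] << 1) | carry);
		carry = msb;
	}
	if (carry)
		t[0] ^= 0x87;	/* x^7 + x^2 + x + 1 */
}
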
arch/x86/crypto/aesni-intel_glue.c: 20 additions, 0 deletions
@@ -1295,6 +1295,9 @@ static struct skcipher_alg aes_xts_alg_##suffix = { \
 static struct simd_skcipher_alg *aes_xts_simdalg_##suffix
 
 DEFINE_XTS_ALG(aesni_avx, "xts-aes-aesni-avx", 500);
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+DEFINE_XTS_ALG(vaes_avx2, "xts-aes-vaes-avx2", 600);
+#endif
 
 static int __init register_xts_algs(void)
 {
@@ -1306,6 +1309,18 @@ static int __init register_xts_algs(void)
 					     &aes_xts_simdalg_aesni_avx);
 	if (err)
 		return err;
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+	    !boot_cpu_has(X86_FEATURE_VAES) ||
+	    !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
+	    !boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
+	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+		return 0;
+	err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx2, 1,
+					     &aes_xts_simdalg_vaes_avx2);
+	if (err)
+		return err;
+#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
 	return 0;
 }
 
@@ -1314,6 +1329,11 @@ static void unregister_xts_algs(void)
 	if (aes_xts_simdalg_aesni_avx)
 		simd_unregister_skciphers(&aes_xts_alg_aesni_avx, 1,
 					  &aes_xts_simdalg_aesni_avx);
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+	if (aes_xts_simdalg_vaes_avx2)
+		simd_unregister_skciphers(&aes_xts_alg_vaes_avx2, 1,
+					  &aes_xts_simdalg_vaes_avx2);
+#endif
 }
 #else /* CONFIG_X86_64 */
 static int __init register_xts_algs(void)
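
The #if block in register_xts_algs() gates registration on runtime CPU
features as well as assembler support.  As a rough user-space analogue of
those boot_cpu_has() checks, here is a sketch using CPUID leaf 7, subleaf 0
(AVX2 is EBX bit 5, VAES is ECX bit 9, VPCLMULQDQ is ECX bit 10); the helper
name is made up, and unlike the kernel's cpu_has_xfeatures() test this does
not verify that the OS saves ymm state, which a complete check would confirm
by reading XCR0:

#include <cpuid.h>
#include <stdio.h>

static int cpu_supports_vaes_avx2(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 0;
	return (ebx & (1u << 5)) &&	/* AVX2 */
	       (ecx & (1u << 9)) &&	/* VAES */
	       (ecx & (1u << 10));	/* VPCLMULQDQ */
}

int main(void)
{
	printf("xts-aes-vaes-avx2 CPU features present: %s\n",
	       cpu_supports_vaes_avx2() ? "yes" : "no");
	return 0;
}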
