core: crypto: fix internal AES-GCM counter implementation
We have several AES-GCM implementations: those in the crypto libraries
and an internal one. The internal implementation comes in two flavours,
one using the Arm crypto extensions (CFG_CRYPTO_WITH_CE=y) and one in
pure software.

Each block to be encrypted is XORed with an encrypted counter block of
equal size (16 bytes). For each block the counter is increased.

Prior to this patch the entire counter block was increased as a 128-bit
integer, but that is not how AES-GCM is defined. In AES-GCM only the
least significant 32 bits of the counter block are increased, leaving
the rest untouched. The difference is only noticeable when those 32 bits
have reached 0xffffffff and wrap to 0x00000000 on the next increment.
With a 128-bit integer the carry would propagate into other parts of the
block.

Fix this by only incrementing the last 32-bit word in the counter block,
both in the pure software implementation and when using Arm crypto
extensions.

Link: #6659
Fixes: 1fca7e2 ("core: crypto: add new AES-GCM implementation")
Signed-off-by: Jens Wiklander <[email protected]>
Acked-by: Jerome Forissier <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
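
For reference, a minimal sketch of the counter semantics described in the
commit message (illustrative only, not part of this patch; gcm_inc32() and
its byte-wise accesses are made up for the example):

#include <stdint.h>

/*
 * AES-GCM keeps a 16-byte counter block and, per NIST SP 800-38D,
 * increments only its last 32 bits (big-endian) between blocks; a wrap
 * from 0xffffffff to 0x00000000 must not carry into the remaining bits.
 */
static void gcm_inc32(uint8_t ctr[16])
{
	uint32_t lower;

	/* Read the last four bytes as a big-endian 32-bit value. */
	lower = ((uint32_t)ctr[12] << 24) | ((uint32_t)ctr[13] << 16) |
		((uint32_t)ctr[14] << 8) | (uint32_t)ctr[15];

	lower++;	/* wraps naturally at 0xffffffff */

	/* Write it back; bytes 0-11 of the block are never touched. */
	ctr[12] = lower >> 24;
	ctr[13] = lower >> 16;
	ctr[14] = lower >> 8;
	ctr[15] = lower;
}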
jenswi-linaro authored and jforissier committed Feb 1, 2024
1 parent b4d33ca commit fcabe15
Showing 2 changed files with 23 additions and 23 deletions.
28 changes: 15 additions & 13 deletions core/arch/arm/crypto/ghash-ce-core_a64.S
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright (c) 2020 Linaro Limited
+ * Copyright (c) 2020, 2024 Linaro Limited
  * Copyright (C) 2014 - 2017 Linaro Ltd. <[email protected]>
  *
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
@@ -11,11 +11,10 @@
 #define CPU_LE(x...)	x
 
 /*
- * If the lower half of CTR is initialized with zeroes or a low value we
- * can expect that the upper half will remain unchanged. As an optimization
- * make the code to increase the upper half optional.
+ * Only increase the lowest quarter, that is, 32-bits of the counter. If
+ * it wraps it must not propagate into the upper bits.
  */
-#define INC_HALF_CTR	0
+#define INC_QUART_CTR	1
 
 SHASH		.req	v0
 SHASH2		.req	v1
@@ -412,7 +411,7 @@ END_FUNC pmull_ghash_update_p8
 	ld1	{SHASH.2d}, [x4], #16
 	ld1	{HH.2d}, [x4]
 	ld1	{XL.2d}, [x1]
-#if INC_HALF_CTR
+#if INC_QUART_CTR
 	ldr	x8, [x5, #8]		// load lower counter
 #else
 	ldp	x9, x8, [x5]		// load counter
@@ -422,7 +421,7 @@
 	trn1	SHASH2.2d, SHASH.2d, HH.2d
 	trn2	T1.2d, SHASH.2d, HH.2d
 CPU_LE(	rev	x8, x8	)
-#if !INC_HALF_CTR
+#if !INC_QUART_CTR
 CPU_LE(	rev	x9, x9	)
 #endif
 	shl	MASK.2d, MASK.2d, #57
@@ -437,10 +436,13 @@ CPU_LE(	rev	x9, x9	)
 
 0:	ld1	{INP0.16b-INP1.16b}, [x3], #32
 
-#if INC_HALF_CTR
+#if INC_QUART_CTR
+	lsr	x12, x8, #32		// Save the upper 32 bits
 	rev	x9, x8
-	add	x11, x8, #1
-	add	x8, x8, #2
+	add	w11, w8, #1
+	add	w8, w8, #2
+	add	x11, x11, x12, lsl #32	// Restore the upper 32 bits
+	add	x8, x8, x12, lsl #32
 #endif
 
 	.if	\enc == 1
@@ -450,7 +452,7 @@
 
 	sub	w0, w0, #2
 
-#if INC_HALF_CTR
+#if INC_QUART_CTR
 	ld1	{KS0.8b}, [x5]		// load upper counter
 	rev	x11, x11
 	mov	KS1.8b, KS0.8b
@@ -563,11 +565,11 @@ CPU_LE(	rev	x9, x9	)
 	cbnz	w0, 0b
 
 CPU_LE(	rev	x8, x8	)
-#if !INC_HALF_CTR
+#if !INC_QUART_CTR
 CPU_LE(	rev	x9, x9	)
 #endif
 	st1	{XL.2d}, [x1]
-#if INC_HALF_CTR
+#if INC_QUART_CTR
 	str	x8, [x5, #8]		// store lower counter
 #else
 	stp	x9, x8, [x5]		// store counter
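
To make the updated assembly above easier to follow, here is a C rendition
of what the new INC_QUART_CTR sequence computes (a sketch under the
assumption that x8 holds the low 64 bits of the counter in host byte order,
as the surrounding CPU_LE(rev) handling suggests; the helper name is made
up for the example):

#include <stdint.h>

/*
 * Two counter values are produced per iteration, since the loop handles
 * two blocks at a time: CTR+1 ends up in x11 and CTR+2 in the new x8.
 */
static void inc_quart_ctr_pair(uint64_t *x8, uint64_t *x11)
{
	uint64_t upper = *x8 >> 32;		/* lsr x12, x8, #32 */
	uint32_t n1 = (uint32_t)*x8 + 1;	/* add w11, w8, #1 */
	uint32_t n2 = (uint32_t)*x8 + 2;	/* add w8, w8, #2 */

	*x11 = (upper << 32) | n1;		/* add x11, x11, x12, lsl #32 */
	*x8 = (upper << 32) | n2;		/* add x8, x8, x12, lsl #32 */
}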
18 changes: 8 additions & 10 deletions core/crypto/aes-gcm.c
@@ -346,25 +346,23 @@ TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
 void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_state *state)
 {
 	uint64_t c = 0;
+	uint32_t lower = 0;
 
-	c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[1]) + 1;
+	c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[1]);
+	lower = c + 1;
+	c = (c & GENMASK_64(63, 32)) | lower;
 	state->ctr[1] = TEE_U64_TO_BIG_ENDIAN(c);
-	if (!c) {
-		c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[0]) + 1;
-		state->ctr[0] = TEE_U64_TO_BIG_ENDIAN(c);
-	}
 }
 
 void internal_aes_gcm_dec_ctr(struct internal_aes_gcm_state *state)
 {
 	uint64_t c = 0;
+	uint32_t lower = 0;
 
-	c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[1]) - 1;
+	c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[1]);
+	lower = c - 1;
+	c = (c & GENMASK_64(63, 32)) | lower;
 	state->ctr[1] = TEE_U64_TO_BIG_ENDIAN(c);
-	if (c == UINT64_MAX) {
-		c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[0]) - 1;
-		state->ctr[0] = TEE_U64_TO_BIG_ENDIAN(c);
-	}
 }
 
 TEE_Result internal_aes_gcm_enc(const struct internal_aes_gcm_key *enc_key,
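
The wrap-around case described in the commit message can be checked with a
small, self-contained program (a hypothetical test, not part of the commit;
it mirrors the GENMASK_64(63, 32) masking used in aes-gcm.c):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/*
	 * Lower 64 bits of the counter block, host-endian, with the
	 * 32-bit GCM counter at 0xffffffff and some data above it.
	 */
	uint64_t c = 0x00000001ffffffffULL;

	uint32_t lower = c + 1;			/* wraps to 0 */
	uint64_t fixed = (c & 0xffffffff00000000ULL) | lower;
	uint64_t old = c + 1;			/* pre-patch: full-width add */

	assert(fixed == 0x0000000100000000ULL);	/* upper bits untouched */
	assert(old == 0x0000000200000000ULL);	/* carry leaked upward */

	return 0;
}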
