
Commit

lint
DmitriyMusatkin committed Jun 20, 2024
1 parent c8af0eb commit dd49ed2
Showing 4 changed files with 31 additions and 35 deletions.
12 changes: 6 additions & 6 deletions include/aws/cal/symmetric_cipher.h
@@ -87,7 +87,7 @@ AWS_CAL_API struct aws_symmetric_cipher *aws_aes_ctr_256_new(
* aws_symmetric_cipher_get_initialization_vector()
*
* respectively.
*
*
* If aad is set it will be copied and applied to the cipher.
*
* If they are set, that key and iv will be copied internally and used by the cipher.
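As an aside on the documented behavior above, a minimal usage sketch. It assumes the four-argument aws_aes_gcm_256_new(allocator, key, iv, aad) form from this header and that passing NULL for key/iv asks the cipher to generate them, per the surrounding comment; it is not taken from the commit itself.

```c
#include <aws/cal/symmetric_cipher.h>
#include <aws/common/byte_buf.h>

/* Sketch: create a GCM cipher, letting it generate the key/IV, then read the
 * generated IV back out so it can be stored next to the ciphertext. */
static struct aws_symmetric_cipher *s_example_gcm_new(struct aws_allocator *allocator) {
    struct aws_byte_cursor aad = aws_byte_cursor_from_c_str("additional authenticated data");

    /* NULL key/iv: the cipher generates them; the aad contents are copied internally. */
    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, NULL, NULL, &aad);
    if (cipher == NULL) {
        return NULL;
    }

    struct aws_byte_cursor generated_iv = aws_symmetric_cipher_get_initialization_vector(cipher);
    (void)generated_iv; /* e.g. persist alongside the ciphertext */

    return cipher;
}
```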
@@ -190,11 +190,11 @@ AWS_CAL_API int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_ci
* Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the
* same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for
* immediate reuse.
* Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make sure
* to make a copy of tag before reseting the cipher and pass that copy for decryption.
*
* Warning: In most cases its a really bad idea to reset a cipher and perform another operation using that cipher.
* Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher
* Note: GCM tag is not preserved between operations. If you intend to do encrypt followed directly by decrypt, make
* sure to make a copy of tag before reseting the cipher and pass that copy for decryption.
*
* Warning: In most cases its a really bad idea to reset a cipher and perform another operation using that cipher.
* Key and IV should not be reused for different operations. Instead of reseting the cipher, destroy the cipher
* and create new one with a new key/iv pair. Use reset at your own risk, and only after careful consideration.
*
* returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
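The "copy the tag before reset" note above is easy to get wrong in practice. A minimal sketch of the pattern it describes, assuming the cursor-returning aws_symmetric_cipher_get_tag() accessor alongside the set_tag() call used in the tests below; error handling is deliberately thin and the helper name is illustrative.

```c
#include <aws/cal/symmetric_cipher.h>
#include <aws/common/byte_buf.h>

/* Hypothetical helper: encrypt, copy the GCM tag out, reset, then decrypt with
 * the saved tag. `cipher` is assumed to be an AES-256-GCM cipher. */
static int s_gcm_encrypt_then_decrypt_example(
    struct aws_allocator *allocator,
    struct aws_symmetric_cipher *cipher,
    struct aws_byte_cursor plaintext,
    struct aws_byte_buf *out_decrypted) {

    struct aws_byte_buf ciphertext;
    aws_byte_buf_init(&ciphertext, allocator, plaintext.len + AWS_AES_256_CIPHER_BLOCK_SIZE);

    int ret = aws_symmetric_cipher_encrypt(cipher, plaintext, &ciphertext);
    ret |= aws_symmetric_cipher_finalize_encryption(cipher, &ciphertext);

    /* The tag is not preserved across reset, so copy it out first. */
    struct aws_byte_buf tag_copy;
    aws_byte_buf_init_copy_from_cursor(&tag_copy, allocator, aws_symmetric_cipher_get_tag(cipher));

    aws_symmetric_cipher_reset(cipher);

    /* Hand the saved tag back so decryption can verify it. */
    aws_symmetric_cipher_set_tag(cipher, aws_byte_cursor_from_buf(&tag_copy));
    ret |= aws_symmetric_cipher_decrypt(cipher, aws_byte_cursor_from_buf(&ciphertext), out_decrypted);
    ret |= aws_symmetric_cipher_finalize_decryption(cipher, out_decrypted);

    aws_byte_buf_clean_up_secure(&tag_copy);
    aws_byte_buf_clean_up(&ciphertext);
    return ret;
}
```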
5 changes: 1 addition & 4 deletions source/unix/openssl_aes.c
@@ -343,10 +343,7 @@ static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct

if (ret_val == AWS_OP_SUCCESS) {
if (!EVP_CIPHER_CTX_ctrl(
openssl_cipher->encryptor_ctx,
EVP_CTRL_GCM_GET_TAG,
(int)cipher->tag.capacity,
cipher->tag.buffer)) {
openssl_cipher->encryptor_ctx, EVP_CTRL_GCM_GET_TAG, (int)cipher->tag.capacity, cipher->tag.buffer)) {
cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
30 changes: 14 additions & 16 deletions source/windows/bcrypt_aes.c
@@ -277,8 +277,7 @@ static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) {
struct aes_bcrypt_cipher *cipher_impl = cipher->impl;

s_clear_reusable_components(cipher);
return s_initialize_cipher_materials(
cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false);
return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false);
}

static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) {
@@ -289,16 +288,14 @@ static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) {
/* reset the working iv back to the original IV. We do this because
we're manually maintaining the counter. */
aws_byte_buf_append_dynamic(&cipher_impl->working_iv, &iv_cur);
return s_initialize_cipher_materials(
cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false);
return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false);
}

static int s_reset_gcm_cipher(struct aws_symmetric_cipher *cipher) {
struct aes_bcrypt_cipher *cipher_impl = cipher->impl;

s_clear_reusable_components(cipher);
return s_initialize_cipher_materials(
cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true);
return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true);
}

static int s_aes_default_encrypt(
@@ -539,17 +536,18 @@ struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
return NULL;
}

/*
/*
* The buffer management for this mode is a good deal easier because we don't care about padding.
* In chained mode, BCrypt expects the data to be passed in in multiples of block size,
* followed by a finalize call that turns off chaining and provides any remaining data.
* This function takes care of managing this state - you give it data to work and cipher state and
* This function takes care of managing this state - you give it data to work and cipher state and
* it will return what data can be sent to bcrypt now and as side effect will update the cipher state
* with any leftover data.
* Note: this function takes a scratch buffer that might be used for to back data returned by the cursor.
* It is on caller to cleanup that scratch buffer.
*/
static struct aws_byte_cursor s_gcm_get_working_slice(struct aes_bcrypt_cipher *cipher_impl,
static struct aws_byte_cursor s_gcm_get_working_slice(
struct aes_bcrypt_cipher *cipher_impl,
struct aws_byte_cursor data,
struct aws_byte_buf *scratch) {
AWS_PRECONDITION(cipher_impl);
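The comment above describes the carry/split bookkeeping, but the hunk stops before the body. A rough sketch of the idea, assuming a hypothetical leftover buffer named `overflow` on `aes_bcrypt_cipher`; the real field name, fast paths, and edge cases will differ.

```c
/* Illustrative sketch only (not the actual implementation): return the largest
 * block-size multiple that can go to BCrypt now, buffering the remainder in the
 * cipher state for the next update or for finalize. */
static struct aws_byte_cursor s_working_slice_sketch(
    struct aes_bcrypt_cipher *cipher_impl,
    struct aws_byte_cursor data,
    struct aws_byte_buf *scratch) {

    /* Combine previously buffered leftover bytes with the new data in scratch. */
    aws_byte_buf_init(scratch, cipher_impl->cipher.allocator, cipher_impl->overflow.len + data.len);
    struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
    aws_byte_buf_append(scratch, &overflow_cur);
    aws_byte_buf_append(scratch, &data);

    /* Everything past the last whole block stays behind as the new leftover. */
    size_t usable_len = (scratch->len / AWS_AES_256_CIPHER_BLOCK_SIZE) * AWS_AES_256_CIPHER_BLOCK_SIZE;
    struct aws_byte_cursor working = aws_byte_cursor_from_array(scratch->buffer, usable_len);

    aws_byte_buf_reset(&cipher_impl->overflow, true /* zero contents */);
    struct aws_byte_cursor leftover =
        aws_byte_cursor_from_array(scratch->buffer + usable_len, scratch->len - usable_len);
    aws_byte_buf_append_dynamic(&cipher_impl->overflow, &leftover);

    return working; /* backed by scratch; the caller cleans up scratch */
}
```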
@@ -587,9 +585,9 @@ static struct aws_byte_cursor s_gcm_get_working_slice(struct aes_bcrypt_cipher *
/*
* bcrypt requires pbTag and cbTag initialized before starting chained encrypt or decrypt.
* why bcrypt needs it initialized early and every other lib can wait until is a mystery.
* following function is a helper to init the state correctly for encrypt (and decrypt has a similar function later).
* following function is a helper to init the state correctly for encrypt (and decrypt has a similar function later).
* For encrypt this blows away whatever tag user might have set and ensures that its atleast block size.
* Note: gcm supports shorter tags, but bcrypt always generates block sized one
* Note: gcm supports shorter tags, but bcrypt always generates block sized one
* (caller can decide to make them shorter by removing bytes from the end).
*/
static void s_gcm_ensure_tag_setup_for_encrypt(struct aws_symmetric_cipher *cipher) {
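The body of this helper is cut off by the hunk. A minimal sketch of the behavior the comment describes (clobber any caller-set tag and make it block sized), assuming the tag lives in `cipher->tag` as in the OpenSSL path above; everything except the public aws_byte_buf calls is illustrative.

```c
/* Sketch only: make cipher->tag a block-sized, zeroed buffer so BCrypt's
 * pbTag/cbTag can point at it for the whole chained encryption. */
static void s_gcm_tag_setup_sketch(struct aws_symmetric_cipher *cipher) {
    /* Drop whatever tag the caller may have set; encrypt always regenerates it. */
    aws_byte_buf_clean_up_secure(&cipher->tag);
    aws_byte_buf_init(&cipher->tag, cipher->allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
    aws_byte_buf_secure_zero(&cipher->tag);
    cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
}
```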
@@ -618,9 +616,9 @@ static int s_aes_gcm_encrypt(

s_gcm_ensure_tag_setup_for_encrypt(cipher);

struct aws_byte_buf working_buffer;
struct aws_byte_buf working_buffer;
struct aws_byte_cursor working_cur = s_gcm_get_working_slice(cipher_impl, to_encrypt, &working_buffer);

int ret_val = AWS_OP_SUCCESS;
if (working_cur.len > 0) {
ret_val = s_aes_default_encrypt(cipher, &working_cur, out);
@@ -657,7 +655,7 @@ static int s_aes_gcm_decrypt(

struct aws_byte_buf working_buffer;
struct aws_byte_cursor working_cur = s_gcm_get_working_slice(cipher_impl, to_decrypt, &working_buffer);

int ret_val = AWS_OP_SUCCESS;
if (working_cur.len > 0) {
ret_val = s_default_aes_decrypt(cipher, &working_cur, out);
@@ -729,8 +727,8 @@ struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
cipher->cipher.vtable = &s_aes_gcm_vtable;

/* GCM does the counting under the hood, so we let it handle the final 4 bytes of the IV. */
if (s_initialize_cipher_materials(
cipher, key, iv, aad, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) != AWS_OP_SUCCESS) {
if (s_initialize_cipher_materials(cipher, key, iv, aad, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) !=
AWS_OP_SUCCESS) {
goto error;
}

19 changes: 10 additions & 9 deletions tests/aes256_test.c
@@ -1565,7 +1565,7 @@ static int s_aes_gcm_corner_case_checker(
struct aws_byte_cursor aad_cur,
struct aws_byte_cursor data_cur,
struct aws_byte_cursor expected_tag_cur) {

/* just a random tag value which should not match anything*/
uint8_t wrong_tag[] = {
0x83, 0xC0, 0xE4, 0x2B, 0xB1, 0x95, 0xE2, 0x62, 0xCB, 0x3B, 0x3A, 0x74, 0xA0, 0xDA, 0xE1, 0xC8};
@@ -1617,8 +1617,7 @@ static int s_aes_gcm_corner_case_checker(
aws_symmetric_cipher_set_tag(cipher, wrong_tag_cur);
ciphertext_cur = aws_byte_cursor_from_buf(&encrypt_buf);
ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, ciphertext_cur, &decrypted_buf));
ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT,
aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));
ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));

/* reset and verify decrypt with no tag fails */
aws_symmetric_cipher_reset(cipher);
@@ -1669,7 +1668,8 @@ static int s_aes_test_gcm_tag_corner_cases(struct aws_allocator *allocator, void
0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47,
0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D};

uint8_t data[] = {0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E};
uint8_t data[] = {
0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E};

uint8_t expected_tag[] = {
0x76, 0x4D, 0x21, 0xD6, 0xC0, 0xD8, 0xC7, 0xF9, 0xCA, 0x6D, 0xF2, 0x19, 0xAE, 0x56, 0xDC, 0x1F};
@@ -1679,7 +1679,7 @@ static int s_aes_test_gcm_tag_corner_cases(struct aws_allocator *allocator, void
struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad));
struct aws_byte_cursor expected_tag_cur = aws_byte_cursor_from_array(expected_tag, sizeof(expected_tag));
struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data));

return s_aes_gcm_corner_case_checker(allocator, key_cur, iv_cur, aad_cur, data_cur, expected_tag_cur);
}
AWS_TEST_CASE(aes_test_gcm_tag_corner_cases, s_aes_test_gcm_tag_corner_cases)
@@ -1696,9 +1696,10 @@ static int s_aes_test_gcm_tag_large_input_corner_cases(struct aws_allocator *all
0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47,
0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D};

uint8_t data[] = {0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E,
0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E,
0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E};
uint8_t data[] = {0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A,
0x84, 0x99, 0x89, 0x3E, 0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00,
0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E, 0x84, 0x99, 0x89, 0x3E, 0x16,
0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E};

uint8_t expected_tag[] = {
0xEA, 0x5E, 0x8A, 0x4B, 0x76, 0xE8, 0x9D, 0xC5, 0xF1, 0x32, 0x14, 0x64, 0xD0, 0x93, 0x74, 0xB7};
@@ -1708,7 +1709,7 @@ static int s_aes_test_gcm_tag_large_input_corner_cases(struct aws_allocator *all
struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad));
struct aws_byte_cursor expected_tag_cur = aws_byte_cursor_from_array(expected_tag, sizeof(expected_tag));
struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data));

return s_aes_gcm_corner_case_checker(allocator, key_cur, iv_cur, aad_cur, data_cur, expected_tag_cur);
}
AWS_TEST_CASE(aes_test_gcm_tag_large_input_corner_cases, s_aes_test_gcm_tag_large_input_corner_cases)
