mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-26 03:09:34 +03:00)
Remove checks for null out value in encryption paths
These paths are never exercised, as the parameters given are always different cipher and plaintext `crypto_data_t` pointers.

Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Attila Fueloep <attila@fueloep.org>
Signed-off-by: Dirkjan Bussink <d.bussink@gmail.com>
Closes #9661
Closes #10015
parent 1d2ddb9bb9
commit 112c1bff94
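
For orientation, here is a minimal, self-contained sketch of the contract this change codifies. The type and function names below are illustrative stand-ins, not the real ICP definitions: callers always pass two distinct, non-NULL `crypto_data_t` buffers, so the in-place `out == NULL` branches in the mode update routines were unreachable and can simply be removed, with ASSERTs documenting the assumption.

/*
 * Illustrative sketch only; crypto_data_t here is a hypothetical
 * stand-in, and the functions are not the real ICP entry points.
 */
#include <assert.h>
#include <stddef.h>

typedef struct crypto_data {
    void    *cd_base;
    size_t  cd_offset;
    size_t  cd_length;
} crypto_data_t;

/* Stand-in for a *_contiguous_blocks() mode update routine. */
static int
mode_update(const crypto_data_t *data, crypto_data_t *out)
{
    /* Takes the place of the removed `if (out == NULL)` in-place branches. */
    assert(out != NULL);
    /* ... process `data`, write the result through `out` ... */
    (void)data;
    return (0);
}

/* Stand-in for a provider entry point such as aes_encrypt(). */
static int
provider_encrypt(crypto_data_t *plaintext, crypto_data_t *ciphertext)
{
    /* Replaces the old AES_ARG_INPLACE(); NULL output is no longer accepted. */
    assert(ciphertext != NULL);
    assert(plaintext != ciphertext);
    return (mode_update(plaintext, ciphertext));
}

int
main(void)
{
    unsigned char pt[16] = { 0 }, ct[16] = { 0 };
    crypto_data_t in = { pt, 0, sizeof (pt) };
    crypto_data_t out = { ct, 0, sizeof (ct) };

    /* Callers always supply two distinct buffers, as the commit message notes. */
    return (provider_encrypt(&in, &out));
}
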
@@ -60,7 +60,6 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    }

    lastp = (uint8_t *)ctx->cbc_iv;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    do {

@@ -79,24 +78,6 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
            blockp = datap;
        }

        if (out == NULL) {
            /*
             * XOR the previous cipher block or IV with the
             * current clear block.
             */
            xor_block(lastp, blockp);
            encrypt(ctx->cbc_keysched, blockp, blockp);

            ctx->cbc_lastp = blockp;
            lastp = blockp;

            if (ctx->cbc_remainder_len > 0) {
                bcopy(blockp, ctx->cbc_copy_to,
                    ctx->cbc_remainder_len);
                bcopy(blockp + ctx->cbc_remainder_len, datap,
                    need);
            }
        } else {
            /*
             * XOR the previous cipher block or IV with the
             * current clear block.

@@ -119,7 +100,6 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
            }
            /* update offset */
            out->cd_offset += block_size;
        }

        /* Update pointer to next block of data to be processed. */
        if (ctx->cbc_remainder_len != 0) {

@@ -187,7 +167,6 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    }

    lastp = ctx->cbc_lastp;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    do {

@@ -209,13 +188,9 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
        /* LINTED: pointer alignment */
        copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));

        if (out != NULL) {
            decrypt(ctx->cbc_keysched, blockp,
                (uint8_t *)ctx->cbc_remainder);
            blockp = (uint8_t *)ctx->cbc_remainder;
        } else {
            decrypt(ctx->cbc_keysched, blockp, blockp);
        }

        /*
         * XOR the previous cipher block or IV with the

@@ -226,7 +201,6 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
        /* LINTED: pointer alignment */
        lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);

        if (out != NULL) {
            crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                &out_data_1_len, &out_data_2, block_size);

@@ -239,12 +213,6 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
            /* update offset */
            out->cd_offset += block_size;

        } else if (ctx->cbc_remainder_len > 0) {
            /* copy temporary block to where it belongs */
            bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
            bcopy(blockp + ctx->cbc_remainder_len, datap, need);
        }

        /* Update pointer to next block of data to be processed. */
        if (ctx->cbc_remainder_len != 0) {
            datap += need;

@@ -68,7 +68,6 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
    }

    lastp = (uint8_t *)ctx->ccm_cb;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    mac_buf = (uint8_t *)ctx->ccm_mac_buf;

@@ -126,14 +125,6 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,

        ctx->ccm_processed_data_len += block_size;

        if (out == NULL) {
            if (ctx->ccm_remainder_len > 0) {
                bcopy(blockp, ctx->ccm_copy_to,
                    ctx->ccm_remainder_len);
                bcopy(blockp + ctx->ccm_remainder_len, datap,
                    need);
            }
        } else {
            crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                &out_data_1_len, &out_data_2, block_size);

@@ -150,7 +141,6 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
            }
            /* update offset */
            out->cd_offset += block_size;
        }

        /* Update pointer to next block of data to be processed. */
        if (ctx->ccm_remainder_len != 0) {

@@ -61,7 +61,6 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
    }

    lastp = (uint8_t *)ctx->ctr_cb;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    do {

@@ -111,14 +110,6 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
         */
        xor_block(blockp, lastp);

        if (out == NULL) {
            if (ctx->ctr_remainder_len > 0) {
                bcopy(lastp, ctx->ctr_copy_to,
                    ctx->ctr_remainder_len);
                bcopy(lastp + ctx->ctr_remainder_len, datap,
                    need);
            }
        } else {
            crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                &out_data_1_len, &out_data_2, block_size);

@@ -130,7 +121,6 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
            }
            /* update offset */
            out->cd_offset += block_size;
        }

        /* Update pointer to next block of data to be processed. */
        if (ctx->ctr_remainder_len != 0) {

@@ -58,7 +58,6 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
    }

    lastp = (uint8_t *)ctx->ecb_iv;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    do {

@@ -77,19 +76,6 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
            blockp = datap;
        }

        if (out == NULL) {
            cipher(ctx->ecb_keysched, blockp, blockp);

            ctx->ecb_lastp = blockp;
            lastp = blockp;

            if (ctx->ecb_remainder_len > 0) {
                bcopy(blockp, ctx->ecb_copy_to,
                    ctx->ecb_remainder_len);
                bcopy(blockp + ctx->ecb_remainder_len, datap,
                    need);
            }
        } else {
            cipher(ctx->ecb_keysched, blockp, lastp);
            crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                &out_data_1_len, &out_data_2, block_size);

@@ -102,7 +88,6 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
            }
            /* update offset */
            out->cd_offset += block_size;
        }

        /* Update pointer to next block of data to be processed. */
        if (ctx->ecb_remainder_len != 0) {

@@ -117,7 +117,6 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    }

    lastp = (uint8_t *)ctx->gcm_cb;
    if (out != NULL)
        crypto_init_ptrs(out, &iov_or_mp, &offset);

    gops = gcm_impl_get_ops();

@@ -154,22 +153,6 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,

        ctx->gcm_processed_data_len += block_size;

        /*
         * The following copies a complete GCM block back to where it
         * came from if there was a remainder in the last call and out
         * is NULL. That doesn't seem to make sense. So we assert this
         * can't happen and leave the code in for reference.
         * See https://github.com/zfsonlinux/zfs/issues/9661
         */
        ASSERT(out != NULL);
        if (out == NULL) {
            if (ctx->gcm_remainder_len > 0) {
                bcopy(blockp, ctx->gcm_copy_to,
                    ctx->gcm_remainder_len);
                bcopy(blockp + ctx->gcm_remainder_len, datap,
                    need);
            }
        } else {
            crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                &out_data_1_len, &out_data_2, block_size);

@@ -186,7 +169,6 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
            }
            /* update offset */
            out->cd_offset += block_size;
        }

        /* add ciphertext to the hash */
        GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gops);

@@ -1093,7 +1075,7 @@ gcm_toggle_avx(void)
    }

    /*
     * Clear senssitve data in the context.
     * Clear sensitive data in the context.
     *
     * ctx->gcm_remainder may contain a plaintext remainder. ctx->gcm_H and
     * ctx->gcm_Htable contain the hash sub key which protects authentication.

@@ -1189,13 +1171,6 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
        GHASH_AVX(ctx, tmp, block_size);
        clear_fpu_regs();
        kfpu_end();
        /*
         * We don't follow gcm_mode_encrypt_contiguous_blocks() here
         * but assert that out is not null.
         * See gcm_mode_encrypt_contiguous_blocks() above and
         * https://github.com/zfsonlinux/zfs/issues/9661
         */
        ASSERT(out != NULL);
        rv = crypto_put_output_data(tmp, out, block_size);
        out->cd_offset += block_size;
        gcm_incr_counter_block(ctx);

@@ -1217,13 +1192,11 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
            rv = CRYPTO_FAILED;
            goto out_nofpu;
        }
        if (out != NULL) {
            rv = crypto_put_output_data(ct_buf, out, chunk_size);
            if (rv != CRYPTO_SUCCESS) {
                goto out_nofpu;
            }
            out->cd_offset += chunk_size;
        }
        datap += chunk_size;
        ctx->gcm_processed_data_len += chunk_size;
    }

@@ -1239,13 +1212,11 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
            rv = CRYPTO_FAILED;
            goto out;
        }
        if (out != NULL) {
            rv = crypto_put_output_data(ct_buf, out, done);
            if (rv != CRYPTO_SUCCESS) {
                goto out;
            }
            out->cd_offset += done;
        }
        ctx->gcm_processed_data_len += done;
        datap += done;
        bleft -= done;

@@ -1265,13 +1236,11 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,

        gcm_xor_avx(datap, tmp);
        GHASH_AVX(ctx, tmp, block_size);
        if (out != NULL) {
            rv = crypto_put_output_data(tmp, out, block_size);
            if (rv != CRYPTO_SUCCESS) {
                goto out;
            }
            out->cd_offset += block_size;
        }
        gcm_incr_counter_block(ctx);
        ctx->gcm_processed_data_len += block_size;
        datap += block_size;

@@ -30,9 +30,6 @@
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>

#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_CIPHER_OFFSET(f) offsetof(crypto_cipher_ops_t, f)

/*
 * Encryption and decryption routines.
 */

@@ -30,9 +30,6 @@
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>

#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_DIGEST_OFFSET(f) offsetof(crypto_digest_ops_t, f)

/*
 * Message digest routines
 */

@@ -30,9 +30,6 @@
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>

#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_MAC_OFFSET(f) offsetof(crypto_mac_ops_t, f)

/*
 * Message authentication codes routines.
 */

@@ -149,6 +149,7 @@ crypto_update_iov(void *ctx, crypto_data_t *input, crypto_data_t *output,
    common_ctx_t *common_ctx = ctx;
    int rv;

    ASSERT(input != output);
    if (input->cd_miscdata != NULL) {
        copy_block((uint8_t *)input->cd_miscdata,
            &common_ctx->cc_iv[0]);

@@ -158,7 +159,7 @@ crypto_update_iov(void *ctx, crypto_data_t *input, crypto_data_t *output,
        return (CRYPTO_ARGUMENTS_BAD);

    rv = (cipher)(ctx, input->cd_raw.iov_base + input->cd_offset,
        input->cd_length, (input == output) ? NULL : output);
        input->cd_length, output);

    return (rv);
}

@@ -175,6 +176,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
    uint_t vec_idx;
    size_t cur_len;

    ASSERT(input != output);
    if (input->cd_miscdata != NULL) {
        copy_block((uint8_t *)input->cd_miscdata,
            &common_ctx->cc_iv[0]);

@@ -208,7 +210,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
            offset, length);

        int rv = (cipher)(ctx, uiop->uio_iov[vec_idx].iov_base + offset,
            cur_len, (input == output) ? NULL : output);
            cur_len, output);

        if (rv != CRYPTO_SUCCESS) {
            return (rv);

@@ -241,9 +241,6 @@ typedef struct crypto_logout32 {
#define CRYPTO_LOGIN CRYPTO(40)
#define CRYPTO_LOGOUT CRYPTO(41)

/* flag for encrypt and decrypt operations */
#define CRYPTO_INPLACE_OPERATION 0x00000001

/*
 * Cryptographic Ioctls
 */

@@ -92,11 +92,6 @@ static crypto_mech_info_t aes_mech_info_tab[] = {
    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* operations are in-place if the output buffer is NULL */
#define AES_ARG_INPLACE(input, output) \
    if ((output) == NULL) \
        (output) = (input);

static void aes_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t aes_control_ops = {

@@ -413,7 +408,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
        == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
        return (CRYPTO_DATA_LEN_RANGE);

    AES_ARG_INPLACE(plaintext, ciphertext);
    ASSERT(ciphertext != NULL);

    /*
     * We need to just return the length needed to store the output.

@@ -530,7 +525,7 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
        return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
    }

    AES_ARG_INPLACE(ciphertext, plaintext);
    ASSERT(plaintext != NULL);

    /*
     * Return length needed to store the output.

@@ -635,7 +630,7 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    AES_ARG_INPLACE(plaintext, ciphertext);
    ASSERT(ciphertext != NULL);

    /* compute number of bytes that will hold the ciphertext */
    out_len = aes_ctx->ac_remainder_len;

@@ -705,7 +700,7 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    AES_ARG_INPLACE(ciphertext, plaintext);
    ASSERT(plaintext != NULL);

    /*
     * Compute number of bytes that will hold the plaintext.

@@ -947,7 +942,7 @@ aes_encrypt_atomic(crypto_provider_handle_t provider,
    size_t length_needed;
    int ret;

    AES_ARG_INPLACE(plaintext, ciphertext);
    ASSERT(ciphertext != NULL);

    /*
     * CTR, CCM, GCM, and GMAC modes do not require that plaintext

@@ -1073,7 +1068,7 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
    size_t length_needed;
    int ret;

    AES_ARG_INPLACE(ciphertext, plaintext);
    ASSERT(plaintext != NULL);

    /*
     * CCM, GCM, CTR, and GMAC modes do not require that ciphertext